diff --git "a/2662.jsonl" "b/2662.jsonl" new file mode 100644--- /dev/null +++ "b/2662.jsonl" @@ -0,0 +1,790 @@ +{"seq_id":"563619","text":"import RPi.GPIO as GPIO\nimport time\n \nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\ncoil_A_1_pin = 4\ncoil_A_2_pin = 26\ncoil_B_1_pin = 23\ncoil_B_2_pin = 24\n\ncoil2_A_1_pin = 16\ncoil2_A_2_pin = 22\ncoil2_B_1_pin = 27\ncoil2_B_2_pin = 20\n \n# adjust if different\nStepCount=8\nSeq = [\n [1,0,0,0],\n [1,1,0,0],\n [0,1,0,0],\n [0,1,1,0],\n [0,0,1,0],\n [0,0,1,1],\n [0,0,0,1],\n [1,0,0,1]\n ]\n\n############################################\n# Setup the pins on the drive stepper motors\n# First stepper motor\nGPIO.setup(coil_A_1_pin, GPIO.OUT)\nGPIO.setup(coil_A_2_pin, GPIO.OUT)\nGPIO.setup(coil_B_1_pin, GPIO.OUT)\nGPIO.setup(coil_B_2_pin, GPIO.OUT)\n\n# Second stepper motor\nGPIO.setup(coil2_A_1_pin, GPIO.OUT)\nGPIO.setup(coil2_A_2_pin, GPIO.OUT)\nGPIO.setup(coil2_B_1_pin, GPIO.OUT)\nGPIO.setup(coil2_B_2_pin, GPIO.OUT)\n \ndef setStep(w1, w2, w3, w4):\n GPIO.output(coil_A_1_pin, w1)\n GPIO.output(coil_A_2_pin, w2)\n GPIO.output(coil_B_1_pin, w3)\n GPIO.output(coil_B_2_pin, w4)\n \n GPIO.output(coil2_A_1_pin, w1)\n GPIO.output(coil2_A_2_pin, w2)\n GPIO.output(coil2_B_1_pin, w3)\n GPIO.output(coil2_B_2_pin, w4)\n \ndef forward(delay, steps):\n for i in range(steps):\n for j in range(StepCount):\n setStep(Seq[j][0], Seq[j][1], Seq[j][2], Seq[j][3])\n time.sleep(delay)\n \ndef backwards(delay, steps):\n for i in range(steps):\n for j in reversed(range(StepCount)):\n setStep(Seq[j][0], Seq[j][1], Seq[j][2], Seq[j][3])\n time.sleep(delay)\n \nif __name__ == '__main__':\n while True:\n delay = input(\"Time Delay (ms)?\")\n #delay = 7\n steps = input(\"How many steps forward? \")\n #steps = 500\n forward(int(delay) / 10000.0, int(steps))\n steps = input(\"How many steps backwards? 
\")\n backwards(int(delay) / 10000.0, int(steps))","sub_path":"test_steppermotor.py","file_name":"test_steppermotor.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228586853","text":"from .board import Board\nfrom .player import Player\nfrom .game import Game\n\nclass Train():\n def __init__(self, num_of_games):\n self.num_of_games = num_of_games\n self.game_wins = [[],[],[]]\n self.states = []\n self.moves = []\n \n def simulate(self, xbot, obot, print_game = False):\n for j in range(self.num_of_games):\n board = Board()\n current_turn = Player.x\n winner = None\n prev_board = board.grid\n if j % 1000 == 0:\n print_game = True\n print(\"Rounds {}\".format(j))\n xbot_exp_rate = xbot.exp_rate\n xbot.exp_rate = 0\n test = Game(100)\n test.simulate(xbot,obot)\n if test.o_wins < 20:\n xbot.exp_rate = xbot_exp_rate*0.9\n else:\n xbot.exp_rate = xbot_exp_rate\n # here implicitly assumed that we always train the first player\n self.game_wins[0].append(test.o_wins)\n self.game_wins[1].append(test.ties)\n self.game_wins[2].append(test.x_wins)\n self.states.append(xbot.check(prev_board))\n if test.o_wins < 20:\n xbot.savePolicy(\"/home/svu/e0235225/FYP/TicTacToe/Epo_Vmodel1_round{}\".format(str(j)))\n # print(self.states)\n for i in range(9):\n choice = []\n if (current_turn == xbot.player):\n choice = xbot.select_move(board)\n else:\n choice = obot.select_move(board)\n board.make_move(choice[0], choice[1], current_turn)\n\n winner = board.has_winner()\n\n if print_game:\n self.moves.append(board.moves)\n print_game = False\n if (winner != None):\n #print (\"Congrats \" + str(winner))\n break\n elif (i == 8):\n #print (\"It's a tie!\")\n break\n # here implicitly assumed that we always train the first player\n # and realised... 
we train twice for winning move in tictactoe_NN_vfunc6...\n if (current_turn == xbot.player):\n xbot.feedReward(board.grid,prev_board,0)\n current_turn = current_turn.other\n prev_board = board.grid\n\n if (winner == Player.x):\n xbot.feedReward(board.grid,prev_board,2)\n elif (winner == Player.o):\n xbot.feedReward(board.grid,prev_board,-2)\n else:\n xbot.feedReward(board.grid,prev_board,1)\n ","sub_path":"TicTacToe/SemiGradient/Minimax/tictactoe/ttt/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232985578","text":"import os\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\n\ndef init_parameter(self, pid, os_system):\n self.now_download_value = 0\n self.pid = pid\n self.os_system = os_system\n self.check_version_result = False\n self.load_end_anime_status = False\n self.load_week_label_status = False\n self.load_anime_label_status = False\n self.thread_write_download_order_status = False\n self.end_tab = dict()\n self.week_dict = dict()\n self.preview_dict = dict()\n self.end_qt_object = dict()\n self.page_button_dict = dict()\n self.week_layout_dict = dict()\n self.story_checkbox_dict = dict()\n self.download_anime_Thread = dict()\n self.history_tableWidget_dict = dict()\n self.download_progressBar_dict = dict()\n self.download_status_label_dict = dict()\n self.tableWidgetItem_download_dict = dict()\n self.week = {0: self.Monday_scrollAreaWidgetContents, 1: self.Tuesday_scrollAreaWidgetContents,\n 2: self.Wednesday_scrollAreaWidgetContents, 3: self.Thursday_scrollAreaWidgetContents,\n 4: self.Friday_scrollAreaWidgetContents, 5: self.Staurday_scrollAreaWidgetContents,\n 6: self.Sunday_scrollAreaWidgetContents}\n self.download_tableWidget.setColumnWidth(0, 400)\n self.download_tableWidget.setColumnWidth(1, 150)\n # self.download_tableWidget.setColumnWidth(2, 431)\n self.download_tableWidget.horizontalHeader().setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)\n self.download_tableWidget.verticalHeader().setVisible(False)\n self.download_tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.download_tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.download_tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.download_tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.history_tableWidget.setColumnWidth(1, 150)\n self.history_tableWidget.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n self.history_tableWidget.verticalHeader().setVisible(False)\n self.history_tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.history_tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.history_tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.setWindowIcon(QtGui.QIcon('image/logo.ico'))\n self.user_icon_label.setPixmap(QtGui.QPixmap(\"./image/noavatar_small.gif\"))\n self.setFixedSize(self.width(), self.height())\n","sub_path":"event/InitParameter.py","file_name":"InitParameter.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190046731","text":"\n\nfrom xai.brain.wordbase.adjectives._storybook import _STORYBOOK\n\n#calss header\nclass _STORYBOOKS(_STORYBOOK, ):\n\tdef __init__(self,): \n\t\t_STORYBOOK.__init__(self)\n\t\tself.name = \"STORYBOOKS\"\n\t\tself.specie = 
'adjectives'\n\t\tself.basic = \"storybook\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_storybooks.py","file_name":"_storybooks.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367158879","text":"import pika, random, time\nfrom logzero import logger as log\nfrom pydantic import BaseModel\n\n\nclass RabbitConfig(BaseModel):\n RB_EVENT_ROUTE: str\n RB_QUEUE_NAME: str\n RB_EXCHANGE_NAME: str\n RB_ROUTING_KEY: str\n RB_URLS: str\n\n\nclass RabbitClient:\n def __init__(self, config: RabbitConfig):\n self.cfg = config\n self.event_route = self.cfg[\"RB_EVENT_ROUTE\"]\n self.prepare_argument()\n\n def prepare_argument(self):\n all_endpoints = []\n\n for endpoint in self.cfg[\"RB_URLS\"].split(\",\"):\n all_endpoints.append(pika.URLParameters(endpoint))\n\n self.parameters = all_endpoints\n\n def consume(self, thread_handler):\n while True:\n try:\n threads = []\n log.info(\"Connecting...\")\n random.shuffle(self.parameters)\n connection = pika.BlockingConnection(self.parameters)\n channel = connection.channel()\n channel.basic_qos(prefetch_count=3)\n channel.queue_declare(queue=self.cfg[\"RB_QUEUE_NAME\"],\n durable=True,\n exclusive=False,\n auto_delete=False)\n channel.queue_bind(self.cfg[\"RB_QUEUE_NAME\"],\n self.cfg[\"RB_EXCHANGE_NAME\"],\n routing_key=self.cfg[\"RB_ROUTING_KEY\"])\n callback = thread_handler(threads)\n channel.basic_consume(self.cfg[\"RB_QUEUE_NAME\"], callback)\n\n try:\n channel.start_consuming()\n except KeyboardInterrupt:\n channel.stop_consuming()\n for t in threads:\n t.join()\n\n connection.close()\n break\n except pika.exceptions.ConnectionClosedByBroker:\n sleep = random.randint(1, 5)\n log.info(\"Connection was closed by broker, retrying... in %ss\", sleep)\n time.sleep(sleep)\n continue\n # Do not recover on channel errors\n except pika.exceptions.AMQPChannelError as err:\n log.error(\"Caught a channel error: {}, stopping...\".format(err))\n break\n # Recover on all other connection errors\n except pika.exceptions.AMQPConnectionError:\n sleep = random.randint(1, 5)\n log.info(\"Connection was closed, retrying... 
%ss\", sleep)\n time.sleep(sleep)\n continue\n","sub_path":"generators/app/templates/conn/rabbit.py","file_name":"rabbit.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"539163510","text":"# -*- coding: utf-8 -*-\n# --------------------------------------------------------------------\n# \tTests Maraquia2 payment.\n# --------------------------------------------------------------------\nimport os\nimport time\n\nimport wget\nfrom ddt import ddt\nfrom tests.frontend._classes.class_pf import PfCheckMethods as Pf\n\nfrom support import service_log\nfrom support.class_main import MainClass\nfrom support.databases.db import databases\nfrom support.utils.mail_api import Inbox\nfrom support.utils.parse_pdf import PDF\nfrom tests.frontend._classes.class_maraquia2 import Maraquia2CheckMethods\nfrom tests.support_methods.class_selenium import DriverMethods\nfrom tests.support_methods.class_helper import Helper\nfrom tests.support_methods.class_navigate import NavigateCheckMethods as Navigate\n\n__author__ = 'senchuk'\n\n\n@ddt\nclass TestPfMaraquia(Maraquia2CheckMethods, Inbox, PDF, Pf, Navigate):\n def setup_method(self, method):\n \"\"\" Пре-установка окружения для теста.\n \"\"\"\n self.method = method\n self.setup_time = time.time()\n NLS_LANG = 'RUSSIAN.CL8MSWIN1251'\n os.environ['NLS_LANG'] = NLS_LANG\n # Подготовка работы с selenium\n self.driver = DriverMethods.get_fast_driver()\n self.get_page(self.driver, env_base_url=self.ENV.get('PF_BASE_URL'))\n login = self.element_is_present(self.driver, Navigate.input_pf_login.login)\n paswd = self.element_is_present(self.driver, Navigate.input_pf_login.password)\n self.btn = self.element_is_present(self.driver, Navigate.click_pf_login.login_button)\n # Подготовка пароля для тестового пользователя\n self.user_id = self.ENV.get('USER_2_ID')\n new_password = self.ENV.get('USER_2_PASS')\n self.login, self.new_password, self.oud_user = Helper.set_password(databases.db2, self.user_id, new_password)\n # logIN\n self.input_str(login, self.login)\n self.input_str(paswd, self.new_password)\n service_log.preparing_env(self)\n\n def test_payment_maraquia2_pf(self):\n \"\"\"\n Тест на проверку покупки услуги Подари ЛЕС\n :return:\n \"\"\"\n service_log.run(self)\n tool = \"Сберегательный счет\"\n pay = \"Подари ЛЕС\"\n prod = \"Подари лес другу\"\n p_vbnk = u'Подари ЛЕС'\n data = self.get_data()\n #self.get_email() # разбираем все непрочитанные письма\n self.element_click(self.driver, self.btn, url_end=self.ENV.get('PF_BASE_URL'))\n self.wait_pf_snippet(self.driver)\n pay_btn = self.element_is_present(self.driver, self.click_pf_dashboard.payment_button)\n self.element_click(self.driver, pay_btn, change_page_url=False)\n self.element_is_present(self.driver, Navigate.check_pf_pay_menu.search_block)\n self.search = self.element_is_present(self.driver, self.input_pf_pay_menu.search)\n self.input_str(self.search, pay)\n item = self.element_is_present(self.driver, self.click_pf_pay_menu.item_maraquia2, wait=60)\n self.element_click(self.driver, item, change_page_url=False)\n\n self.element_is_present(self.driver, self.check_pf_maraquia2_form.header)\n input_fields, select_fields, checkbox_fields = self.get_pf_form_start(self.driver)\n introduced_data = self.set_pf_form_start(self.driver, input_fields, select_fields, data, p_tool=tool)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_next, change_page_url=False)\n input_fields_2, select_fields_2, 
checkbox_fields_2 = self.get_pf_form_2_page(self.driver)\n introduced_data = self.set_pf_form_2_page(self.driver, input_fields_2, select_fields_2, data)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_next, change_page_url=False)\n print_btn, set_sign_disable_btn = self.get_pf_signature(self.driver, data['tariff'], prod, tool)\n self.element_click(self.driver, print_btn, change_page_url=False)\n v_text, set_sign_disable_btn = self.print_pf_voucher(self.driver)\n self.driver.execute_script(\"arguments[0].removeAttribute('disabled');\", set_sign_disable_btn)\n self.driver.execute_script('arguments[0].click();', set_sign_disable_btn)\n confirm_f = self.get_pf_confirm_form(self.driver, data[\"tariff\"], product=prod, p_tool=tool)\n self.input_str(confirm_f['sms_code'], '1234')\n self.element_click(self.driver, self.click_pf_confirm_form.btn_next, change_page_url=False)\n self.checking_pf_success_pay(self.driver)\n\n oper_sum = int(introduced_data['tariff'].decode('utf-8').replace(u'\\u2009', ''))\n\n self.get_page(self.driver, env_base_url=self.ENV.get('PF_BASE_URL'))\n ext_oper_num = self.open_pay_info(self.driver, pay)\n\n do_time = time.time()\n # проверка проводок в вабанке\n while time.time() - do_time < 120:\n bd_operation = databases.db1.oracle.get_dbo_operation_by_id(ext_oper_num)\n if len(bd_operation) == 2:\n break\n self.assertEqual(len(bd_operation), 2, \"Платеж не завершен\")\n self.assertEqual(bd_operation[0]['EXT_OPER_NUM'], bd_operation[1]['EXT_OPER_NUM'])\n self.assertEqual(bd_operation[0]['FROMID'], bd_operation[1]['FROMID'])\n self.assertEqual(bd_operation[0]['TOID'], bd_operation[1]['TOID'])\n self.assertEqual(bd_operation[0]['OPERSUM'], oper_sum)\n self.assertEqual(bd_operation[1]['OPERSUM'], oper_sum)\n self.assertEqual(bd_operation[0]['SERVICENAME'].decode('cp1251'), p_vbnk)\n self.assertEqual(bd_operation[1]['SERVICENAME'].decode('cp1251'), p_vbnk)\n self.assertEqual(bd_operation[0]['PROCESS'], 1)\n self.assertEqual(bd_operation[1]['PROCESS'], 1)\n\n self.get_page(self.driver, env_base_url=self.ENV.get('PF_BASE_URL'))\n self.open_pay_info(self.driver, pay)\n document = Navigate.element_is_present(self.driver, Navigate.click_pf_pay_info.doc_event)\n name = document.text.encode('utf-8')\n url = document.get_attribute('href')\n self.check_open_pdf_in_detail_event(self.driver, [name])\n self.file_name = wget.download(url, self.LOCAL_TEMP)\n service_log.put(\"Download pdf file [%s]\" % self.file_name)\n text = self.parse_pdf(self.file_name)\n data.update(dict(Url=url.encode('utf-8')))\n self.check_mar_pdf(text, data)\n\n def test_maraquia2_back_pf(self):\n \"\"\"\n Тест на проверку кнопки Назад при офррмлении услуги Подари ЛЕС\n :return:\n \"\"\"\n service_log.run(self)\n data = self.get_data()\n #self.get_email() # разбираем все непрочитанные письма\n self.element_click(self.driver, self.btn, url_end=MainClass.ENV.get('PF_BASE_URL'))\n self.wait_pf_snippet(self.driver)\n pay_btn = self.element_is_present(self.driver, self.click_pf_dashboard.payment_button)\n self.element_click(self.driver, pay_btn, change_page_url=False)\n self.element_is_present(self.driver, Navigate.check_pf_pay_menu.search_block)\n self.search = self.element_is_present(self.driver, self.input_pf_pay_menu.search)\n self.input_str(self.search, 'Подари ЛЕС')\n item = self.element_is_present(self.driver, self.click_pf_pay_menu.item_maraquia2, wait=60)\n self.element_click(self.driver, item, change_page_url=False)\n\n self.element_is_present(self.driver, 
self.check_pf_maraquia2_form.header)\n input_fields, select_fields, checkbox_fields = self.get_pf_form_start(self.driver)\n introduced_data = self.set_pf_form_start(self.driver, input_fields, select_fields, data)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_next, change_page_url=False)\n input_fields, select_fields, checkbox_fields = self.get_pf_form_2_page(self.driver)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_back, change_page_url=False)\n self.check_pf_first_page(self.driver, introduced_data)\n\n def test_maraquia2_empty_fields_pf(self):\n \"\"\"\n Проверка сообщений об ошибках при пустых полях\n :return:\n \"\"\"\n service_log.run(self)\n #self.get_email() # разбираем все непрочитанные письма\n self.element_click(self.driver, self.btn, url_end=MainClass.ENV.get('PF_BASE_URL'))\n self.wait_pf_snippet(self.driver)\n pay_btn = self.element_is_present(self.driver, self.click_pf_dashboard.payment_button)\n self.element_click(self.driver, pay_btn, change_page_url=False)\n self.element_is_present(self.driver, Navigate.check_pf_pay_menu.search_block)\n self.search = self.element_is_present(self.driver, self.input_pf_pay_menu.search)\n self.input_str(self.search, 'Подари ЛЕС')\n item = self.element_is_present(self.driver, self.click_pf_pay_menu.item_maraquia2, wait=60)\n self.element_click(self.driver, item, change_page_url=False)\n\n self.element_is_present(self.driver, self.check_pf_maraquia2_form.header)\n input_fields, select_fields, checkbox_fields = self.get_pf_form_start(self.driver)\n data, error = self.get_incorrect_data()\n self.set_pf_form_incor(self.driver, input_fields, select_fields, data)\n self.check_pf_err_msg(self.driver, error)\n\n def test_maraquia2_pay_denied_pf(self):\n \"\"\"\n Тест на проверку покупки услуги Подари ЛЕС, когда не хватает денег на источнике платежа\n :return:\n \"\"\"\n service_log.run(self)\n tool=\"Онлайн карта\"\n pay = 'Подари ЛЕС'\n data = self.get_data()\n #self.get_email() # разбираем все непрочитанные письма\n self.element_click(self.driver, self.btn, url_end=self.ENV.get('PF_BASE_URL'))\n self.wait_pf_snippet(self.driver)\n pay_btn = self.element_is_present(self.driver, self.click_pf_dashboard.payment_button)\n self.element_click(self.driver, pay_btn, change_page_url=False)\n self.element_is_present(self.driver, Navigate.check_pf_pay_menu.search_block)\n self.search = self.element_is_present(self.driver, self.input_pf_pay_menu.search)\n self.input_str(self.search, pay)\n item = self.element_is_present(self.driver, self.click_pf_pay_menu.item_maraquia2, wait=60)\n self.element_click(self.driver, item, change_page_url=False)\n\n self.element_is_present(self.driver, self.check_pf_maraquia2_form.header)\n input_fields, select_fields, checkbox_fields = self.get_pf_form_start(self.driver)\n introduced_data = self.set_pf_form_start(self.driver, input_fields, select_fields, data, p_tool=tool)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_next, change_page_url=False)\n input_fields_2, select_fields_2, checkbox_fields_2 = self.get_pf_form_2_page(self.driver)\n introduced_data = self.set_pf_form_2_page(self.driver, input_fields_2, select_fields_2, data)\n self.element_click(self.driver, self.click_pf_maraquia2_form.btn_next, change_page_url=False)\n self.element_is_present(self.driver, self.check_pf_lawyer_form.payment_err)\n\n def teardown_method(self, method):\n \"\"\" Пост-работа после завершения теста.\n \"\"\"\n Helper.recover_password(databases.db2, self.user_id, self.oud_user)\n 
self.delete_tmp_pdf()\n Navigate.timer(self.setup_time, \"Work test\")\n self.driver.quit()\n service_log.end()\n","sub_path":"tests/frontend/payment_front/test_pf_marauia2.py","file_name":"test_pf_marauia2.py","file_ext":"py","file_size_in_byte":11932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"11937612","text":"import save\r\nimport time\r\nimport signal\r\n\r\ncurrent_state = [4, 4, 4, 4, 4, 4, 0, 4, 4, 4, 4, 4, 4, 0]\r\nLEVELS = [1, 4, 7, 10]\r\n\r\nload = int(input(\"do you want to load game ? (yes = press 1, no = press 0) \\n\"))\r\nstealing = 1\r\nturn = \"\"\r\nlevel = 1\r\ninterrupt_mode = 0\r\nif load == 1:\r\n current_state, stealing, level, interrupt_mode = save.load()\r\n turn = 'min'\r\n\r\nelse:\r\n stealing = int(\r\n input(\"do you want stealing mode ? (yes = press 1, no = press 0) \\n\"))\r\n turn = input(\r\n \"do you want to start ? (yes = enter 'min', no = enter 'max') \\n\")\r\n level = int(\r\n input(\"choose level: 1 - 2 - 3 - 4 \\n\"))-1\r\n\r\n interrupt_mode = int(\r\n input(\"force play method: 0- let the kid take his time\\n1-CTRL+C\\n2- 30s time limit \\n\"))\r\n\r\n\r\nclass node:\r\n def __init__(self, state, turn):\r\n\r\n self.is_leaf = 0\r\n self.cost = 0\r\n self.min_max = turn\r\n self.children = []\r\n self.state = state\r\n self.pruned = 0\r\n self.move_made_me = 0\r\n\r\n\r\ndef create_tree(root, depth):\r\n\r\n if depth == 0:\r\n root.is_leaf = 1\r\n root.cost = cost_fun(root.state)\r\n return root\r\n if root.min_max == 'max':\r\n places = range(0, 6)\r\n else:\r\n places = range(7, 13)\r\n\r\n for place in places:\r\n valid_move, returned_state, turn = make_move(place, root.state)\r\n if valid_move == 0:\r\n continue\r\n child_Node = node(returned_state, turn)\r\n child_Node.move_made_me = place\r\n temp = create_tree(child_Node, depth-1)\r\n root.children.append(temp)\r\n return root\r\n\r\n\r\ndef get_move(state, depth=LEVELS[level]):\r\n try:\r\n s = state.copy()\r\n root = node(s, 'max')\r\n create_tree(root, depth)\r\n alpha_beta(root, float('-inf'), float('inf'), 'max')\r\n temp = []\r\n for child in root.children:\r\n if child.cost == root.cost:\r\n temp.append(child)\r\n if len(temp) > 1:\r\n for i in temp:\r\n if i.pruned == 0:\r\n return i.move_made_me\r\n else:\r\n return temp[0].move_made_me\r\n except KeyboardInterrupt:\r\n return -1\r\n\r\n\r\ndef cost_fun(state):\r\n temp1 = state[0:5]\r\n temp2 = state[7:12]\r\n cost = -1*sum([x1 - x2 for (x1, x2) in zip(temp1, temp2)])\r\n return cost\r\n\r\n\r\ndef is_end(state):\r\n s = state.copy()\r\n sum_max = 0\r\n sum_min = 0\r\n who_win = 'computer winned'\r\n for i in range(0, 6):\r\n sum_max += s[i]\r\n sum_min += s[i+7]\r\n if sum_max == 0:\r\n ended = 1\r\n if s[6] > 48-s[6]:\r\n who_win = 'computer winned'\r\n elif s[6] < 48-s[6]:\r\n who_win = 'player winned'\r\n else:\r\n who_win = 'Draw'\r\n elif sum_min == 0:\r\n ended = 1\r\n if s[13] > 48-s[13]:\r\n who_win = 'player winned'\r\n elif s[13] < 48-s[13]:\r\n who_win = 'compuuter winned'\r\n else:\r\n who_win = 'Draw'\r\n else:\r\n ended = 0\r\n return ended, who_win\r\n\r\n\r\ndef make_move(selected_place, current_state):\r\n\r\n num_of_stones = current_state[selected_place]\r\n if num_of_stones == 0 or selected_place not in range(0, 13):\r\n valid_move = 0\r\n else:\r\n valid_move = 1\r\n if selected_place in range(0, 6):\r\n turn = 'min'\r\n elif selected_place in range(7, 13):\r\n turn = 'max'\r\n\r\n returned_state = current_state.copy()\r\n 
returned_state[selected_place] = 0\r\n place_to_add_stone = selected_place+1\r\n\r\n while True:\r\n if num_of_stones == 0:\r\n break\r\n if stealing == 1 and num_of_stones == 1 and selected_place in range(0, 6) and place_to_add_stone in range(0, 6) and returned_state[place_to_add_stone] == 0:\r\n returned_state[6] += returned_state[12-place_to_add_stone]+1\r\n returned_state[12-place_to_add_stone] = 0\r\n break\r\n if stealing == 1 and num_of_stones == 1 and selected_place in range(7, 13) and place_to_add_stone in range(7, 13) and returned_state[place_to_add_stone] == 0:\r\n returned_state[13] += returned_state[12-place_to_add_stone]+1\r\n returned_state[12-place_to_add_stone] = 0\r\n break\r\n if num_of_stones == 1 and selected_place in range(0, 6) and place_to_add_stone == 6:\r\n turn = 'max'\r\n if num_of_stones == 1 and selected_place in range(7, 13) and place_to_add_stone == 13:\r\n turn = 'min'\r\n if not(((selected_place in range(0, 6)) and (place_to_add_stone == 13)) or ((selected_place in range(7, 13)) and (place_to_add_stone == 6))):\r\n returned_state[place_to_add_stone] += 1\r\n num_of_stones -= 1\r\n place_to_add_stone += 1\r\n place_to_add_stone = place_to_add_stone % 14\r\n return valid_move, returned_state, turn\r\n\r\n\r\ndef alpha_beta(node, alpha, beta, max_min):\r\n if node.is_leaf == 1:\r\n return node.cost\r\n\r\n if max_min == 'max':\r\n temp = float('-inf')\r\n for child in node.children:\r\n if child.min_max == 'min':\r\n child.cost = alpha_beta(child, alpha, beta, 'min')\r\n temp = max(temp, child.cost)\r\n else:\r\n child.cost = alpha_beta(child, alpha, beta, 'max')\r\n temp = max(temp, child.cost)\r\n alpha = max(alpha, temp)\r\n if alpha >= beta:\r\n node.pruned = 1\r\n break # beta cutoff\r\n node.cost = temp\r\n return temp\r\n\r\n elif max_min == 'min':\r\n temp = float('inf')\r\n for child in node.children:\r\n if child.min_max == 'max':\r\n child.cost = alpha_beta(child, alpha, beta, 'max')\r\n temp = min(temp, child.cost)\r\n else:\r\n child.cost = alpha_beta(child, alpha, beta, 'min')\r\n temp = min(temp, child.cost)\r\n beta = min(beta, temp)\r\n if beta <= alpha:\r\n node.pruned = 1\r\n break # alpha cutoff\r\n node.cost = temp\r\n return temp\r\n\r\n\r\ndef alpha_beta(node, alpha, beta, max_min):\r\n if node.is_leaf == 1:\r\n return node.cost\r\n\r\n if max_min == 'max':\r\n temp = float('-inf')\r\n for child in node.children:\r\n if child.min_max == 'min':\r\n child.cost = alpha_beta(child, alpha, beta, 'min')\r\n temp = max(temp, child.cost)\r\n else:\r\n child.cost = alpha_beta(child, alpha, beta, 'max')\r\n temp = max(temp, child.cost)\r\n alpha = max(alpha, temp)\r\n if alpha >= beta:\r\n node.pruned = 1\r\n break # beta cutoff\r\n node.cost = temp\r\n return temp\r\n\r\n elif max_min == 'min':\r\n temp = float('inf')\r\n for child in node.children:\r\n if child.min_max == 'max':\r\n child.cost = alpha_beta(child, alpha, beta, 'max')\r\n temp = min(temp, child.cost)\r\n else:\r\n child.cost = alpha_beta(child, alpha, beta, 'min')\r\n temp = min(temp, child.cost)\r\n beta = min(beta, temp)\r\n if beta <= alpha:\r\n node.pruned = 1\r\n break # alpha cutoff\r\n node.cost = temp\r\n return temp\r\n\r\n\r\nclass TimeoutException(Exception): # Custom exception class\r\n pass\r\n\r\n\r\ndef timeout_handler(signum, frame): # Custom signal handler\r\n raise TimeoutException\r\n\r\n\r\ndef get_move_irp(current_state):\r\n\r\n if interrupt_mode == 0:\r\n return get_move(current_state)\r\n \r\n elif interrupt_mode == 1:\r\n i = 2\r\n move = 
get_move(current_state, 1)\r\n\r\n while True:\r\n temp = get_move(current_state, i)\r\n if temp == -1:\r\n return move\r\n move = temp\r\n i += 1\r\n else:\r\n i = 2\r\n move = get_move(current_state, 1)\r\n signal.signal(signal.SIGALRM, timeout_handler)\r\n signal.alarm(30)\r\n try:\r\n while True:\r\n temp = get_move(current_state, i)\r\n move = temp\r\n except TimeoutException:\r\n return move\r\n\r\n\r\ndef print_state(state):\r\n s = state.copy()\r\n temp = s[7:13]\r\n temp.reverse()\r\n print('player -->', ' ', s[13], temp)\r\n print('computer --> ', '', s[0:6], s[6])\r\n\r\n\r\n# print state at the begining\r\nprint_state(current_state)\r\n\r\n\r\nwhile True:\r\n ended, who_win = is_end(current_state)\r\n if ended == 1:\r\n print('game ended\\n and the result is -->', who_win)\r\n break\r\n if turn == 'min':\r\n user_move = int(\r\n input('it is your turn Enter a number from 1 to 6 or 0 to save and exit\\n'))+6\r\n if user_move == 6:\r\n save.save(current_state, stealing, level, interrupt_mode)\r\n break\r\n\r\n valid_move, returned_state, next_turn = make_move(\r\n user_move, current_state)\r\n while valid_move == 0:\r\n user_move = int(\r\n input('this is not a valid move try again, Enter a number from 1 to 6\\n'))+6\r\n if user_move == 6:\r\n save.save(current_state, stealing, level, interrupt_mode)\r\n break\r\n valid_move, returned_state, next_turn = make_move(\r\n user_move, current_state)\r\n current_state = returned_state\r\n turn = next_turn\r\n print('this is your play')\r\n print_state(current_state)\r\n print(\"--------------------\")\r\n else:\r\n print('this is my move')\r\n\r\n t1 = time.time()\r\n suggested_move = get_move_irp(current_state)\r\n\r\n print(\"time: \", time.time()-t1)\r\n valid_move, returned_state, next_turn = make_move(\r\n suggested_move, current_state)\r\n current_state = returned_state\r\n turn = next_turn\r\n print_state(current_state)\r\n print(\"--------------------\")\r\n","sub_path":"whole.py","file_name":"whole.py","file_ext":"py","file_size_in_byte":9954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152877904","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: \n@file: 486. 
预测赢家.py\n@time: 2020/9/1 10:06\n@desc: \n\"\"\"\nfrom typing import List\n\"\"\"\n给定一个表示分数的非负整数数组。 玩家 1 从数组任意一端拿取一个分数,随后玩家 2 继续从剩余数组任意一端拿取分数,然后玩家 1 拿,…… 。每次一个玩家只能拿取一个分数,分数被拿取之后不再可取。直到没有剩余分数可取时游戏结束。最终获得分数总和最多的玩家获胜。\n给定一个表示分数的数组,预测玩家1是否会成为赢家。你可以假设每个玩家的玩法都会使他的分数最大化。\n\n示例 1:\n\n输入:[1, 5, 2]\n输出:False\n解释:一开始,玩家1可以从1和2中进行选择。\n如果他选择 2(或者 1 ),那么玩家 2 可以从 1(或者 2 )和 5 中进行选择。如果玩家 2 选择了 5 ,那么玩家 1 则只剩下 1(或��� 2 )可选。\n所以,玩家 1 的最终分数为 1 + 2 = 3,而玩家 2 为 5 。\n因此,玩家 1 永远不会成为赢家,返回 False 。\n示例 2:\n\n输入:[1, 5, 233, 7]\n输出:True\n解释:玩家 1 一开始选择 1 。然后玩家 2 必须从 5 和 7 中进行选择。无论玩家 2 选择了哪个,玩家 1 都可以选择 233 。\n 最终,玩家 1(234 分)比玩家 2(12 分)获得更多的分数,所以返回 True,表示玩家 1 可以成为赢家。\n \n\n提示:\n1 <= 给定的数组长度 <= 20.\n数组里所有分数都为非负数且不会大于 10000000 。\n如果最终两个玩家的分数相等,那么玩家 1 仍为赢家。\n\"\"\"\n\"\"\"!\n相对分数 说成 净胜分 ,语义会更强一些。\n甲乙比赛,甲先手面对区间[i...j]时,dp[i][j]表示甲对乙的净胜分。\n最终求的就是,甲先手面对区间[0...n-1]时,甲对乙的净胜分dp[0][n-1]是否>=0。\n甲先手面对区间[i...j]时,\n如果甲拿nums[i],那么变成乙先手面对区间[i+1...j],这段区间内乙对甲的净胜分为dp[i+1][j];那么甲对乙的净胜分就应该是nums[i] - dp[i+1][j]。\n如果甲拿nums[j],同理可得甲对乙的净胜分为是nums[j] - dp[i][j-1]。\n以上两种情况二者取大即可。\n!\"\"\"\nclass Solution:\n def PredictTheWinner(self, nums: List[int]) -> bool:\n length = len(nums)\n dp = [[0]*length]*length\n for i in range(length):\n dp[i][i] = nums[i]\n for i in range(length - 2, -1, -1):\n for j in range(i+1, length):\n dp[i][j] = max((nums[i]-dp[i+1][j]), (nums[j] - dp[i][j-1]))\n return dp[0][length-1] >= 0\n # print(dp)\n\na = Solution().PredictTheWinner([1, 5, 233, 7])\nprint(a)","sub_path":"all_topic/medium/486. 预测赢家.py","file_name":"486. 预测赢家.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"288112976","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom .models import comment, content\nfrom django.utils import timezone\n\nsourceUrl = ''\n\n\n# Create your views here.\ndef index(request):\n shj = content.objects.all()\n message = comment.objects.all()\n global sourceUrl\n sourceUrl = request.path\n return render(request, 'index.html', {'shj': shj, 'message': message})\n\n\ndef submitComment(request):\n text = request.POST['text']\n if not text.strip():\n return HttpResponse('留言内容不可为空')\n timeNow = timezone.now().strftime('%Y-%m-%d %H:%M')\n submit = {'content': text, 'time': timeNow}\n print(submit)\n comment.objects.create(**submit)\n return HttpResponse('留言成功')\n","sub_path":"shjGuestbook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"311118266","text":"import random\nimport itertools\nimport pytest\nfrom unittest.mock import MagicMock, PropertyMock, call\n\nimport github\n\nimport repobee_plug as plug\n\nimport _repobee.ext.defaults.github as github_plugin\nfrom _repobee import exception\nfrom _repobee.ext.defaults.github import REQUIRED_TOKEN_SCOPES\n\nimport constants\nimport functions\n\nORG_NAME = constants.ORG_NAME\nISSUE = constants.ISSUE\nTOKEN = constants.TOKEN\n\n\nclass GithubException(Exception):\n def __init__(self, msg, status):\n super().__init__(msg)\n self.msg = msg\n self.status = status\n\n\nNOT_FOUND_EXCEPTION = GithubException(msg=None, status=404)\nVALIDATION_ERROR = GithubException(msg=None, status=422)\nSERVER_ERROR = GithubException(msg=None, status=500)\n\nUSER = constants.USER\nNOT_OWNER = \"notanowner\"\nORG_NAME = constants.ORG_NAME\nBASE_URL = constants.BASE_URL\nISSUE = constants.ISSUE\nTOKEN = constants.TOKEN\n\ngenerate_repo_url = 
functions.generate_repo_url\nrandom_date = functions.random_date\nto_magic_mock_issue = functions.to_magic_mock_issue\nfrom_magic_mock_issue = functions.from_magic_mock_issue\n\nUser = constants.User\n\nCLOSE_ISSUE = plug.Issue(\n \"close this issue\", \"This is a body\", 3, random_date(), \"slarse\"\n)\nDONT_CLOSE_ISSUE = plug.Issue(\n \"Don't close this issue\", \"Another body\", 4, random_date(), \"glassey\"\n)\nOPEN_ISSUES = [CLOSE_ISSUE, DONT_CLOSE_ISSUE]\n\nCLOSED_ISSUES = [\n plug.Issue(\n \"This is a closed issue\",\n \"With an uninteresting body\",\n 1,\n random_date(),\n \"tmore\",\n ),\n plug.Issue(\n \"Yet another closed issue\",\n \"Even less interesting body\",\n 2,\n random_date(),\n \"viklu\",\n ),\n]\n\n\ndef raise_404(*args, **kwargs):\n raise GithubException(\"Couldn't find something\", 404)\n\n\ndef raise_422(*args, **kwargs):\n raise GithubException(\"Already exists\", 422)\n\n\ndef raise_401(*args, **kwargs):\n raise GithubException(\"Access denied\", 401)\n\n\n@pytest.fixture\ndef review_student_teams():\n return [\n plug.Team(members=[student])\n for student in (\"ham\", \"spam\", \"bacon\", \"eggs\")\n ]\n\n\n@pytest.fixture\ndef review_teams(review_student_teams):\n master_repo = \"week-1\"\n review_teams = {}\n for i, student_team in enumerate(review_student_teams):\n review_teams[\n plug.generate_review_team_name(student_team, master_repo)\n ] = itertools.chain.from_iterable(\n team.members\n for team in review_student_teams[:i]\n + review_student_teams[i + 1 :]\n )\n return review_teams\n\n\n@pytest.fixture\ndef teams_and_members(review_teams):\n \"\"\"Fixture with a dictionary contain a few teams with member lists.\"\"\"\n return {\n \"one\": [\"first\", \"second\"],\n \"two\": [\"two\"],\n \"last_team\": [str(i) for i in range(10)],\n **review_teams,\n }\n\n\n@pytest.fixture\ndef happy_github(mocker, monkeypatch, teams_and_members):\n \"\"\"mock of github.Github which raises no exceptions and returns the\n correct values.\n \"\"\"\n github_instance = MagicMock()\n github_instance.get_user.side_effect = (\n lambda user: User(login=user)\n if user in [USER, NOT_OWNER]\n else raise_404()\n )\n type(github_instance).oauth_scopes = PropertyMock(\n return_value=REQUIRED_TOKEN_SCOPES\n )\n\n usernames = set(\n itertools.chain(*[members for _, members in teams_and_members.items()])\n )\n\n def get_user(username):\n if username in [*usernames, USER, NOT_OWNER]:\n user = MagicMock(spec=github.NamedUser.NamedUser)\n type(user).login = PropertyMock(return_value=username)\n return user\n else:\n raise_404()\n\n github_instance.get_user.side_effect = get_user\n monkeypatch.setattr(github, \"GithubException\", GithubException)\n mocker.patch(\n \"github.Github\",\n side_effect=lambda login_or_token, base_url: github_instance,\n )\n\n return github_instance\n\n\n@pytest.fixture\ndef organization(happy_github):\n \"\"\"Attaches an Organization mock to github.Github.get_organization, and\n returns the mock.\n \"\"\"\n organization = MagicMock()\n organization.get_members = lambda role: [\n User(login=\"blablabla\"),\n User(login=\"hello\"),\n User(login=USER),\n ]\n type(organization).html_url = PropertyMock(\n return_value=generate_repo_url(\"\", ORG_NAME).rstrip(\"/\")\n )\n happy_github.get_organization.side_effect = (\n lambda org_name: organization if org_name == ORG_NAME else raise_404()\n )\n return organization\n\n\ndef mock_team(name):\n \"\"\"create a mock team that tracks its members.\"\"\"\n team = MagicMock()\n members = set()\n team.get_members.side_effect = 
lambda: list(members)\n team.add_membership.side_effect = lambda user: members.add(user)\n type(team).name = PropertyMock(return_value=name)\n type(team).id = PropertyMock(return_value=hash(name))\n return team\n\n\n@pytest.fixture\ndef no_teams(organization):\n \"\"\"A fixture that sets up the teams functionality without adding any\n teams.\n \"\"\"\n ids_to_teams = {}\n organization.get_team.side_effect = (\n lambda team_id: ids_to_teams[team_id]\n if team_id in ids_to_teams\n else raise_404()\n )\n organization.get_teams.side_effect = lambda: list(teams_)\n teams_ = []\n\n def create_team(name, permission):\n nonlocal teams_, ids_to_teams\n\n assert permission in [\"push\", \"pull\"]\n if name in [team.name for team in teams_]:\n raise_422()\n\n team = mock_team(name)\n ids_to_teams[team.id] = team\n teams_.append(team)\n return team\n\n organization.create_team.side_effect = create_team\n return teams_\n\n\n@pytest.fixture\ndef teams(organization, no_teams, teams_and_members):\n \"\"\"A fixture that returns a list of teams, which are all returned by the\n github.Organization.Organization.get_teams function.\"\"\"\n team_names = teams_and_members.keys()\n for name in team_names:\n organization.create_team(name, permission=\"push\")\n return no_teams # the list of teams!\n\n\ndef mock_repo(name, description, private, team_id):\n repo = MagicMock()\n type(repo).name = PropertyMock(return_value=name)\n type(repo).description = PropertyMock(\n return_value=\"description of {}\".format(name)\n )\n type(repo).html_url = PropertyMock(\n return_value=generate_repo_url(name, ORG_NAME)\n )\n # repo.get_teams.side_effect = lambda: [team]\n return repo\n\n\n@pytest.fixture\ndef repo_infos(teams_and_members, teams):\n descriptions = [\"A nice repo for {}\".format(team.name) for team in teams]\n repo_names = [\"{}-week-2\".format(team.name) for team in teams]\n return [\n plug.Repo(name, description, True, team.id)\n for name, description, team in zip(repo_names, descriptions, teams)\n ]\n\n\n@pytest.fixture\ndef no_repos(teams_and_members, teams, organization):\n repos_in_org = {}\n\n def get_repo(repo_name):\n if repo_name in repos_in_org:\n return repos_in_org[repo_name]\n raise NOT_FOUND_EXCEPTION\n\n def create_repo(name, description=\"\", private=True, team_id=None):\n nonlocal repos_in_org\n if name in repos_in_org:\n raise VALIDATION_ERROR\n repo = mock_repo(name, description, private, team_id)\n repos_in_org[name] = repo\n return repo\n\n organization.create_repo.side_effect = create_repo\n organization.get_repo.side_effect = get_repo\n organization.get_repos.side_effect = lambda: list(repos_in_org.values())\n\n\n@pytest.fixture\ndef repos(organization, no_repos, repo_infos):\n for ri in repo_infos:\n organization.create_repo(\n ri.name, ri.description, ri.private, ri.team_id\n )\n\n return organization.get_repos()\n\n\n@pytest.fixture\ndef issues(repos):\n \"\"\"Adds two issues to all repos such that Repo.get_issues returns the\n issues. 
One issue is expected to be closed and has title CLOSE_ISSUE.title\n and is marked with, while the other is expected not to be closed and has\n title DONT_CLOSE_ISSUE.title.\n \"\"\"\n\n def attach_issues(repo):\n # for some reason, putting this inline in the loop caused every single\n # repo to get the SAME mocks returned by the lambda\n open_issue_mocks = [\n to_magic_mock_issue(issue) for issue in OPEN_ISSUES\n ]\n closed_issue_mocks = [\n to_magic_mock_issue(issue) for issue in CLOSED_ISSUES\n ]\n repo.get_issues.side_effect = (\n lambda state: open_issue_mocks\n if state == \"open\"\n else closed_issue_mocks\n )\n return open_issue_mocks + closed_issue_mocks\n\n issues = []\n for repo in repos:\n issues.extend(attach_issues(repo))\n\n return issues\n\n\n@pytest.fixture(scope=\"function\")\ndef api(happy_github, organization, no_teams):\n return github_plugin.GitHubAPI(BASE_URL, TOKEN, ORG_NAME, USER)\n\n\nclass TestInit:\n def test_raises_on_empty_user_arg(self):\n with pytest.raises(TypeError) as exc_info:\n github_plugin.GitHubAPI(BASE_URL, TOKEN, ORG_NAME, \"\")\n\n assert \"argument 'user' must not be empty\" in str(exc_info.value)\n\n @pytest.mark.parametrize(\"url\", [\"https://github.com\", constants.HOST_URL])\n def test_raises_when_url_is_bad(self, url):\n with pytest.raises(plug.PlugError) as exc_info:\n github_plugin.GitHubAPI(url, TOKEN, ORG_NAME, USER)\n\n assert (\n \"invalid base url, should either be https://api.github.com or \"\n \"end with '/api/v3'\" in str(exc_info.value)\n )\n\n @pytest.mark.parametrize(\n \"url\", [\"https://api.github.com\", constants.BASE_URL]\n )\n def test_accepts_valid_urls(self, url):\n api = github_plugin.GitHubAPI(url, TOKEN, ORG_NAME, USER)\n\n assert isinstance(api, plug.API)\n\n\nclass TestEnsureTeamsAndMembers:\n @staticmethod\n def assert_teams_equal(actual_teams, expected_teams):\n \"\"\"Assert that the teams are equal, except for the implementation and\n id.\n \"\"\"\n assert len(actual_teams) == len(expected_teams)\n for actual, expected in zip(\n sorted(actual_teams), sorted(expected_teams)\n ):\n assert (actual.name, sorted(actual.members)) == (\n expected.name,\n sorted(expected.members),\n )\n\n @pytest.fixture\n def team_wrappers(self, teams_and_members):\n \"\"\"Wrap the teams_and_members dictionaries into plug.Team classes.\n\n TODO: Remove this when this test suite is rewritten.\n \"\"\"\n return [\n plug.Team(name=team_name, members=members)\n for team_name, members in teams_and_members.items()\n ]\n\n def test_no_previous_teams(self, api, team_wrappers, no_teams):\n \"\"\"Test that ensure_teams_and_members works as expected when there are\n no previous teams, and all users exist. 
This is a massive end-to-end\n test of the function with only the lower level API's mocked out.\n \"\"\"\n expected_teams = list(team_wrappers)\n api.ensure_teams_and_members(expected_teams)\n self.assert_teams_equal(api.get_teams(), expected_teams)\n\n def test_all_teams_exist_but_without_members(\n self, api, team_wrappers, teams\n ):\n \"\"\"Test that ensure_teams_and_members works as expected when all of\n the teams already exist, but have no members in them.\n \"\"\"\n expected_teams = list(team_wrappers)\n api.ensure_teams_and_members(expected_teams)\n self.assert_teams_equal(api.get_teams(), expected_teams)\n\n @pytest.mark.parametrize(\n \"unexpected_exc\",\n [\n exception.APIError(\"\", 404),\n exception.APIError(\"\", 400),\n exception.APIError(\"\", 500),\n ],\n )\n def test_raises_on_non_422_exception(\n self, api, organization, team_wrappers, unexpected_exc\n ):\n \"\"\"Should raise if anything but a 422 http error is raised when\n creating the team.\n \"\"\"\n\n def raise_(*args, **kwargs):\n raise unexpected_exc\n\n organization.create_team.side_effect = raise_\n\n with pytest.raises(exception.UnexpectedException):\n api.ensure_teams_and_members(team_wrappers)\n\n def test_running_twice_has_no_ill_effects(\n self, api, no_teams, team_wrappers\n ):\n \"\"\"Tests that add_to_team is not called if all members are already\n in it.\"\"\"\n expected_teams = list(team_wrappers)\n api.ensure_teams_and_members(expected_teams)\n api.ensure_teams_and_members(expected_teams)\n self.assert_teams_equal(api.get_teams(), expected_teams)\n\n\nclass TestCreateRepos:\n def test_creates_correct_repos(self, no_repos, repo_infos, api):\n \"\"\"Assert that org.create_repo is called with the correct arguments.\"\"\"\n # expect (self, repo_info) call args\n expected_calls = [\n call(\n info.name,\n description=info.description,\n private=info.private,\n team_id=info.team_id,\n )\n for info in repo_infos\n ]\n\n api.create_repos(repo_infos)\n\n assert repos\n api.org.create_repo.assert_has_calls(expected_calls)\n\n def test_skips_existing_repos(self, no_repos, repo_infos, api):\n \"\"\"Assert that create_repo is called with all repos even when there are\n exceptions.\n \"\"\"\n expected_calls = [\n call(\n info.name,\n description=info.description,\n private=info.private,\n team_id=info.team_id,\n )\n for info in repo_infos\n ]\n # create one repo in advance\n api.create_repos(repo_infos[:1])\n\n # start test\n api.create_repos(repo_infos)\n\n api.org.create_repo.assert_has_calls(expected_calls)\n\n @pytest.mark.parametrize(\n \"unexpected_exception\",\n (SERVER_ERROR, RuntimeError(), NOT_FOUND_EXCEPTION),\n )\n def test_raises_on_unexpected_error(\n self, no_repos, repo_infos, api, unexpected_exception\n ):\n create_repo_mock = api.org.create_repo\n side_effect = [create_repo_mock] * len(repo_infos)\n side_effect_github_exception = [unexpected_exception] + side_effect[1:]\n\n create_repo_mock.side_effect = side_effect_github_exception\n with pytest.raises(exception.APIError):\n api.create_repos(repo_infos)\n\n def test_returns_all_urls(self, mocker, repos, repo_infos, api):\n \"\"\"Assert that create_repo returns the urls for all repos, even if there\n are validation errors.\n \"\"\"\n expected_urls = [\n api._insert_auth(generate_repo_url(info.name, ORG_NAME))\n for info in repo_infos\n ]\n\n actual_urls = api.create_repos(repo_infos)\n assert actual_urls == expected_urls\n for url in actual_urls:\n assert TOKEN in url\n\n def test_create_repos_without_team_id(self, api):\n \"\"\"If there is no 
team id specified for the repo, then\n github.Organization.create_repo must be called without the team_id\n argument (because if it is called with team_id=None, there is a crash).\n \"\"\"\n repo = plug.Repo(\n name=\"repo\",\n description=\"Some description\",\n private=True,\n team_id=None, # this is the important part!\n )\n\n api.create_repos([repo])\n\n api._org.create_repo.assert_called_once_with(\n repo.name, description=repo.description, private=repo.private\n )\n\n\nclass TestGetRepoUrls:\n \"\"\"Tests for get_repo_urls.\"\"\"\n\n def test_with_token_and_user(self, repos, api):\n repo_names = [repo.name for repo in repos]\n api._user = USER\n expected_urls = [api._insert_auth(repo.html_url) for repo in repos]\n\n urls = api.get_repo_urls(repo_names)\n\n assert sorted(urls) == sorted(expected_urls)\n for url in urls:\n assert \"{}:{}\".format(USER, TOKEN) in url\n\n def test_with_students(self, repos, api):\n \"\"\"Test that supplying students causes student repo names to be\n generated as the Cartesian product of the supplied repo names and the\n students.\n \"\"\"\n students = list(constants.STUDENTS)\n master_repo_names = [repo.name for repo in repos]\n expected_repo_names = plug.generate_repo_names(\n students, master_repo_names\n )\n # assume works correctly when called with just repo names\n expected_urls = api.get_repo_urls(expected_repo_names)\n\n actual_urls = api.get_repo_urls(master_repo_names, teams=students)\n\n assert len(actual_urls) == len(students) * len(master_repo_names)\n assert sorted(expected_urls) == sorted(actual_urls)\n\n\nclass TestOpenIssue:\n \"\"\"Tests for open_issue.\"\"\"\n\n def test_on_existing_repos(self, api, repos, issues):\n repo_names = [repo.name for repo in repos]\n\n api.open_issue(ISSUE.title, ISSUE.body, repo_names)\n\n for repo in repos:\n repo.create_issue.assert_called_once_with(\n ISSUE.title, body=ISSUE.body\n )\n\n def test_on_some_non_existing_repos(self, api, repos):\n \"\"\"Assert that repos that do not exist are simply skipped.\"\"\"\n\n repo_names = [\n \"repo-that-does-not-exist-{}\".format(i) for i in range(10)\n ] + [repo.name for repo in repos]\n\n api.open_issue(ISSUE.title, ISSUE.body, repo_names)\n\n for repo in repos:\n repo.create_issue.assert_called_once_with(\n ISSUE.title, body=ISSUE.body\n )\n\n def test_no_crash_when_no_repos_are_found(self, api, repos, happy_github):\n repo_names = [\n \"repo-that-does-not-exist-{}\".format(i) for i in range(10)\n ]\n\n api.open_issue(ISSUE.title, ISSUE.body, repo_names)\n\n\nclass TestCloseIssue:\n \"\"\"Tests for close_issue.\"\"\"\n\n def test_closes_correct_issues(self, repos, issues, api):\n \"\"\"Given repos with existing issues, assert that the corect issues are\n closed.\n \"\"\"\n repo_names = [repo.name for repo in repos]\n expected_closed = [\n issue for issue in issues if issue.title == CLOSE_ISSUE.title\n ]\n expected_not_closed = [\n issue for issue in issues if issue.title == DONT_CLOSE_ISSUE.title\n ]\n assert expected_closed, \"pre-test assert\"\n assert expected_not_closed, \"pre-test assert\"\n regex = \"^{}$\".format(CLOSE_ISSUE.title)\n\n api.close_issue(regex, repo_names)\n\n for issue in expected_not_closed:\n assert not issue.edit.called\n for issue in expected_closed:\n # issue.edit.assert_called_once_with(state='closed')\n assert issue.edit.called\n\n def test_no_crash_if_no_repos_found(self, api, repos, issues):\n \"\"\"Tests that there is no crash if no repos are found.\"\"\"\n repo_names = [\n \"repo-that-does-not-exist-{}\".format(i) for i in 
range(10)\n ]\n\n regex = \"^{}$\".format(CLOSE_ISSUE.title)\n api.close_issue(regex, repo_names)\n\n for issue in issues:\n assert not issue.edit.called\n\n def test_no_crash_if_no_issues_found(self, api, repos, issues):\n \"\"\"Tests that there is no crash if repos are found, but no matching\n issues.\n \"\"\"\n repo_names = [repo.name for repo in repos]\n regex = \"^{}$\".format(\"non-matching-regex\")\n\n api.close_issue(regex, repo_names)\n\n for issue in issues:\n assert not issue.edit.called\n\n\nclass TestGetIssues:\n \"\"\"Tests for get_issues.\"\"\"\n\n @staticmethod\n def assert_issues_equal(actual_issues, expected_issues):\n \"\"\"The expected issues don't have the mocked implementation, while the\n actual issues all should have it.\n \"\"\"\n actual_issues, expected_issues = (\n list(actual_issues),\n [\n plug.Issue(\n title, body, number, created_at.isoformat(), author, impl\n )\n for title, body, number, created_at, author, impl in iter(\n expected_issues\n )\n ],\n )\n assert len(actual_issues) == len(expected_issues)\n for act, exp in zip(sorted(actual_issues), sorted(expected_issues)):\n assert act.implementation\n for field_name in plug.Issue._fields:\n if field_name == \"implementation\":\n continue\n assert getattr(act, field_name) == getattr(exp, field_name)\n\n def test_get_all_open_issues(self, repos, issues, api):\n repo_names = [repo.name for repo in repos]\n\n name_issues_pairs = api.get_issues(\n repo_names, state=plug.IssueState.OPEN\n )\n\n found_repos = []\n for repo_name, issue_gen in name_issues_pairs:\n found_repos.append(repo_name)\n\n self.assert_issues_equal(\n actual_issues=issue_gen, expected_issues=OPEN_ISSUES\n )\n\n assert sorted(found_repos) == sorted(repo_names)\n\n def test_get_all_closed_issues(self, repos, issues, api):\n repo_names = [repo.name for repo in repos]\n\n name_issues_pairs = api.get_issues(\n repo_names, state=plug.IssueState.CLOSED\n )\n\n found_repos = []\n for repo_name, issue_gen in name_issues_pairs:\n found_repos.append(repo_name)\n\n self.assert_issues_equal(\n actual_issues=issue_gen, expected_issues=CLOSED_ISSUES\n )\n\n assert sorted(found_repos) == sorted(repo_names)\n\n def test_get_issues_when_one_repo_doesnt_exist(self, repos, issues, api):\n \"\"\"It should just ignore the repo that does not exist (and log the\n error).\"\"\"\n non_existing = \"definitely-non-existing-repo\"\n repo_names = [repo.name for repo in repos] + [non_existing]\n random.shuffle(repo_names)\n\n name_issues_pairs = api.get_issues(\n repo_names, state=plug.IssueState.OPEN\n )\n\n found_repos = []\n for repo_name, issue_gen in name_issues_pairs:\n found_repos.append(repo_name)\n\n self.assert_issues_equal(\n actual_issues=issue_gen, expected_issues=OPEN_ISSUES\n )\n\n assert len(found_repos) + 1 == len(repo_names)\n assert set(found_repos) == set(repo_names) - {non_existing}\n\n def test_get_open_issues_by_regex(self, repos, issues, api):\n \"\"\"Should filter by regex.\"\"\"\n sought_issue = OPEN_ISSUES[1]\n repo_names = [repo.name for repo in repos]\n regex = \"^{}$\".format(sought_issue.title)\n\n name_issues_pairs = api.get_issues(\n repo_names, state=plug.IssueState.OPEN, title_regex=regex\n )\n\n found_repos = []\n for repo_name, issue_gen in name_issues_pairs:\n found_repos.append(repo_name)\n\n self.assert_issues_equal(\n actual_issues=issue_gen, expected_issues=[sought_issue]\n )\n\n assert sorted(found_repos) == sorted(repo_names)\n\n\n@pytest.fixture\ndef team_to_repos(api, no_repos, organization):\n \"\"\"Create a team_to_repos 
mapping for use in _add_repos_to_teams, anc create\n each team and repo. Return the team_to_repos mapping.\n \"\"\"\n num_teams = 10\n # arrange\n team_names = [\"team-{}\".format(i) for i in range(num_teams)]\n repo_names = [\"some-repo-{}\".format(i) for i in range(num_teams)]\n for name in team_names:\n organization.create_team(name, permission=\"pull\")\n for name in repo_names:\n organization.create_repo(name)\n team_to_repos = {\n team_name: [repo_name]\n for team_name, repo_name in zip(team_names, repo_names)\n }\n return team_to_repos\n\n\nclass TestAddReposToReviewTeams:\n def test_with_default_issue(self, team_to_repos, organization, api):\n num_teams = len(team_to_repos)\n default_issue = github_plugin.DEFAULT_REVIEW_ISSUE\n assert num_teams, \"pre-test assert\"\n team_repo_tuples = [\n (team, *repos) for team, repos in team_to_repos.items()\n ]\n assert len(team_repo_tuples) == num_teams, \"pre-test assert\"\n\n api.add_repos_to_review_teams(team_to_repos, None)\n\n for team_name, repo_name in team_repo_tuples:\n team = organization.get_team(\n hash(team_name)\n ) # hash(team_name) is the id, see the fixture!\n repo = organization.get_repo(repo_name)\n assert team.add_to_repos.called_once_with(repo)\n\n repo.create_issue.assert_called_once_with(\n default_issue.title,\n body=default_issue.body,\n assignees=team.get_members(),\n )\n\n\nclass TestAddReposToTeams:\n def test_happy_path(self, team_to_repos, api):\n num_teams = len(team_to_repos)\n expected_tups = sorted(team_to_repos.items())\n\n # act\n result = list(api._add_repos_to_teams(team_to_repos))\n result.sort(key=lambda tup: tup[0].name)\n\n # assert\n assert len(result) == len(team_to_repos) == num_teams\n for res_tup, expected_tup in zip(result, expected_tups):\n expected_team_name, expected_repo_names = expected_tup\n expected_repo_name = expected_repo_names[0]\n\n actual_team, actual_repo = res_tup\n\n actual_team.add_to_repos.assert_called_once_with(actual_repo)\n assert actual_team.name == expected_team_name\n assert actual_repo.name == expected_repo_name\n\n\nclass TestDeleteTeams:\n def test_delete_non_existing_teams_does_not_crash(self, no_teams, api):\n team_names = [\"week-{}\".format(i) for i in range(10)]\n\n api.delete_teams(team_names)\n\n def test_delete_existing_teams(self, teams, api):\n team_names = [team.name for team in teams]\n\n api.delete_teams(team_names)\n\n assert all(map(lambda t: t.delete.called, teams))\n\n\n@pytest.fixture(params=[\"get_user\", \"get_organization\"])\ndef github_bad_info(request, api, happy_github):\n \"\"\"Fixture with a github instance that raises GithubException 404 when\n use the user, base_url and org_name arguments to .\n \"\"\"\n getattr(happy_github, request.param).side_effect = raise_404\n return happy_github\n\n\nclass TestVerifySettings:\n \"\"\"Tests for the verify_settings function.\"\"\"\n\n def test_happy_path(self, happy_github, organization, api):\n \"\"\"Tests that no exceptions are raised when all info is correct.\"\"\"\n github_plugin.GitHubAPI.verify_settings(\n USER, ORG_NAME, BASE_URL, TOKEN\n )\n\n def test_empty_token_raises_bad_credentials(\n self, happy_github, monkeypatch, api\n ):\n with pytest.raises(exception.BadCredentials) as exc_info:\n github_plugin.GitHubAPI.verify_settings(\n USER, ORG_NAME, BASE_URL, \"\"\n )\n\n assert \"token is empty\" in str(exc_info.value)\n\n def test_incorrect_info_raises_not_found_error(self, github_bad_info, api):\n with pytest.raises(exception.NotFoundError):\n github_plugin.GitHubAPI.verify_settings(\n USER, 
ORG_NAME, BASE_URL, TOKEN\n )\n\n def test_bad_token_scope_raises(self, happy_github, api):\n type(happy_github).oauth_scopes = PropertyMock(return_value=[\"repo\"])\n\n with pytest.raises(exception.BadCredentials) as exc_info:\n github_plugin.GitHubAPI.verify_settings(\n USER, ORG_NAME, BASE_URL, TOKEN\n )\n assert \"missing one or more access token scopes\" in str(exc_info.value)\n\n def test_not_owner_raises(self, happy_github, organization, api):\n with pytest.raises(exception.BadCredentials) as exc_info:\n github_plugin.GitHubAPI.verify_settings(\n NOT_OWNER, ORG_NAME, BASE_URL, TOKEN\n )\n\n assert \"user {} is not an owner\".format(NOT_OWNER) in str(\n exc_info.value\n )\n\n def test_raises_unexpected_exception_on_unexpected_status(\n self, happy_github, api\n ):\n happy_github.get_user.side_effect = SERVER_ERROR\n with pytest.raises(exception.UnexpectedException):\n api.verify_settings(USER, ORG_NAME, BASE_URL, TOKEN)\n\n def test_none_user_raises(self, happy_github, organization, api):\n \"\"\"If NamedUser.login is None, there should be an exception. Happens if\n you provide a URL that points to a GitHub instance, but not to the API\n endpoint.\n \"\"\"\n happy_github.get_user.side_effect = lambda _: User(login=None)\n\n with pytest.raises(exception.UnexpectedException) as exc_info:\n github_plugin.GitHubAPI.verify_settings(\n USER, ORG_NAME, BASE_URL, TOKEN\n )\n\n assert \"Possible reasons: bad api url\" in str(exc_info.value)\n\n def test_mismatching_user_login_raises(\n self, happy_github, organization, api\n ):\n \"\"\"I'm not sure if this can happen, but since the None-user thing\n happened, better safe than sorry.\n \"\"\"\n wrong_username = USER + \"other\"\n happy_github.get_user.side_effect = lambda username: User(\n username + \"other\"\n )\n expected_messages = [\n \"Specified login is {}, but the fetched user's login is {}\".format(\n USER, wrong_username\n ),\n \"Possible reasons: unknown\",\n ]\n\n with pytest.raises(exception.UnexpectedException) as exc_info:\n github_plugin.GitHubAPI.verify_settings(\n USER, ORG_NAME, BASE_URL, TOKEN\n )\n\n for msg in expected_messages:\n assert msg in str(exc_info.value)\n\n\nclass TestGetPeerReviewProgress:\n \"\"\"Tests for get_peer_review_progress\n\n TODO: These tests need to be expanded. A lot.\n \"\"\"\n\n def test_nothing_returns(self, review_student_teams, review_teams, api):\n \"\"\"Test calling the function when none of the functions return\n iterables.\n \"\"\"\n review_team_names = list(review_teams.keys())\n api.get_review_progress(\n review_team_names, review_student_teams, \"peer\"\n )\n\n def test_with_review_teams_but_no_repos(\n self, review_student_teams, review_teams, teams, api\n ):\n review_team_names = list(review_teams.keys())\n api.get_review_progress(\n review_team_names, review_student_teams, \"peer\"\n )\n","sub_path":"tests/unit_tests/repobee/plugin_tests/test_github.py","file_name":"test_github.py","file_ext":"py","file_size_in_byte":30568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"527854747","text":"\"\"\"\nfunctions for handling vaologin configuration. \n\nvaologin can load various default settings from a configuration file. 
This\nfile can be edited to tweak the defaults, but it is usually not necessary.\n\"\"\"\nimport os, sys, re, ConfigParser\nfrom vaologin import ConfigurationError\n\n# %INSTALLDIR% is expected to be replaced by the actual installation\n# directory during a \"make\" operation\ndef_product_home = \"$INSTALLDIR\"\ndef_sys_config_dir = \"$SYSCONFIGFILENAME\"\n\ndef_unix_sys_config_dir = \"/etc/httpd/conf\"\nif def_sys_config_dir.startswith('$'):\n def_sys_config_dir = def_unix_sys_config_dir\nconfig_filename = \"vaologin.cfg\"\nvaologin_home_envvar = \"VAOLOGIN_HOME\"\nvaologin_conf_envvar = \"VAOLOGIN_CONF\"\n\ndef getConfig(conffile=None, defaults=None, fail=False, \n deffilename=config_filename):\n \"\"\"\n return configuration parameters as a dictionary. See getConfigLocations()\n for the ordered list of default locations for the configuration file.\n @param conffile the full path to the configuration file to use. If \n None, the parameters will be loaded from a default \n location.\n @param defaults a dictionary of default values to use for any \n parameters not specified in the configuration file.\n @param fail if true, a ConfigurationError will be raised if no \n configuration file is found. The message will list \n all of the locations it looked.\n @param deffilename the path-less filename for the config file to assume\n when locating a default configuration file (i.e.\n when conffile=None).\n \"\"\"\n out = {}\n if defaults: out = defaults.copy()\n\n if not conffile:\n conffile = locateConfigFile(fail, deffilename)\n if not conffile or not os.path.exists(conffile): \n if fail: \n raise ConfigurationError(\"configuration file not found: \"+conffile)\n return out\n\n # this implementation assumes the .ini format \n cfg = ConfigParser.SafeConfigParser()\n cfg.read(conffile)\n secs = cfg.sections()\n for sec in secs:\n for namevalue in cfg.items(sec):\n out[\".\".join((sec, namevalue[0]))] = namevalue[1]\n\n return out\n\ndef locateConfigFile(fail=False, filename=config_filename):\n \"\"\"\n look for a configuration file on the system and return its path. This\n will return the first path returned by getConfigLocations() that is \n found to exist, or None if none of these exist.\n @param fail if true, a ConfigurationError will be raised if no \n configuration file is found. The message will list \n all of the locations it looked.\n @param filename the path-less filename for the config file. \n \"\"\"\n locs = getConfigLocations(filename=filename)\n for path in locs:\n if os.path.exists(path):\n return path\n if fail:\n raise ConfigurationError(\"Failed to find configuration file in any of these locations:\\n \" + \"\\n \".join(locs))\n return None\n \ndef getConfigLocations(filename=config_filename):\n \"\"\"\n return a list of file paths where we might find a configuration file, \n ordered from most preferred to least preferred. \n @param filename the path-less filename for the config file. 
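\n\n Example (illustrative; assuming no VAOLOGIN_* environment variables are set\n and no \"make\" install has run, so home falls back to the parent of the\n script's directory):\n getConfigLocations() -> [\"<script-parent>/conf/vaologin.cfg\",\n \"vaologin.cfg\", \"/etc/httpd/conf/vaologin.cfg\"]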
\n \"\"\"\n out = []\n\n # Loading out in order of preference.\n # $VAOLOGIN_CONF\n #\n if os.environ.has_key(vaologin_conf_envvar):\n out.append(os.environ[vaologin_conf_envvar]);\n\n # location relative to $VAOLOGIN_HOME, script path\n #\n if os.environ.has_key(vaologin_home_envvar):\n home = os.environ[vaologin_home_envvar];\n else:\n home = def_product_home\n if home.startswith('%') or not os.path.exists(home): \n # % means that \"make\" or \"make install\" has not been done\n # look relative to the script path assuming the script is in \n # a \"bin\" or \"cgi-bin\" sub-directory relative to home \n home = os.path.dirname(os.path.dirname(sys.argv[0]))\n if not home:\n home = \".\"\n out.append(os.path.join(home, \"conf\", filename))\n\n # directly in the current directory\n #\n out.append(filename)\n\n # the default system path\n #\n out.append(os.path.join(def_sys_config_dir, filename))\n\n return out\n\n_delimre = re.compile(r\"\\s*[,\\s]\\s*\")\ndef splitList(cfgval):\n \"\"\"\n split a configuration parameter value assuming it is a comma-delimited\n list\n @param cfgval the configuration parameter value to split\n @return list the individual items as a list\n \"\"\"\n if cfgval is None:\n return []\n return _delimre.split(cfgval.strip())\n\ndef mergeIntoList(cfglist, newitems):\n \"\"\"\n merge one or more values into a comma-delimited list. Unless it already\n appears in the given comma-delimited list, each new value will be appended.\n @param cfglist a configuration parameter value assumed to be a \n comma-separated list\n @param newitems the item or items to merge into cfglist. This can be a \n python scalar or a python list.\n @return str a new comma-delimited list \n \"\"\"\n if newitems is None: \n newitems = []\n if not isinstance(newitems, list):\n newitems = [newitems]\n if not cfglist and len(newitems) > 0:\n cfglist = newitems.pop(0)\n\n vals = splitList(cfglist)\n for item in newitems:\n if item not in vals:\n cfglist += \", %s\" % str(item)\n\n return cfglist\n","sub_path":"usvao/VAO/software/security/vaologin/branches/reorg/src/main/python/vaologin/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"184405095","text":"#!/usr/bin/env vroom\nfrom vroom import *\n\ndata = open('data/earthquakes-2010.dat').readlines()[2:]\n\nvertices = []\nlabels = []\ncolors = []\ncenter = []\n\nfor row in data:\n elems = row.split()\n epicenter = map(float, elems[2:5]) \n vertices.append(epicenter)\n\n magnitude = float(elems[5])\n if magnitude > 3.5:\n colors.append([1.0, 0.0, 0.0, 0.4])\n else:\n colors.append([0.0, 1.0, 0.0, 0.4])\n\n if magnitude > 4.0:\n labels.append(('{:.1f}'.format(magnitude), epicenter))\n\ndef gl_init():\n global points\n points = PointCloud(vertices, colors)\n points.sprite('share/particle.bmp')\n global center\n center = [-1.0*x for x in points.center()]\n\n textFont('share/fonts/DroidSans.ttf')\n\ndef draw():\n lighting(False)\n pushMatrix()\n translate(center)\n points.draw()\n\n for (txt, pos) in labels:\n text(txt, pos[0], pos[1], pos[2])\n\n popMatrix()\n","sub_path":"apps/quakes.py","file_name":"quakes.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"227862729","text":"import datetime\nfrom astropy.io import fits\nimport numpy as np\nfrom shapely.geometry import Polygon\nimport h5py\nimport os\nfrom 
matplotlib.backends.backend_pdf import PdfPages\n\nimport h5axeconfig\n\nfrom pylinear import grism\nfrom pylinear.utilities import gzip\nfrom . import methods,residuals,mcmcunc\nfrom .matrix import Matrix\nfrom . import group\nfrom ..tabulation import tabulate\nfrom .fluxunit import FLUXUNIT,FLUXSCALE\n\n\n\ndef getInitialGuess(mat,sources,grisms,extconf,conf):\n print('[info]Getting initial guesses')\n\n x0=np.zeros(mat.shape[1])\n segids=mat.segids\n\n for j,(segid,src) in enumerate(sources):\n try:\n index=segids.index(segid)\n except ValueError:\n index=None\n print(\"[warn]{} will not be extracted.\".format(segid))\n \n if index is not None:\n s,g1=mat.ri[index]\n if s ==segid:\n g2=mat.lam[g1]\n init=conf['initialize'].lower()\n if init=='direct':\n w,f,unc=src.directExtraction(grisms,extconf,conf)\n src.sed.flam=f\n src.sed.lamb=w\n elif init=='broadband':\n pass\n else:\n raise NotImplementedError(\"no valid initialization scheme\")\n \n x0[g1]=src.sed.interpolate(src.waves[g2])/FLUXSCALE\n \n else:\n raise IndexError(\"error in building initial guess\")\n\n return x0\n \ndef extractSources(conf,sources,grisms,extconf,mskconf,grismFF,grpid,\\\n h5g,h5s,pdf):\n\n # build the matrix and check validity\n mat=Matrix(conf,grisms,sources,extconf,mskconf,grismFF)\n if not hasattr(mat,'A'):\n print(\"[warn]Invalid matrix. Ignoring grpid: {}.\".format(grpid))\n return\n \n # get initial guess\n x0=getInitialGuess(mat,sources,grisms,extconf,conf)\n \n # type of extraction\n method=conf['mode'].lower()\n if method == 'grid':\n result=methods.gridSearch(conf['logdamp'],mat,x0)\n elif method == 'golden':\n result=methods.goldenSearch(conf['logdamp'],mat,x0)\n elif method == 'fixed':\n result=methods.fixedDamping(conf['logdamp'],mat,x0)\n else:\n raise NotImplementedError(\"Extraction mode is invalid.\")\n\n # plot\n mat.lcurve.plot(pdf,grpid=grpid)\n\n # write the group data\n logr1,logx,logl=mat.lcurve.values()\n c=mat.lcurve.curvature\n dtype=[('logdamp',np.float32),('logr1norm',np.float32),\\\n ('logxnorm',np.float32),('curv',np.float32)]\n data=np.array(list(zip(logl,logr1,logx,c)),dtype=dtype)\n dgrp=h5g.create_dataset(str(grpid),data=data,compression='gzip')\n dgrp.attrs['istop']=np.uint8(result.istop)\n dgrp.attrs['itn']=np.uint32(result.itn)\n dgrp.attrs['r1norm']=np.float32(result.r1norm)\n dgrp.attrs['r2norm']=np.float32(result.r2norm)\n dgrp.attrs['anorm']=np.float32(result.anorm)\n dgrp.attrs['acond']=np.float32(result.acond)\n dgrp.attrs['xnorm']=np.float32(result.xnorm)\n dgrp.attrs['arnorm']=np.float32(result.arnorm)\n dgrp.attrs['damping']=np.float32(result.damp)\n dgrp.attrs['nmatrix']=np.uint64(len(mat))\n #nmat=mat.shape[0]*mat.shape[1]\n #dgrp.attrs['density']=np.float32(len(mat))/np.float32(nmat)\n dgrp.attrs['density']=np.float32(len(mat))/np.float32(mat.count)\n dgrp.attrs['time']=np.float32(result.time)\n dgrp.attrs['nsrc']=np.uint16(len(sources))\n dgrp.attrs['npix']=np.uint32(mat.shape[0])\n dgrp.attrs['nlam']=np.uint32(mat.shape[1])\n dgrp.attrs['frob']=np.float32(mat.norm)\n dgrp.attrs['ngrism']=np.uint16(len(grisms))\n \n # update with MCMC uncertainties\n result=mcmcunc.mcmcUncertainties(conf['mcmcunc'],mat,result)\n \n # package the outputs\n mcmcconf=conf['mcmcunc'] # something for easier access later \n dtype=[('lam',np.float32),('flam',np.float32),\\\n ('flo',np.float32),('fhi',np.float32)]\n for segid,src in sources:\n lam=src.waves\n flam=np.full_like(lam,np.nan)\n flo=np.full_like(lam,np.nan)\n fhi=np.full_like(lam,np.nan)\n \n \n if segid in mat.segids:\n 
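# descriptive note: look up this segment's block of unknowns in the solver's\n # solution vector, so its fluxes and uncertainties can be copied out below\n 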
index=mat.segids.index(segid)\n s,g1=mat.ri[index]\n if (s==segid) and (len(g1)!=0):\n g2=mat.lam[g1]\n flam[g2]=result.x[g1]\n flo[g2]=result.lo[g1]\n fhi[g2]=result.hi[g1]\n data=np.array(list(zip(lam,flam,flo,fhi)),dtype=dtype)\n dset=h5s.create_dataset(str(segid),data=data,compression='gzip')\n\n dset.attrs['group']=np.uint16(grpid)\n dset.attrs['RA']=np.float64(src.adc[0])\n dset.attrs['Dec']=np.float64(src.adc[1])\n dset.attrs['x']=np.float32(src.xyc[0]-src.ltv[0])\n dset.attrs['y']=np.float32(src.xyc[1]-src.ltv[1])\n dset.attrs['npix']=np.uint32(src.npix)\n dset.attrs['area']=np.float32(src.area)\n dset.attrs['total']=np.float32(src.total)\n dset.attrs['mag']=np.float32(src.mag)\n dset.attrs['lamb0']=np.float32(src.lamb0)\n dset.attrs['lamb1']=np.float32(src.lamb1)\n dset.attrs['dlamb']=np.float32(src.dlamb)\n dset.attrs['nlamb']=np.uint16(len(lam))\n dset.attrs['MCMCUNC']=np.bool(mcmcconf['perform'])\n if mcmcconf['perform']:\n dset.attrs['nwalkers']=np.uint16(mcmcconf['nwalkers'])\n dset.attrs['nstep']=np.uint32(mcmcconf['nstep'])\n dset.attrs['burn']=np.float32(mcmcconf['burn'])\n \n\ndef h5yaml(h5,conf):\n for k,v in conf.items():\n if isinstance(v,dict):\n h5g=h5.create_group(k)\n h5yaml(h5g,v)\n elif isinstance(v,str):\n h5.attrs[k]=np.string_(v)\n elif v is None:\n h5.attrs[k]=np.string_(\"\")\n else:\n h5.attrs[k]=v\n \n\ndef extract(conf,sources):\n if not conf['perform']:\n return\n \n # just some stuff for short-hand\n calconf=conf['calib']\n conffile=calconf['h5conf']\n\n # read grism images\n grisms=grism.Data(conf['imglst'],'img',conffile)\n \n # get the grism config data\n extconf=h5axeconfig.Camera(conffile,grisms.grism,beams=conf['beam'])\n mskconf=h5axeconfig.Camera(conffile,grisms.grism,beams=conf['mask'])\n grismFF=h5axeconfig.FlatField(calconf['h5flat'],grisms.grism)\n\n \n # make the tables, if need-be\n tabulate(conf['tables'],grisms,sources,extconf,'odt')\n #tabulate(conf['tables'],grisms,sources,mskconf,'omt')\n\n # set extraction values for each source\n sources.setExtractionParameters(conf,extconf)\n \n print('\\n\\n')\n print('[debug]make beam masks here!')\n print('\\n\\n')\n \n # output file names\n h5file='{}.h5'.format(conf['outroot'])\n pdffile='{}.pdf'.format(conf['outroot'])\n \n with h5py.File(h5file,'w') as h5,PdfPages(pdffile) as pdf:\n now=datetime.datetime.now()\n h5.attrs['date']=np.string_(now.strftime(\"%Y-%m-%d\"))\n h5.attrs['time']=np.string_(now.strftime(\"%H:%M:%S\"))\n h5.attrs['nimage']=np.uint16(len(grisms))\n h5.attrs['nsource']=np.uint16(len(sources))\n h5.attrs['grism']=np.string_(grisms.grism)\n h5.attrs['fluxscale']=np.float32(FLUXSCALE)\n h5.attrs['fluxunit']=np.string_(FLUXUNIT)\n h5.attrs['confpath']=np.string_(os.path.abspath(conf.conffile))\n h5.attrs['conffile']=np.string_(os.path.basename(conf.conffile))\n\n # copy in the config\n h5y=h5.create_group(\"CONFIG\")\n h5yaml(h5y,conf)\n\n # create groups for the data\n h5g=h5.create_group('GROUPS')\n h5s=h5.create_group('SPECTRA')\n h5s.attrs['nsource']=np.uint32(len(sources))\n\n \n # extract the sources based on a grouping choice\n if conf['group']:\n # do the grouping!\n groups=group.makeGroups(conf,grisms,sources,extconf)\n ngrp=len(groups)\n for grpid,grp in enumerate(groups):\n theseSources=sources.select(grp)\n extractSources(conf,theseSources,grisms,extconf,mskconf,\\\n grismFF,grpid,h5g,h5s,pdf)\n else:\n grpid=0\n ngrp=1\n extractSources(conf,sources,grisms,extconf,mskconf,grismFF,\\\n grpid,h5g,h5s,pdf)\n\n # put in some data for the groups\n 
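# (descriptive note: ngroup is the number of extraction groups solved above)\n 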
h5g.attrs['ngroup']=np.uint16(ngrp)\n\n if conf['residuals']['perform']:\n print('[debug]Must code up the new residuals plan.')\n\n\n\n # update the residuals\n #residuals.computeResiduals(conf['residuals'],grisms,extconf,mat,results)\n \n","sub_path":"pylinear/modules/extraction/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636052304","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Sun Feb 24 23:07:45 2019\n# @Author : JRP - Ruipeng Jia\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n##################################################################\n## dataset: fmri\nfmri = sns.load_dataset(\"fmri\")\nprint(type(fmri)) # \n\n##################################################################\n## lineplot, add in 0.9.1, 更多例子见官网\nax = sns.lineplot(x=\"timepoint\", y=\"signal\", data=fmri)\nplt.show()\n\nax = sns.lineplot(x=\"timepoint\", y=\"signal\", hue=\"event\", data=fmri)\nax = sns.lineplot(x=\"timepoint\", y=\"signal\", hue=\"event\", style=\"event\", data=fmri)\nax = sns.lineplot(x=\"timepoint\", y=\"signal\", hue=\"region\", style=\"event\", data=fmri)\nax = sns.lineplot(x=\"timepoint\", y=\"signal\", hue=\"event\", style=\"event\", markers=True, dashes=False, data=fmri)\n\n##################################################################\n## 论文会用到的\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nfigure(num=None, figsize=(18, 6), dpi=80, facecolor='w', edgecolor='k')\n\n\nplt.close(\"all\") # close other windows\n# plt.figure(figsize=(200, 100))\nsns.set(font_scale=1.3) # must before set_style\nsns.set_style(\"whitegrid\") # useful!!!\n# index = pd.date_range(\"1 1 2000\", periods=100, freq=\"m\", name=\"date\"); print(index) # range from date; 好厉害啊\nindex = pd.Series(np.arange(100)); print(index.shape)\ndata = np.random.randn(100, 4).cumsum(axis=0); print(data.shape) # (100, 4)\nwide_df = pd.DataFrame(data, index, [\"a\", \"b\", \"c\", \"d\"])\npalette = sns.color_palette([\"#00ccff\", \"#cc3300\", \"#ff9900\", \"#99ffcc\"]) # fix the color\nax = sns.lineplot(data=wide_df, palette=palette)\nax.set(xlabel=r\"x $\\alpha$\", ylabel=\"y\")\n# plt.set_size_inches(18.5, 10.5)\nplt.show()\n\n## save\nplt.rcParams['savefig.dpi'] = 300 # 图片像素\nplt.rcParams['figure.dpi'] = 300 # 分辨率\nplt.rcParams[\"figure.figsize\"] = [27,9]\n# 默认的像素: [6.0,4.0], 分辨率为 100, 图片尺寸为 600&400\n# 指定 dpi=200, 图片尺寸为 1200*800\n# 指定 dpi=300, 图片尺寸为 1800*1200\nplt.savefig('tmp.jpg')\n## 论文注意点:\n## 1. 坐标轴说明\n## 2. 
图示颜色默认会随机变化, 多幅图会不一致\n","sub_path":"bin/template/src/jptseaborn/l9_dataset-fmri_lineplot.py","file_name":"l9_dataset-fmri_lineplot.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624083348","text":"'''\nrequest handler class\n'''\nfrom http.server import BaseHTTPRequestHandler\nfrom os.path import isfile\nimport re\nimport time\nfrom streams import test_streamer\nfrom auth import usr_auth\nfrom fserver import deliver\nfrom allow import is_allowed\nfrom datetime import datetime as dt\nfrom IPC import IPC_client\n\nhome=\"common/login.html\"\n\nusrs=usr_auth({'admin':{'pwd':'1234','dat':'admin'},\n 'dood':{'pwd':'wat','dat':'user'}})\n\nevt_streams=['common/test.evt']\nts=test_streamer(refresh_rate=1000)\nts.start()\nevt_runners=[ts]\n\ndef msg_format(prefix,msg,width=80):\n pfx=\"[{}] \".format(prefix)+\"{}\\n\"\n spc=(len(pfx)-3)*\" \"+\"{}\\n\"\n ret=pfx.format(dt.now())\n lines=msg.split(\"\\n\")\n eff_w=width-len(pfx)+2\n for l in lines:\n while len(l)>eff_w:\n ret+=spc.format(l[:eff_w])\n l=l[eff_w:]\n ret+=spc.format(l)\n return ret[:-1]\n\ntry:\n ipc=IPC_client(50001)\n ipc.start()\nexcept:\n print(msg_format(\"FAIL\",\"Could not link IPC client to pipeline.\"))\n\nclass HTTPHandler(BaseHTTPRequestHandler):\n\n def log_info(self,msg):\n print(msg_format(\"INFO\",msg))\n\n def log_warn(self,msg):\n print(msg_format(\"WARN\",msg))\n\n def log_err(self,msg):\n print(msg_format(\"FAIL\",msg))\n\n def log_message(self,a=None,b=None,c=None,d=None):\n pass\n\n def do_GET(self):\n # log and parse request\n url=self.path[1:].split(\"?\")\n path=url[0]\n if path is \"\":path=home\n # log extra request data\n qstring=re.findall(r'(\\w+)=(\\w+)&*',url[1]) if len(url)>1 else None\n qstring={i[0]:i[1] for i in qstring} if qstring else None\n self.log_info(\"Remote: {}\\nPath: {}\\nQstring: {}\".format(self.client_address[0],path,qstring))\n # get permission level for this request\n sid=''\n if qstring:\n if 'sid' in qstring:\n sid=qstring['sid']\n profile=usrs.get_profile(sid)\n # check if request is valid for given permission level\n if not is_allowed(path,profile):\n self.log_warn(\"Request for '{}' not authorized!\".format(path))\n self.send_error(403)\n return\n # log valid request\n elif path.endswith(\"evt\"):\n self.log_info(\"Request allowed for user {}\".format(usrs.get_user(sid)))\n self.log_info(\"Connecting SSE...\")\n if path not in evt_streams:\n self.log_warn(\"Unknown event stream requested.\")\n return\n self.send_response(200)\n self.send_header(\"Content-Type\",\"text/event-stream\")\n self.send_header(\"Expires\",\"-1\")\n self.end_headers()\n h=evt_runners[evt_streams.index(path)]\n self.wfile.write(\"retry: {}\\n\\n\".format(h.refresh_rate).encode(\"UTF-8\"))\n self.log_info(\"Writing '{}' from {}.\".format(h.output_data,h))\n self.wfile.write(\"data: {}\\n\\n\".format(h.output_data).encode(\"UTF-8\"))\n return\n elif isfile(path):\n self.log_info(\"Request allowed for user {}\".format(usrs.get_user(sid)))\n self.log_info(\"Requested file found.\")\n else:\n self.log_info(\"Requested file not found!\")\n self.send_error(404)\n return\n deliver(path,self,True if \"/res/\" in path else False)\n\n def do_POST(self):\n self.log_info(\"Got POST containing '{}'.\".format(self.path))\n content=self.path[1:]\n content={i[0]:i[1] for i in re.findall(r'(\\w+)=(\\w+);*',content)}\n print(content)\n if content['type']=='test':\n global pointless_counter\n pointless_counter+=1;\n 
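# note: assumes a module-level 'pointless_counter = 0' is defined elsewhere;\n # the file as shown never initializes it before this increment\n 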
self.log_info(\"Pointless counter increased.\")\n self.send_response(303)\n self.send_header(\"Content-Type\",\"text/html\")\n self.send_header(\"Location\",\"/\")\n self.end_headers()\n return\n if content['type']=='auth':\n usrs.login(content,self)\n\n def do_HEAD(self):\n print(\"got HEAD\")\n self.send_response(200)\n self.send_header(\"Content-Type\",\"text/html\")\n self.end_headers()\n","sub_path":"server/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"499160858","text":"from conftest import option\nfrom unit.applications.proto import TestApplicationProto\n\n\nclass TestApplicationPHP(TestApplicationProto):\n application_type = \"php\"\n\n def load(self, script, index='index.php', **kwargs):\n script_path = option.test_dir + '/php/' + script\n appication_type = self.get_appication_type()\n\n if appication_type is None:\n appication_type = self.application_type\n\n self._load_conf(\n {\n \"listeners\": {\"*:7080\": {\"pass\": \"applications/\" + script}},\n \"applications\": {\n script: {\n \"type\": appication_type,\n \"processes\": {\"spare\": 0},\n \"root\": script_path,\n \"working_directory\": script_path,\n \"index\": index,\n }\n },\n },\n **kwargs\n )\n","sub_path":"test/unit/applications/lang/php.py","file_name":"php.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546882056","text":"# !/usr/bin/env python\n\nimport rospy\nfrom flexbe_core import EventState, Logger\n\n'''\nCreated on 21.04.2019\n@author: Quentin Gaillot\n'''\n\n\nclass SetSegmentationRosParam(EventState):\n '''\n Set the Rosparams /process_table_segmentation and /process_object_segmentation to true or false to activate/desactivate the table/object segmentation.\n ># ValueTableSegmentation object The rosparam to set for table segmentation.\n ># ValueObjectSegmentation object The rosparam to set for object segmentation.\n\n <= done The rosparams are set\n '''\n\n def __init__(self, ValueTableSegmentation, ValueObjectSegmentation):\n '''\n Constructor\n '''\n super(SetSegmentationRosParam, self).__init__(outcomes=['done'])\n self.ValueTable = ValueTableSegmentation\n self.ValueObject = ValueObjectSegmentation\n\n def execute(self, userdata):\n '''\n Execute this state\n '''\n if self.ValueTable:\n rospy.set_param(\"/process_table_segmentation\", self.ValueTable)\n else:\n if rospy.has_param(\"/process_table_segmentation\"):\n rospy.delete_param(\"/process_table_segmentation\")\n\n if self.ValueObject:\n rospy.set_param(\"/process_object_segmentation\", self.ValueObject)\n else:\n if rospy.has_param(\"/process_object_segmentation\"):\n rospy.delete_param(\"/process_object_segmentation\")\n\n return \"done\"\n","sub_path":"sara_flexbe_states/src/sara_flexbe_states/SetSegmentationRosParam.py","file_name":"SetSegmentationRosParam.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"184935681","text":"from abc import ABC\nfrom datetime import datetime\nfrom typing import Dict, Any\n\nfrom investing_algorithm_framework.core.utils import TimeUnit\nfrom investing_algorithm_framework.core.workers.worker import Worker\n\n\nclass ScheduledWorker(Worker, ABC):\n time_unit: TimeUnit = None\n time_interval: int = None\n\n def start(self, **kwargs: Dict[str, Any]) -> None:\n\n # If the worker has 
never run, run it\n if self.last_run is None:\n super(ScheduledWorker, self).start()\n\n else:\n # Get the current time\n elapsed_time = datetime.now() - self.last_run\n\n # Second evaluation\n if self.get_time_unit() is TimeUnit.SECOND:\n seconds = elapsed_time.total_seconds()\n\n if seconds > self.get_time_interval():\n super(ScheduledWorker, self).start()\n\n # Minute evaluation\n elif self.get_time_unit() is TimeUnit.MINUTE:\n minutes, _ = divmod(elapsed_time.total_seconds(), 60)\n\n if minutes > self.get_time_interval():\n super(ScheduledWorker, self).start()\n\n # Hour evaluation\n elif self.get_time_unit() is TimeUnit.HOUR:\n hours, _ = divmod(elapsed_time.total_seconds(), 3600)\n\n if hours > self.get_time_interval():\n super(ScheduledWorker, self).start()\n\n def get_time_unit(self) -> TimeUnit:\n assert getattr(self, 'time_unit', None) is not None, (\n \"{} should either include a time_unit attribute, or override the \"\n \"`get_time_unit()` method.\".format(self.__class__.__name__)\n )\n\n return getattr(self, 'time_unit')\n\n def get_time_interval(self) -> int:\n assert getattr(self, 'time_interval', None) is not None, (\n \"{} should either include a time_interval attribute, or override \"\n \"the `get_time_interval()` method.\".format(self.__class__.__name__)\n )\n\n return getattr(self, 'time_interval')\n","sub_path":"investing_algorithm_framework/core/workers/scheduled_worker.py","file_name":"scheduled_worker.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"445082377","text":"import pygame\n\n# Initialize the pygame\npygame.init()\n\n# Create the screen\nscreen = pygame.display.set_mode((800, 600))\n\n# Title and Icon\npygame.display.set_caption(\"Dropping Hearts\")\nicon = pygame.image.load('img/Dropping-Hearts.png')\npygame.display.set_icon(icon)\n\n# Player\nplayerImg = pygame.image.load('img/girl.png')\nplayerX = 370\nplayerY = 480\n\n\ndef player():\n screen.blit(playerImg, (playerX, playerY))\n\n\n# Game Loop makes the game run always and doesn't close down\nrunning = True\nwhile running:\n\n # RGB = Red, Green, Blue\n screen.fill((255, 0, 0))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n player()\n pygame.display.update()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"477673832","text":"import unittest\r\nimport os\r\nfrom HTMLTestRunner import HTMLTestRunner\r\nsuite = unittest.TestSuite()\r\n\r\nloader = unittest.defaultTestLoader\r\n\r\ncases = loader.discover(os.getcwd()+\"\\\\TestCalc.py\",pattern=\"*.py\")\r\nsuite.addTest(cases)\r\nwith open(\"银行开户测试.html\",\"w+\",encoding=\"utf-8\") as f:\r\n runner = HTMLTestRunner.HTMLTestRunner(\r\n stream=f,\r\n verbosity=1,\r\n title=\"银行开户测试\",\r\n description=\"这是第三次迭代测试\"\r\n )\r\n runner.run(suite)\r\n","sub_path":"pythonProject2day16/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"166844418","text":"import pdfkit\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, user_passes_test, permission_required\nfrom django.contrib.auth.models import Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.paginator import Paginator, 
PageNotAnInteger, EmptyPage\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.template.loader import get_template\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import generic\nfrom django.views.generic.detail import DetailView\n\nfrom users.models import User, Note\nfrom users.forms import AddNoteForm, EditNoteForm, DeleteNoteForm, EditUserForm, ToggleUserForm, AddUserForm, DivErrorList\n\nfrom utils.models import ChangeLog\n\nLOGIN_URL = ''\n\n# Function for checking if the user is staff. \ndef is_staff(user):\n return user.is_staff\n\n# Function for outputting error string\ndef errorMessage(action, edited):\n return ('Det skjedde en feil ved %s av %s. Se detaljer nedenfor.' % (action, edited))\n\n# Function for outputting success string\ndef successMessage(action, edited):\n return ('%s av %s var vellykket.' % (action, edited))\n\n# View for listing all users\nclass users(generic.ListView):\n model = User\n template_name = 'users/backend/users.html'\n paginate_by = 10\n queryset = User.objects.all()\n\n# View for exporting a list of users to PDF.\n@permission_required('users.has_users_export', login_url = LOGIN_URL)\ndef exportUsers(request):\n\n # Query all users\n users = User.objects.all()\n\n # Getting template, and rendering data\n template = get_template('exports/users.html')\n html = template.render({'users': users})\n pdf = pdfkit.from_string(html, False)\n\n # Method for creating file name\n def create_file_name():\n file_name = 'users %s.pdf' % (timezone.now())\n return file_name.strip()\n\n # Set filename as the filename created by create_file_name() method\n filename = create_file_name()\n\n # Dump file contents into a response with the appropriate content type\n response = HttpResponse(pdf, content_type = 'application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"' + filename + '\"'\n\n return response\n\n# View for the backend user detail page\n#@login_required\n#@user_passes_test(is_staff, login_url='/login')\ndef user(request, pk):\n\n # Query appropriate user based on pk returned in url and sticky notes attached to the user\n user = User.objects.get(pk=pk)\n sticky_notes = Note.objects.filter(user_id = pk, is_sticky = True).order_by('date_edited')\n\n context = {\n 'user': user,\n 'sticky_notes': sticky_notes\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user.html', context)\n\n# View for adding a new user\n@permission_required('users.has_user_add', login_url = LOGIN_URL)\ndef addUser(request):\n\n # Get the AddUserForm \n add_user_form = AddUserForm()\n\n # Show a warning alert on page-load\n messages.warning(request, 'Man kan ikke tildele brukeren en brukergruppe ved å opprette brukeren gjennom dette skjemaet. 
Brukeren vil automatisk bli lagt til i \"privatkunde\", men dette kan byttes i etterkant.')\n\n if request.method == 'POST':\n\n # Bind data to the form class, and add the error class DivErrorList which adds styling to eventual errors\n add_user_form = AddUserForm(request.POST, error_class=DivErrorList)\n\n # Validate form inputs\n if add_user_form.is_valid():\n\n # If the form is valid, save form, add user to group \"Privatkunde\" and show successmessage\n user = add_user_form.save()\n customer_group = Group.objects.get(name = 'Privatkunde')\n user.roles.add(customer_group)\n\n # Give the user successful feedback\n messages.success(request, successMessage('Oppretting', 'bruker'))\n\n # Redirect to the users overview page\n return redirect('users')\n\n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, errorMessage('oppretting', 'bruker'))\n\n context = {\n 'add_user_form': add_user_form,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_add.html', context)\n\n# View for deactivating/activating user\n@permission_required('users.has_user_high_level_management', login_url = LOGIN_URL)\ndef toggleUser(request, pk):\n\n # Query appropriate user based on pk returned in url\n user = User.objects.get(pk = pk)\n\n # Check if the user exists, if not, return 404\n user_to_toggle = get_object_or_404(User, pk = pk)\n\n if request.method == 'POST':\n\n # Bind data to the form class, and add the user as instance\n toggle_user_form = ToggleUserForm(request.POST, error_class=DivErrorList, instance = user_to_toggle)\n\n # Validate form inputs\n if toggle_user_form.is_valid():\n\n # Check if user is active, and change active state based on result. Redirect to the user profile page.\n if user.is_active == True:\n user.is_active = False\n user.save()\n\n # Give the user successful feedback\n messages.success(request, 'Brukeren ble deaktivert.')\n return redirect('user', pk)\n \n else:\n user.is_active = True\n user.save()\n\n # Give the user successful feedback\n messages.success(request, 'Brukeren ble aktivert.')\n return redirect('user', pk)\n\n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, 'Det skjedde en feil ved deaktivering av bruker. Se detaljer nedenfor.')\n\n context = {\n 'user': user,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_edit_toggle.html', context)\n\n# View for editing an existing user\n@permission_required('users.has_user_management', login_url = LOGIN_URL)\ndef editUser(request, pk):\n\n # Query appropriate user based on pk returned in url\n user = User.objects.get(pk = pk)\n\n # Get the EditUserForm and add the user as instance\n edit_user_form = EditUserForm(instance = user)\n\n if request.method == 'POST':\n\n # Bind data to the form class, and add the user as instance\n edit_user_form = EditUserForm(request.POST, error_class=DivErrorList, instance = user)\n \n old_user_instance = User.objects.get(pk = pk)\n\n # Validate form inputs\n if edit_user_form.is_valid():\n\n # Save edits\n edit_user_form.save()\n\n \n # Log change\n ChangeLog.change_message(request.user, User, old_user_instance)\n \n\n # Give the user successful feedback and redirect\n messages.success(request, successMessage('Redigering', 'bruker'))\n return redirect('user', pk)\n \n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, 'Det skjedde en feil ved redigering av bruker. 
Se detaljer nedenfor.')\n \n context = {\n 'user': user,\n 'edit_user_form': edit_user_form,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_edit.html', context)\n\n# View for exporting a user profile to PDF\n@permission_required('users.has_user_export', login_url = LOGIN_URL)\ndef exportUser(request, pk):\n\n # Query appropriate user based on pk returned in url and the users group\n user = User.objects.get(pk = pk)\n group = Group.objects.get(user = pk)\n\n # Getting template, and rendering data\n template = get_template('exports/user.html')\n html = template.render({'user':user, 'group': group})\n pdf = pdfkit.from_string(html, False)\n\n # Method for creating file name\n def create_file_name(self):\n file_name = '%s-%s-%s.pdf' % (self.first_name, self.last_name, timezone.now())\n return file_name.strip()\n\n # Set filename as the filename created by create_file_name() method\n filename = create_file_name(user)\n\n # Dump file contents into a response with the appropriate content type\n response = HttpResponse(pdf, content_type = 'application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"' + filename + '\"'\n\n return response\n\n# View for listing all notes attached to user\n#@login_required\ndef userNotes(request, pk):\n\n # Query appropriate user based on pk returned in url\n user = User.objects.get(pk=pk)\n\n # Query appropriate notes and sticky notes based on pk returned in url and order by date edited.\n notes = Note.objects.filter(user_id=pk, is_sticky=False).order_by('date_edited')\n sticky_notes = Note.objects.filter(user_id=pk, is_sticky=True).order_by('date_edited')\n\n context = {\n 'user': user,\n 'notes': notes,\n 'sticky_notes': sticky_notes\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_notes.html', context)\n\n# View for editing an existing note\ndef editNote(request, pk):\n\n # Query appropriate note based on pk returned in url\n note = Note.objects.get(pk = pk)\n\n # Get the EditNoteForm and add the note as instance\n edit_note_form = EditNoteForm(instance = note)\n\n if request.method == 'POST':\n\n # Bind data to the form class, and add the note as instance\n edit_note_form = EditNoteForm(request.POST, error_class=DivErrorList, instance = note)\n\n # Validate form inputs\n if edit_note_form.is_valid():\n\n # In addition to fields edited, set date_edited to now, and author to request.user\n edit_note_form.instance.date_edited = timezone.now()\n edit_note_form.instance.author = request.user\n\n # Save edited inputs and instances\n edit_note_form.save()\n\n # Give the user successful feedback and redirect\n messages.success(request, successMessage('Redigering', 'notat'))\n return redirect ('user', note.user_id)\n\n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, errorMessage('redigering', 'notat'))\n\n context = {\n 'note': note,\n 'edit_note_form': edit_note_form,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_notes_edit.html', context)\n\n# View for adding a new note\ndef addNote(request, pk):\n\n # Query appropriate user based on pk returned in url and get AddNoteForm()\n user = User.objects.get(pk = pk)\n add_note_form = AddNoteForm()\n\n if request.method == 'POST':\n\n # Bind data to the form class\n add_note_form = AddNoteForm(request.POST, error_class=DivErrorList)\n\n # Validate form inputs\n if add_note_form.is_valid():\n\n # In adition to the field inputs, add 
date_edited to now, user queried to user and request.user to author\n add_note_form.instance.date_edited = timezone.now()\n add_note_form.instance.user = user\n add_note_form.instance.author = request.user\n\n # Save inputs and instances\n add_note_form.save()\n\n # Give the user successful feedback and redirect\n messages.success(request, 'Oppretting av notat var vellykket')\n return redirect ('user', pk)\n\n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, 'Det skjedde en feil ved oppretting av notat. Se detaljer nedenfor.')\n\n context = {\n 'user': user,\n 'add_note_form': add_note_form,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_notes_add.html', context)\n\n# View for deleting note\ndef deleteNote(request, pk):\n\n # Query appropriate note based on pk returned in url and get the user attached to the note\n note = Note.objects.get(pk = pk)\n user = User.objects.get(id = note.user_id)\n\n # Check if the note exists, if not, return 404\n new_to_delete = get_object_or_404(Note, pk = pk)\n\n if request.method == 'POST':\n\n # Bind data to the form class, and add the note to delete as instance\n delete_note_form = DeleteNoteForm(request.POST, error_class=DivErrorList, instance = new_to_delete)\n \n # Validate form inputs\n if delete_note_form.is_valid():\n\n # Delete note\n new_to_delete.delete()\n\n # Give the user successful feedback and redirect\n messages.success(request, successMessage('Sletting', 'notat'))\n return redirect ('user', user.id)\n\n else:\n # If form inputs is invalid, give user feedback\n messages.error(request, errorMessage('sletting', 'notat'))\n \n context = {\n 'note': note,\n 'user': user, \n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_notes_delete.html', context)\n\n# View for listing all changes attached to user\ndef userChangelog(request, pk):\n\n # querying user, chnagelogs and getting the first page of the request\n user = User.objects.get(pk = pk)\n changelog_list = ChangeLog.objects.filter(object_id = pk)\n page = request.GET.get('page', 1)\n\n # giving paginator a list of objects and number of objects per page\n paginator = Paginator(changelog_list, 10)\n \n # error handler for paginator\n try:\n changelogs = paginator.page(page)\n except PageNotAnInteger:\n changelogs = paginator.page(1)\n except EmptyPage:\n changelogs = paginator.page(paginator.num_pages)\n \n context = {\n 'user': user,\n 'changelogs': changelogs,\n }\n\n # Render request, template and context\n return render(request, 'users/backend/user/user_changelog.html', context)\n\n\ndef base(request):\n\n context = {\n }\n\n # Render request, template and context\n return render(request, 'frontend_base.html', context)","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"26825246","text":"# Arithmetic operations\r\n# BODMAS applies, can override with brackets\r\n# five main operators: + - * / **\r\n\r\nx=8+2\r\ny=100*2\r\nz=100 * (2 + 2)\r\n\r\n#power symbol **\r\np=10**2\r\n\r\n#modulo / remainder / like modular arithemtic in Maths\r\nq=10%3\r\n\r\n# can represent complex numbers as well using j, need to specify 1\r\nr = 1j\r\ns=(-1)**(0.5)\r\n\r\n#####################\r\n\r\n# import the random library, this is included in python\r\nimport random\r\n\r\n# set the starting health\r\nHealth = 
50\r\nprint(Health)\r\n\r\n# create difficulty, set to easy (1), or higher value for higher difficulty as the denominator\r\nDiff = 3\r\n\r\n# generate the health potion value (include difficulty as denominator), specify integer\r\n# this uses the random integer function over a defined range 25->50\r\n\r\npotion_health = int(random.randint(25,50) / Diff)\r\nprint(potion_health)\r\n\r\n# add the health potion to the original health\r\nHealth = Health+potion_health\r\nprint(Health)\r\n\r\n# EXTRA not part of lesson\r\n# rounding\r\n\r\nx = round(2.2)\r\n\r\n# this makes the type of the number an integer\r\n\r\n# floor division, round down to nearest integer\r\n\r\nb = 9//2\r\n\r\n# comparison operators\r\n\r\na = 1\r\nb = 2\r\nprint(a==b)\r\n# will be false\r\n\r\nprint(a>b)\r\n# will be false\r\n\r\n# other comparison operators: !=, >=, <=, <>\r\n\r\n#logical\r\n","sub_path":"Lesson2,numerics,random function.py","file_name":"Lesson2,numerics,random function.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"2084483","text":"# COMP3201\n# by Ting Hu\n\n# Assignment 5 - LGP program generation structural intron removal\n\nimport random\n\ndef main():\n\n # setting parameters\n max_prog_length = 6 # 6 instructions in total is the upper limit\n n_calculation_reg = 3 # {r0, r1, r2} and r0 is designated as the output register\n n_input_reg = 2 # {r3, r4}\n n_operators = 4 # {+, -, *, /}\n n_constant = 5 # {1, 2, 3, 4, 5}\n constant_rate = 0.4 # An operand can be a constant with a 40% chance, however, both operands cannot be constants at the same time\n\n\n ##### 1. randomly generate an LGP program with no more than [max_prog_length] instructions\n # Hint: an instruction can be represented by a list of 4 elements,\n # i.e., its return register, operator, first and second operands\n # an LGP program is thus a list of instructions\n\n program = []\n\n # student code begins\n\n # randomize the number of instructions of this program #\n instruction_nums = random.randint(1, max_prog_length)\n # generate the instructions\n for i in range(instruction_nums):\n if random.random() < constant_rate:\n constant = random.randint(1, n_constant)\n if random.random() < 0.5:\n operand1 = constant\n operand2 = \"r\"+str(random.randint(0, n_calculation_reg + n_input_reg - 1))\n else:\n operand1 = \"r\"+str(random.randint(0, n_calculation_reg + n_input_reg - 1))\n operand2 = constant\n else:\n operand1 = \"r\"+str(random.randint(0, n_calculation_reg + n_input_reg - 1))\n operand2 = \"r\"+str(random.randint(0, n_calculation_reg + n_input_reg - 1))\n\n register = \"r\"+str(random.randint(0, n_calculation_reg-1))\n operator = random.randint(0, n_operators-1)\n\n program.append([i, register, operator, operand1, operand2])\n\n\n # student code ends\n\n\n\n ##### 2. print the LGP program as a list of instructions\n # An instruction should be printed as, for instance r1 = r3 + r0 or r2 = r0 * 5\n\n print(\"The randomly generated LGP program is:\")\n\n # student code begins\n operators = ['+', '-', '*', '/']\n for instruction in program:\n print( instruction[1], \"=\", instruction[3], operators[ instruction[2] ], instruction[4] )\n\n\n\n # student code ends\n\n\n\n
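 # Worked example (illustrative): with operators ['+', '-', '*', '/'], the\n # encoded instruction [0, 'r1', 2, 'r3', 4] is printed by the loop above\n # as: r1 = r3 * 4\n\n ##### 3. 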
remove a program's structural intron\n program_intron_free = []\n effective_registers = ['r0']\n effective_instruction_indices = []\n\n # student code begins\n\n trace = program[::-1]\n\n # all calculation registers #\n registers = []\n for k in range(n_calculation_reg):\n registers.append(\"r\"+str(k))\n\n # workflow #\n for i in range( len(trace) ):\n if trace[i][1] == \"r0\": # find the first appeared r0 (the most outlet one)\n r0 = trace[i]\n effective_instruction_indices.append(trace[i][0])\n program_intron_free.append(r0)\n\n if r0[3] in registers:\n effective_registers.append(r0[3])\n if r0[4] in registers:\n effective_registers.append(r0[4])\n\n for j in range( i+1, len(trace) ):\n newest = program_intron_free[len(program_intron_free)-1]\n instruction = trace[j]\n if instruction[1] in registers and ( instruction[1] == newest[3] or instruction[1] == newest[4] ):\n effective_registers.append(instruction[1])\n program_intron_free.append(instruction)\n effective_instruction_indices.append(instruction[0])\n effective_registers.append(newest[3])\n effective_registers.append(newest[4])\n break\n\n\n\n\n # student code ends\n\n\n\n ##### 4. print the structual-intron free LGP program\n\n # print the indices of the effective instructions\n print(\"The indices of effective instructions are:\", effective_instruction_indices)\n\n # print the LGP program without structural intron\n print(\"The LGP program without any structual intron is:\")\n\n # student code begins\n for instruction in program_intron_free:\n print( instruction[1], \"=\", instruction[3], operators[ instruction[2] ], instruction[4] )\n\n\n\n # student code ends\n\n\n# end of main\n\n\nmain()\n","sub_path":"Assignments/a5/my/A5.py","file_name":"A5.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547800954","text":"import os\nimport shutil\nimport subprocess\nimport utilities.log as log\nimport utilities.docker_wrapper as docker\nimport utilities.config_loader as config_loader\nfrom utilities.docker_wrapper import DockerArgument, DockerArgumentType\nfrom utilities.cli import parse_args\nfrom subprocess import CalledProcessError\n\nfrom utilities.exceptions_handle import handle_called_process_error\n\n# project_path = '/media/matryoshka/Stuff/Docs/Projects/Sharp/RozkladNpuBot'\n#\n# webapi_project_path = os.path.join(project_path, 'RozkladNpuBot.WebApi')\n# script_work_dir = os.path.join(project_path, 'docker-build')\n# environment_files_path = os.path.join(script_work_dir, 'env-files')\n# nginx_certs_path = os.path.join(script_work_dir, 'nginx', 'nginx-certs')\n#\n# webapi_output_folder_name = 'release'\n# dotnet_configuration = 'Docker'\nproject_path: str\n\nwebapi_project_path: str\nscript_work_dir: str\nenvironment_files_path: str\nnginx_certs_path: str\n\nwebapi_output_folder_name: str\ndotnet_configuration: str\n\n\ndef publish_dotnet_app():\n webapi_output_folder = os.path.join(webapi_project_path, webapi_output_folder_name)\n if os.path.isdir(webapi_output_folder):\n shutil.rmtree(webapi_output_folder)\n log.info('Publishing app...')\n log.info(f'Command: dotnet publish {webapi_project_path} -c {dotnet_configuration} -o {webapi_output_folder}\\n')\n try:\n subprocess.run(\n ['dotnet', 'publish', webapi_project_path, '-c', dotnet_configuration, '-o', webapi_output_folder],\n capture_output=True, check=True)\n log.info('Done publishing\\n')\n except CalledProcessError as error:\n handle_called_process_error(error, 'Error when 
publishing dotnet app:')\n\n\ndef check_docker_env_files_existence():\n log.info(\"Checking env files existence...\\n\")\n if not os.path.isfile(os.path.join(environment_files_path, 'mysql-db.env')):\n log.error(\"Environment file for mysql does not exist\")\n exit(1)\n if not os.path.isfile(os.path.join(environment_files_path, 'mongo.env')):\n log.error(\"Environment file for mongo does not exist\")\n exit(1)\n\n\ndef get_multiple_compose_file_argument():\n docker_compose_path = os.path.join(script_work_dir, 'docker-compose.yml')\n docker_compose_build_paths_path = os.path.join(script_work_dir, 'docker-compose.build-paths.yml')\n docker_compose_current_conf_path = os.path.join(script_work_dir,\n f'docker-compose.{__run_arguments__.configuration}.yml')\n return f'-f {docker_compose_path} ' \\\n f'-f {docker_compose_build_paths_path} ' \\\n f'-f {docker_compose_current_conf_path}'\n\n\ndef docker_compose_build():\n log.info(\"Docker compose build...\\n\")\n try:\n docker.call_docker_compose([\n DockerArgument(type=DockerArgumentType.SPLIT,\n value=f\"{compose_files_arg} build\")\n ], capture_output=True, check=True)\n except CalledProcessError as error:\n handle_called_process_error(error, 'Error when docker-compose build:')\n\n\ndef docker_compose_push():\n log.info(\"Docker compose push...\\n\")\n try:\n docker.call_docker_compose([\n DockerArgument(type=DockerArgumentType.SPLIT,\n value=f\"{compose_files_arg} push\")\n ], capture_output=False, check=True)\n except CalledProcessError as error:\n handle_called_process_error(error, 'Error when trying docker-compose push:')\n\n\ndef docker_compose_up():\n try:\n docker.call_docker_compose([\n DockerArgument(type=DockerArgumentType.SPLIT,\n value=f\"{compose_files_arg} up\")\n ], capture_output=False)\n except CalledProcessError as error:\n handle_called_process_error(error, \"Error when trying docker-compose up :\")\n\n\ndef publish():\n log.info(\"Publishing...\")\n cur_dir_path = script_work_dir\n\n publish_path = os.path.join(cur_dir_path, 'publish')\n if os.path.isdir(publish_path):\n shutil.rmtree(publish_path)\n os.mkdir(publish_path)\n\n rozklad_app_publish_folder_name = 'rozklad-app-secrets'\n nginx_certs_publish_folder_name = 'rozklad-app-nginx-certs'\n os.mkdir(os.path.join(publish_path, rozklad_app_publish_folder_name))\n os.mkdir(os.path.join(publish_path, nginx_certs_publish_folder_name))\n\n secret_file_path = os.path.join(webapi_project_path, 'Properties', f'secret.{dotnet_configuration}.json')\n if not os.path.isfile(secret_file_path):\n log.error(f'File secret.{dotnet_configuration}.json does not exist')\n exit(1)\n shutil.copy(secret_file_path, os.path.join(publish_path, rozklad_app_publish_folder_name))\n\n os.mkdir(os.path.join(publish_path, 'env-files'))\n for file in os.listdir(environment_files_path):\n shutil.copy(os.path.join(environment_files_path, file), os.path.join(publish_path, 'env-files'))\n\n shutil.copy(os.path.join(cur_dir_path, 'docker-compose.yml'), publish_path)\n shutil.copyfile(os.path.join(cur_dir_path, f'docker-compose.{__run_arguments__.configuration}.yml'),\n os.path.join(publish_path, 'docker-compose.override.yml'))\n\n current_conf_nginx_certs_path = os.path.join(nginx_certs_path, __run_arguments__.configuration)\n for file in os.listdir(current_conf_nginx_certs_path):\n shutil.copy(os.path.join(current_conf_nginx_certs_path, file),\n os.path.join(publish_path, nginx_certs_publish_folder_name))\n\n shutil.copy(os.path.join(cur_dir_path, 'prepare-host.sh'), publish_path)\n 
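# descriptive note: publish/ now holds the compose files, the env files, the\n # app secret json, the nginx certs and prepare-host.sh for the target host\n 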
log.info(\"Done!\")\n\n\ndef set_configuration_vars(configurations):\n global project_path, webapi_output_folder_name, dotnet_configuration, webapi_project_path, \\\n script_work_dir, environment_files_path, nginx_certs_path\n\n project_path = configurations['project_path']\n webapi_output_folder_name = configurations['webapi_output_folder_name']\n dotnet_configuration = configurations['dotnet_configuration']\n\n webapi_project_path = os.path.join(project_path, 'RozkladNpuBot.WebApi')\n script_work_dir = os.path.join(project_path, 'docker-build')\n environment_files_path = os.path.join(script_work_dir, 'env-files')\n nginx_certs_path = os.path.join(script_work_dir, 'nginx', 'nginx-certs')\n\n\nif __name__ == '__main__':\n set_configuration_vars(config_loader.load())\n __run_arguments__ = parse_args()\n\n if __run_arguments__.configuration != 'Production':\n dotnet_configuration = 'DockerDevel'\n\n publish_dotnet_app()\n\n check_docker_env_files_existence()\n\n if __run_arguments__.configuration == 'Development':\n docker.recreate_dev_volumes(webapi_project_path, nginx_certs_path, dotnet_configuration)\n\n compose_files_arg = get_multiple_compose_file_argument()\n\n docker_compose_build()\n\n if __run_arguments__.push is True:\n docker_compose_push()\n\n if __run_arguments__.publish is True:\n publish()\n\n if __run_arguments__.up is True:\n docker_compose_up()\n","sub_path":"docker-build/build-script-source/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"334074440","text":"import sys\nimport os\nimport math\n\n# in = sys.stdin\nfin = open('A-small-attempt0.in')\n# fout = sys.stdout\nfout = open('out_1', 'w')\n\n\ndef solve(test):\n result = test[0]\n for s in test[1:]:\n \tresult = s + result if (s + result) > (result + s) else result + s\n return result \n\nif __name__ == '__main__':\n count = int(fin.readline().strip())\n\n for i in range(count):\n result = solve(fin.readline().strip())\n fout.write('Case #%s: %s\\n' % (i + 1, result))\n\n\n","sub_path":"solutions_5631989306621952_0/Python/PhilippSinitsyn/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"533367800","text":"import json\nimport glob\nimport sys\n\ndef mangle(s):\n return s.strip()[1:-1]\n\ndef cat_json(output_filename, input_filenames):\n with open(output_filename, \"w\") as outfile:\n first = True\n for infile_name in input_filenames:\n with open(infile_name) as infile:\n if first:\n outfile.write('[')\n first = False\n else:\n outfile.write(',')\n outfile.write(mangle(infile.read()))\n outfile.write(']')\n\n\n\nif __name__ == '__main__':\n output_filename = str(sys.argv[1])\n path_to_rawdocs = str(sys.argv[2])\n\n filenames = glob.glob(path_to_rawdocs+\"/*.json\")\n\n input_filenames = []\n for file in filenames:\n input_filenames.append(file)\n\n cat_json(output_filename,input_filenames)\n\n","sub_path":"rank_documents/mergejson.py","file_name":"mergejson.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"162955319","text":"#!/usr/bin/env python\n\n# Copyright (C) 2017 Udacity Inc.\n#\n# This file is part of Robotic Arm: Pick and Place project for Udacity\n# Robotics nano-degree program\n#\n# All Rights Reserved.\n\n# Author: Harsh Pandya\n\n# Import modules\nimport 
rospy\nimport pcl\nimport numpy as np\nimport math\nimport ctypes\nimport struct\nimport sensor_msgs.point_cloud2 as pc2\nimport matplotlib.colors\nimport matplotlib.pyplot as plt\n\nfrom sensor_msgs.msg import PointCloud2, PointField\nfrom std_msgs.msg import Header\nfrom random import randint\nfrom rospy_message_converter import message_converter\nimport yaml\n\ndef random_color_gen():\n \"\"\" Generates a random color\n\n Args: None\n\n Returns:\n list: 3 elements, R, G, and B\n \"\"\"\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]\n\n\ndef ros_to_pcl(ros_cloud):\n \"\"\" Converts a ROS PointCloud2 message to a pcl PointXYZRGB\n\n Args:\n ros_cloud (PointCloud2): ROS PointCloud2 message\n\n Returns:\n pcl.PointCloud_PointXYZRGB: PCL XYZRGB point cloud\n \"\"\"\n points_list = []\n\n for data in pc2.read_points(ros_cloud, skip_nans=True):\n points_list.append([data[0], data[1], data[2], data[3]])\n\n pcl_data = pcl.PointCloud_PointXYZRGB()\n pcl_data.from_list(points_list)\n\n return pcl_data\n\n\ndef pcl_to_ros(pcl_array):\n \"\"\" Converts a pcl PointXYZRGB to a ROS PointCloud2 message\n\n Args:\n pcl_array (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud\n\n Returns:\n PointCloud2: A ROS point cloud\n \"\"\"\n ros_msg = PointCloud2()\n\n ros_msg.header.stamp = rospy.Time.now()\n ros_msg.header.frame_id = \"world\"\n\n ros_msg.height = 1\n ros_msg.width = pcl_array.size\n\n ros_msg.fields.append(PointField(\n name=\"x\",\n offset=0,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"y\",\n offset=4,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"z\",\n offset=8,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"rgb\",\n offset=16,\n datatype=PointField.FLOAT32, count=1))\n\n ros_msg.is_bigendian = False\n ros_msg.point_step = 32\n ros_msg.row_step = ros_msg.point_step * ros_msg.width * ros_msg.height\n ros_msg.is_dense = False\n buffer = []\n\n for data in pcl_array:\n s = struct.pack('>f', data[3])\n i = struct.unpack('>l', s)[0]\n pack = ctypes.c_uint32(i).value\n\n r = (pack & 0x00FF0000) >> 16\n g = (pack & 0x0000FF00) >> 8\n b = (pack & 0x000000FF)\n\n buffer.append(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))\n\n ros_msg.data = \"\".join(buffer)\n\n return ros_msg\n\n\ndef XYZRGB_to_XYZ(XYZRGB_cloud):\n \"\"\" Converts a PCL XYZRGB point cloud to an XYZ point cloud (removes color info)\n\n Args:\n XYZRGB_cloud (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud\n\n Returns:\n PointCloud_PointXYZ: A PCL XYZ point cloud\n \"\"\"\n XYZ_cloud = pcl.PointCloud()\n points_list = []\n\n for data in XYZRGB_cloud:\n points_list.append([data[0], data[1], data[2]])\n\n XYZ_cloud.from_list(points_list)\n return XYZ_cloud\n\n\ndef XYZ_to_XYZRGB(XYZ_cloud, color):\n \"\"\" Converts a PCL XYZ point cloud to a PCL XYZRGB point cloud\n\n All returned points in the XYZRGB cloud will be the color indicated\n by the color parameter.\n\n Args:\n XYZ_cloud (PointCloud_XYZ): A PCL XYZ point cloud\n color (list): 3-element list of integers [0-255,0-255,0-255]\n\n Returns:\n PointCloud_PointXYZRGB: A PCL XYZRGB point cloud\n \"\"\"\n XYZRGB_cloud = pcl.PointCloud_PointXYZRGB()\n points_list = []\n\n float_rgb = rgb_to_float(color)\n\n for data in XYZ_cloud:\n points_list.append([data[0], data[1], data[2], float_rgb])\n\n XYZRGB_cloud.from_list(points_list)\n return XYZRGB_cloud\n\n\ndef rgb_to_float(color):\n \"\"\" 
Converts an RGB list to the packed float format used by PCL\n\n From the PCL docs:\n \"Due to historical reasons (PCL was first developed as a ROS package),\n the RGB information is packed into an integer and casted to a float\"\n\n Args:\n color (list): 3-element list of integers [0-255,0-255,0-255]\n\n Returns:\n float_rgb: RGB value packed as a float\n \"\"\"\n hex_r = (0xff & color[0]) << 16\n hex_g = (0xff & color[1]) << 8\n hex_b = (0xff & color[2])\n\n hex_rgb = hex_r | hex_g | hex_b\n\n float_rgb = struct.unpack('f', struct.pack('i', hex_rgb))[0]\n\n return float_rgb\n\n\ndef float_to_rgb(float_rgb):\n \"\"\" Converts a packed float RGB format to an RGB list\n\n Args:\n float_rgb: RGB value packed as a float\n\n Returns:\n color (list): 3-element list of integers [0-255,0-255,0-255]\n \"\"\"\n s = struct.pack('>f', float_rgb)\n i = struct.unpack('>l', s)[0]\n pack = ctypes.c_uint32(i).value\n\n r = (pack & 0x00FF0000) >> 16\n g = (pack & 0x0000FF00) >> 8\n b = (pack & 0x000000FF)\n\n color = [r,g,b]\n\n return color\n\n\ndef get_color_list(cluster_count):\n \"\"\" Returns a list of randomized colors\n\n Args:\n cluster_count (int): Number of random colors to generate\n\n Returns:\n (list): List containing 3-element color lists\n \"\"\"\n if (cluster_count > len(get_color_list.color_list)):\n for i in xrange(len(get_color_list.color_list), cluster_count):\n get_color_list.color_list.append(random_color_gen())\n return get_color_list.color_list\n\n# Helper function to create a yaml friendly dictionary from ROS messages\ndef make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):\n yaml_dict = {}\n yaml_dict[\"test_scene_num\"] = test_scene_num.data\n yaml_dict[\"arm_name\"] = arm_name.data\n yaml_dict[\"object_name\"] = object_name.data\n yaml_dict[\"pick_pose\"] = message_converter.convert_ros_message_to_dictionary(pick_pose)\n yaml_dict[\"place_pose\"] = message_converter.convert_ros_message_to_dictionary(place_pose)\n return yaml_dict\n\n# Helper function to output to yaml file\ndef send_to_yaml(yaml_filename, dict_list):\n data_dict = {\"object_list\": dict_list}\n with open(yaml_filename, 'w') as outfile:\n yaml.dump(data_dict, outfile, default_flow_style=False)\n\n#############################################################################\n# Some extra helper functions needed for the perception pipeline\n# Author: Wilbert Pumacay - a.k.a Daru\n#############################################################################\n\n\"\"\"\nTransforms one histogram to another with smaller bin size\n\n: param hist : source histogram\n: param nbins : target number of bins of the transformed histogram\n\"\"\"\ndef hist2hist( hist, nbins ) :\n assert ( len( hist ) >= nbins )\n\n _rmin = np.min( hist )\n _rmax = np.max( hist )\n\n _newhist = np.zeros( nbins )\n _newedges = np.linspace( _rmin, _rmax, num = ( nbins + 1 ), endpoint = True )\n \n # compute bin sizes, new and old, for indexing\n _newbinsize = ( _rmax - _rmin ) / nbins\n _oldbinsize = ( _rmax - _rmin ) / len( hist )\n\n for i in range( nbins ) :\n _startIndx = int( math.floor( _newedges[i] / _oldbinsize ) )\n _stopIndx = int( math.floor( _newedges[i + 1] / _oldbinsize ) - 1 )\n _newhist[i] = hist[ _startIndx : ( _stopIndx + 1 ) ].sum()\n\n return _newhist\n\n\"\"\"\nPlots a histogram returned from numpy.histogram\nAdapted from this post: https://stackoverflow.com/questions/5328556/histogram-matplotlib\n\n: param hist : numpy histogram\n: param rmin : min range for the values of the histogram\n: param rmax : 
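# Minimal round-trip check (illustrative) of the bit-reinterpretation trick
# behind rgb_to_float / float_to_rgb above: only the raw 32 bits move, so
# the RGB triple survives the int -> float -> int trip exactly.
import ctypes
import struct

def _pack_rgb(color):
    value = (color[0] << 16) | (color[1] << 8) | color[2]
    return struct.unpack('f', struct.pack('i', value))[0]

def _unpack_rgb(float_rgb):
    bits = ctypes.c_uint32(struct.unpack('>l', struct.pack('>f', float_rgb))[0]).value
    return [(bits >> 16) & 0xFF, (bits >> 8) & 0xFF, bits & 0xFF]

assert _unpack_rgb(_pack_rgb([10, 200, 30])) == [10, 200, 30]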
max range for the values of the histogram\n: param title : optional title for the histogram\n\"\"\"\ndef plotHistogram( hist, rmin, rmax, title = 'empty title' ) :\n _nbins = len( hist )\n _bins = np.linspace( rmin, rmax, num = ( _nbins + 1 ), endpoint = True )\n _widths = np.diff( _bins )\n _centers = ( _bins[:-1] + _bins[1:] ) / 2.0\n \n plt.figure()\n plt.bar( _centers, hist, align = 'center', width = _widths )\n plt.title( title )\n # plt.xticks( _bins )\n\n\"\"\"\nNormalizes a histogram to have cumsum = 1 ( percentages instead of frequencies )\n\n: param hist : histogram to normalize\n\"\"\"\ndef normalizeHistogram( hist ) :\n return hist / float( np.sum( hist ) )\n\n#############################################################################\n# Some extra helper functions needed for the perception pipeline\n# Author: Wilbert Pumacay - a.k.a Daru\n#############################################################################\n\n\"\"\"\nConverts a list of rgb values to a list of hsv values\n\n: param rgbList : rgb list ( 0 - 255 ) to convert to hsv\n\"\"\"\ndef rgb2hsv( rgbList ) :\n _rgbNormalized = [ 1.0 * rgbList[0] / 255, \n 1.0 * rgbList[1] / 255, \n 1.0 * rgbList[2] / 255 ]\n _hsvNormalized = matplotlib.colors.rgb_to_hsv( [ [ _rgbNormalized ] ] )[0][0]\n return _hsvNormalized\n\n\"\"\"\nComputes a normalized feature vector ...\nfrom the histograms of the buffers in buffer_list\n\n:param buffer_list: a list of the buffers to use for the histograms\n:param nbins: number of bins to generate the histograms\n\"\"\"\ndef _featuresFromBuffers( buffer_list, nbins, ranges ) :\n # compute histograms\n _hists = []\n for _buffer in buffer_list :\n _hist, _ = np.histogram( _buffer, bins = nbins, range = ranges )\n _hists.append( _hist )\n \n # concatenate into single feature vector\n _featureVector = np.concatenate( _hists ).astype( np.float64 )\n\n # normalize feature vector\n _normalizedFeatureVector = _featureVector / np.sum( _featureVector )\n\n return _normalizedFeatureVector\n\n\"\"\"\nComputes a feature vector from the color histograms of the given cloud\n\n:param cloud : ros cloud with color information on it\n:param using_hsv : flag to whether or not to use hsv colorspace instead\n:param nbins : number of bins to use as the size of the histogram\n\"\"\"\ndef computeColorHistograms(cloud, using_hsv=True, nbins = 255):\n point_colors_list = []\n\n # Step through each point in the point cloud\n for point in pc2.read_points( cloud, skip_nans = True ) :\n rgb_list = float_to_rgb( point[3] )\n if using_hsv :\n point_colors_list.append( rgb2hsv( rgb_list ) * 255 )\n else :\n point_colors_list.append( rgb_list )\n\n # Populate lists with color values\n channel_1_vals = []\n channel_2_vals = []\n channel_3_vals = []\n\n for color in point_colors_list:\n channel_1_vals.append( color[0] )\n channel_2_vals.append( color[1] )\n channel_3_vals.append( color[2] )\n \n # Compute feature vector - use 0 to 255 as range\n normed_features = _featuresFromBuffers( [ channel_1_vals, channel_2_vals, channel_3_vals ], nbins, ( 0., 255. 
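# Illustrative usage of _featuresFromBuffers above (assuming the record's
# helpers are in scope): three fake colour channels become one concatenated
# feature vector that sums to 1 after normalisation.
import numpy as np
channels = [np.random.randint(0, 256, size=100) for _ in range(3)]
features = _featuresFromBuffers(channels, 16, (0., 255.))
assert features.shape == (48,) and abs(features.sum() - 1.0) < 1e-9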
) )\n\n return normed_features \n\n\"\"\"\nComputes a feature vector from the normals histograms of the given cloud\n\n:param cloud : ros cloud with normals information on it\n:param nbins : number of bins to use as the size of the histogram\n\"\"\"\ndef computeNormalHistograms( normal_cloud, nbins = 250 ):\n norm_x_vals = []\n norm_y_vals = []\n norm_z_vals = []\n\n for norm_component in pc2.read_points( normal_cloud,\n field_names = ( 'normal_x', 'normal_y', 'normal_z' ),\n skip_nans = True ):\n norm_x_vals.append( norm_component[0] )\n norm_y_vals.append( norm_component[1] )\n norm_z_vals.append( norm_component[2] )\n\n # Compute feature vector - use -1 to 1 as range\n normed_features = _featuresFromBuffers( [ norm_x_vals, norm_y_vals, norm_z_vals ], nbins, ( -1., 1. ) )\n\n return normed_features\n","sub_path":"pr2_robot/scripts/perception/PUtils.py","file_name":"PUtils.py","file_ext":"py","file_size_in_byte":12119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107340158","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# by JeffMa at http://devework.com/\n\"\"\"\n第 0024 题: 使用 Python 的 Web 框架,做一个 Web 版本 TodoList 应用。\n\n\"\"\"\n\nimport sqlite3, sys, sae\nfrom bottle import route, run, debug, template, request, static_file, error, redirect\n\napp = Bottle()\n\n# 强制编码\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n# 全局连接数据库\nconn = sqlite3.connect('db/todo.db')\n\n# 中文编码hack\nconn.text_factory = str\n\n\n# ----------------------------------------------------\n# 首页\n@route('/')\n@route('/todo')\ndef todo_list():\n c = conn.cursor()\n c.execute(\"SELECT id, task FROM todo WHERE status LIKE '1'\")\n result = c.fetchall()\n c.close()\n output = template(\"tpl/make_table\", rows=result)\n return output\n\n# ----------------------------------------------------\n# 已完成页面\n@route(\"/done\")\ndef show_done():\n c = conn.cursor()\n c.execute(\"SELECT id, task FROM todo WHERE status LIKE 0\")\n result = c.fetchall()\n c.close()\n output = template(\"tpl/show_done\", rows=result)\n return output\n\n# ----------------------------------------------------\n# 新建任务\n@route('/new', method='GET')\ndef new_item():\n if request.GET.get('save','').strip():\n new = request.GET.get('task', '').strip()\n c = conn.cursor()\n c.execute(\"INSERT INTO todo (task,status) VALUES (?,?)\", (new,1))\n new_id = c.lastrowid\n conn.commit()\n c.close()\n output = template(\"tpl/notice\",msg = '已添加到数据库中!' , no=new_id)\n return output\n else:\n return template('tpl/new_task.tpl')\n\n# ----------------------------------------------------\n# 编辑\n@route('/edit/:no', method='GET')\ndef edit_item(no):\n if request.GET.get('save','').strip():\n edit = request.GET.get('task','').strip()\n status = request.GET.get('status','').strip()\n\n if status == 'open':\n status = 1\n else:\n status = 0\n\n c = conn.cursor()\n c.execute(\"UPDATE todo SET task = ?, status = ? WHERE id LIKE ?\" ,(edit, status, no))\n conn.commit()\n\n output = template(\"tpl/notice\",msg = '已成功更新!' 
, no=no)\n return output\n else:\n c = conn.cursor()\n c.execute(\"SELECT task FROM todo WHERE id LIKE ?\" ,[str(no)])\n cur_data = c.fetchone()\n\n return template('tpl/edit_task', old=cur_data, no=no)\n\n\n\n@route('/help')\ndef help():\n return static_file('help.html', root='static/')\n\n@route('/json:json#[1-9]+#')\ndef show_json(json):\n c = conn.cursor()\n c.execute(\"SELECT task FROM todo WHERE id LIKE ?\", (json))\n result = c.fetchall()\n c.close()\n\n if not result:\n return {'task':'对应的待办事项不存在'}\n else:\n return {'Task': result[0]}\n\n@error(403)\ndef mistake403(code):\n return '参数格式错误!'\n\n@error(404)\ndef mistake404(code):\n return '该页面不存在!'\n\n@route('/static/:filename')\ndef serve_static(filename):\n return static_file(filename, root='./static/')\n\ndebug(True)\nrun(host='localhost', port=8800, reloader=True)\n\napplication = sae.create_wsgi_app(app)","sub_path":"0024/index.wsgi","file_name":"index.wsgi","file_ext":"wsgi","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"415147721","text":"from django.test import TestCase\nfrom mixer.backend.django import mixer\n\nfrom .models import (\n DepartmentUser, Computer, Mobile, EC2Instance, FreshdeskTicket,\n FreshdeskConversation, FreshdeskContact)\n\n\nclass DepartmentUserTestCase(TestCase):\n\n def setUp(self):\n mixer.blend(DepartmentUser, photo=None)\n self.du = DepartmentUser.objects.first()\n\n def test_save(self):\n \"\"\"Test save() override for DepartmentUser\n \"\"\"\n self.du.employee_id = '1'\n self.du.save() # employee_id should be left-padded with zeroes.\n self.assertEqual(self.du.employee_id, '000001')\n self.du.employee_id = 'n/a'\n self.du.save() # employee_id should be set to None.\n self.assertFalse(self.du.employee_id)\n","sub_path":"tracking/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"82453891","text":"import requests\r\nimport bs4\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport shutil\r\n\r\n#logging settings:\r\nimport botlogger as bl\r\nLOG_FORMAT = bl.LOG_FORMAT\r\nlogger = bl.configLogger()\r\n\r\n\r\nbase_url = \"https://imdb.com\"\r\nPATH = 'mediagraphs'\r\n\r\n# return episode code es. 
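# Note on index.wsgi above: it calls Bottle() without importing the class,
# and its @route decorators register handlers on bottle's default app, not
# on `app`. A minimal corrected skeleton (illustrative only):
from bottle import Bottle

app = Bottle()

@app.route('/')
@app.route('/todo')
def todo_list():
    return 'todo'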
s4 ep 1 --> 401\r\ndef getEpcode(season_counter, episode_counter):\r\n season_counter_str = str(season_counter)\r\n episode_counter_str = str(episode_counter)\r\n\r\n if len(episode_counter_str) == 1:\r\n episode_counter_str = \"0\" + episode_counter_str\r\n\r\n epcode_str = season_counter_str + episode_counter_str \r\n\r\n epcode = int(epcode_str)\r\n\r\n return epcode\r\n\r\ndef get_page_contents(url):\r\n page = requests.get(url, headers={\"Accept-Language\": \"en-US\"})\r\n return bs4.BeautifulSoup(page.text, \"html.parser\")\r\n\r\ndef parse(word):\r\n #conveniently parse keyword\r\n parsed = word.replace(\" \", \"+\")\r\n return parsed\r\n\r\n#find title page given a keyword, return the url of that page\r\ndef findPage(keyword='mr robot', index=0):\r\n\r\n URL = \"https://www.imdb.com/find?q=\" + parse(keyword)\r\n\r\n soup = get_page_contents(URL)\r\n \r\n # list containing all the titles found with that keyword\r\n titles_names = soup.findAll('td', class_='result_text')\r\n\r\n # create a list of all the href of results\r\n hrefs = []\r\n for title in titles_names:\r\n # print(title)\r\n href = title.find('a')['href']\r\n hrefs.append(href)\r\n\r\n # in case of bad result user should be able to change this index\r\n best_url = \"https://imdb.com\" + hrefs[index]\r\n\r\n return best_url\r\n\r\n# dictionary that will be returned in order to access data by key\r\ndata_dic = {}\r\n\r\n#get info about a title and return info in a dictionary\r\ndef titleInfo(url):\r\n #add series link to dictionary\r\n data_dic['url'] = url\r\n\r\n #parse contents\r\n soup = get_page_contents(url)\r\n\r\n # get full title\r\n title_div = soup.find('div', class_='title_wrapper')\r\n title = title_div.find('h1').text\r\n data_dic['title'] = title.rstrip()\r\n\r\n # get image div and link and then insert it in the dictionary\r\n image_div = soup.find('div', class_='poster')\r\n image_link = image_div.find('img')['src']\r\n data_dic['image_link'] = image_link\r\n\r\n # get overall title rating\r\n overall_rating_div = soup.find('div', 'ratingValue')\r\n overall_rating = overall_rating_div.find('span').text\r\n data_dic['overall_rating'] = overall_rating\r\n\r\n # get summary\r\n summary = soup.find('div', class_='summary_text').text\r\n summary = summary.strip('/n')\r\n summary = summary.rstrip()\r\n summary = summary[::-1]\r\n summary = summary.rstrip()\r\n summary = summary[::-1]\r\n data_dic['summary'] = summary\r\n\r\n # get season urls\r\n season_selector = soup.find('div', class_='seasons-and-year-nav')\r\n season_a_list = season_selector.findAll('a')\r\n season_urls = []\r\n # season here also contain division by year\r\n for season in season_a_list:\r\n url = \"https://imdb.com\" + season['href']\r\n # eliminate year divisions and ovas\r\n if url.find('season') != -1 and url.find('-1') == -1:\r\n season_urls.append(url)\r\n data_dic['seasons_urls'] = season_urls\r\n\r\n# plot episodes/reviews graph\r\ndef plotRatings(rating_tuple):\r\n ep_numbers = []\r\n ratings = []\r\n for rating in rating_tuple:\r\n ep_numbers.append(rating[0])\r\n ratings.append(rating[1])\r\n\r\n n_of_episodes = []\r\n for i in range(len(ep_numbers)):\r\n n_of_episodes.append(i+1)\r\n\r\n plt.figure(figsize=(12, 6), facecolor='w', edgecolor='k')\r\n\r\n plt.plot(n_of_episodes, ratings)\r\n plt.scatter(n_of_episodes, ratings)\r\n plt.xticks(n_of_episodes, ep_numbers, rotation='vertical')\r\n title = data_dic['title'] + \"'s episodes reviews over time\"\r\n plt.title(title)\r\n\r\n plt.savefig(os.path.join(PATH, 
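# The zero-padding in getEpcode above reduces to season * 100 + episode
# (both versions assume at most 99 episodes per season); an equivalent
# arithmetic one-liner, shown for illustration:
def get_epcode(season, episode):
    return season * 100 + episode

assert get_epcode(4, 1) == 401 and get_epcode(10, 12) == 1012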
'graph.png'))\r\n\r\n# create a list of tuples containing episodes number and ratings and plot them\r\ndef getRatings():\r\n\r\n season_url_list = data_dic['seasons_urls']\r\n season_counter = 1\r\n\r\n ratings = []\r\n for url in reversed(season_url_list):\r\n soup = get_page_contents(url)\r\n\r\n episodes_block = soup.find('div', class_='list detail eplist')\r\n rating_block = episodes_block.findAll('div', class_='ipl-rating-star small')\r\n\r\n \r\n episode_counter = 1\r\n for episode in rating_block:\r\n rating = episode.find('span', 'ipl-rating-star__rating').text\r\n\r\n # get episode code\r\n ep_code = getEpcode(season_counter, episode_counter)\r\n\r\n ratings.append((ep_code, float(rating)))\r\n\r\n episode_counter += 1\r\n\r\n season_counter += 1\r\n \r\n plotRatings(ratings)\r\n\r\n #rating_span = rating_block.find('span', class_='ipl-rating-star__rating')\r\n\r\n# clear folder\r\ndef clear_folder():\r\n\tfor filename in os.listdir(PATH):\r\n\t\tfile_path = os.path.join(PATH, 'graph.png')\r\n\t\ttry:\r\n\t\t\tif os.path.isfile(file_path) or os.path.islink(file_path):\r\n\t\t\t\tos.unlink(file_path)\r\n\t\t\telif os.path.isdir(file_path):\r\n\t\t\t\tshutil.rmtree(file_path)\r\n\t\texcept Exception as e:\r\n\t\t\tlogger.warning('Failed to delete %s. Reason: %s' % (file_path, e))\r\n\r\n# return the dictionary\r\ndef dic_pass(data_dic):\r\n return data_dic\r\n\r\n\r\ndef discord_call(keyword, index=0):\r\n url = findPage(keyword, index)\r\n titleInfo(url)\r\n getRatings()\r\n data = dic_pass(data_dic)\r\n return data\r\n\r\n","sub_path":"imdb_plotter.py","file_name":"imdb_plotter.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"35498823","text":"import os, shutil, sys, getopt\nimport urllib.request\nimport json\n\nclass ModDownloader:\n def __init__(self, modName):\n self.factorioFolderDir = None\n\n opts, args = getopt.getopt(sys.argv[1:], \":m:\", ['dir='])\n for opt, arg in opts:\n if opt in ('-m', '--factoriodir'):\n self.factorioFolderDir = os.path.realpath(arg.strip())\n\n if self.factorioFolderDir == None:\n self.factorioFolderDir = \"{0}/Factorio/\".format(os.getenv('APPDATA'))\n self.modFolderDir = \"{0}mods/\".format(self.factorioFolderDir)\n\n self.modData = self.__getModAPI(modName)\n self.userData = self.__getUserData()\n\n def __str__(self):\n return json.dumps(self.modData, indent=4, sort_keys=True)\n\n def getReleases(self):\n return self.modData[\"releases\"]\n\n def getLatestModVersion(self, factorioVersion = None):\n fixedFactorioVersion = False if factorioVersion is None else True\n \n modReleases = self.getReleases()\n\n latestReleaseFactorioVersion = factorioVersion\n latestReleaseModVersion = None\n\n for release in modReleases:\n # compare factorio release version\n releaseFactorioVersion = release[\"info_json\"][\"factorio_version\"]\n if not self.__compareVersion(latestReleaseFactorioVersion, releaseFactorioVersion):\n continue # release is for an older version of factorio\n if fixedFactorioVersion:\n if not self.__compareVersion(releaseFactorioVersion, factorioVersion):\n continue # release is for a newer version of factorio\n else:\n if not self.__compareVersion(releaseFactorioVersion, latestReleaseFactorioVersion):\n # newer factorioVersion detected\n latestReleaseFactorioVersion = releaseFactorioVersion\n latestReleaseModVersion = release[\"version\"]\n continue # no need to compare further\n\n # compare mod release version\n releaseModVersion = 
release[\"version\"]\n if not self.__compareVersion(latestReleaseModVersion, releaseModVersion):\n continue # release is for an older version of the mod\n if self.__compareVersion(releaseModVersion, latestReleaseModVersion):\n continue # release is for the same version of the mod\n latestReleaseModVersion = releaseModVersion\n\n if latestReleaseModVersion is None:\n latestReleaseFactorioVersion = None\n return [latestReleaseFactorioVersion, latestReleaseModVersion]\n\n def downloadModVersion(self, factorioVersion = None, modVersion = None):\n print(\"Updating '{0}'\".format(self.modData[\"name\"]))\n\n # delete any local versions\n self.__deleteAllVersions(self.modData[\"name\"])\n\n # download the most suitable release\n if factorioVersion is None:\n modVersion = None\n if modVersion is None:\n factorioVersion, modVersion = self.getLatestModVersion(factorioVersion)\n modRelease = self.__getReleaseInfo(factorioVersion, modVersion)\n print(\" Creating '{0}'\".format(modRelease[\"file_name\"]))\n modDownloadUrl = 'http://mods.factorio.com{0}?username={1}&token={2}'.format(modRelease[\"download_url\"], self.userData[0], self.userData[1])\n modDownloadRequest = urllib.request.Request(modDownloadUrl, headers={'User-Agent': 'Mozilla/5.0'})\n with urllib.request.urlopen(modDownloadRequest) as downloadFile:\n with open('{0}{1}'.format(self.modFolderDir, modRelease[\"file_name\"]), 'wb+') as zipFile:\n zipFile.write(downloadFile.read())\n\n def download(self):\n return self.downloadModVersion()\n\n def __getUserData(self):\n with open('{}/player-data.json'.format(self.factorioFolderDir)) as playerDataFile:\n playerData = json.load(playerDataFile)\n return [playerData[\"service-username\"], playerData[\"service-token\"]]\n\n def __getModAPI(self, modName):\n apiQuerry = urllib.request.Request('http://mods.factorio.com/api/mods/{0}'.format(modName))\n try:\n apiRequest = urllib.request.urlopen(apiQuerry).read()\n apiContent = json.loads(apiRequest.decode('utf-8'))\n apiMessage = apiContent[\"message\"]\n except urllib.request.HTTPError:\n raise NameError(\"Could not find a mod named '{0}'.\".format(modName))\n except KeyError:\n apiMessage = None\n if apiMessage == \"Mod not found\":\n raise NameError(\"Could not find a mod named '{0}'.\".format(modName))\n return apiContent\n\n def __compareVersion(self, version1, version2):\n \"\"\" Returns true if version 2 >= version 1 \"\"\"\n # test version validity\n if version1 is None:\n return True\n if version2 is None:\n return False\n\n # prematurely test equality\n if version1 == version2:\n return True\n\n # compare versions graduately\n version1 = version1.split('.')\n version2 = version2.split('.')\n for version in range(0, len(version1)):\n if version2[version] < version1[version]:\n return False\n if version2[version] > version1[version]:\n return True\n\n def __getReleaseInfo(self, factorioVersion, modVersion):\n modReleases = self.getReleases()\n for release in modReleases:\n if factorioVersion == release[\"info_json\"][\"factorio_version\"] and\\\n modVersion == release[\"version\"]:\n return release\n\n def __deleteAllVersions(self, modName, deleteZip=True):\n # deleting folders\n folders = [folderName for folderName in next(os.walk(self.modFolderDir))[1] if folderName.find(modName) >= 0]\n for folder in folders:\n print(\" Removing '{0}/'\".format(folder))\n shutil.rmtree(self.modFolderDir + folder)\n\n # deleting zip folders\n folders = [folderName for folderName in os.listdir(self.modFolderDir) if deleteZip and folderName.find(modName) >= 0 
]\n for folder in folders:\n print(\" Removing '{0}'\".format(folder))\n os.remove(self.modFolderDir + folder)\n\nif __name__ == \"__main__\":\n bobmods = {\n \"bobassembly\" : True,\n \"bobclasses\" : True,\n \"bobelectronics\" : True,\n \"bobenemies\" : True,\n \"bobequipment\" : True,\n \"bobgreenhouse\" : True,\n \"bobinserters\" : True,\n \"boblibrary\" : True,\n \"boblogistics\" : True,\n \"bobmining\" : True,\n \"bobmodules\" : True,\n \"bobores\" : True,\n \"bobplates\" : True,\n \"bobpower\" : True,\n \"bobrevamp\" : True,\n \"bobtech\" : True,\n \"bobvehicleequipment\": True,\n \"bobwarfare\" : True\n }\n for bobmod in bobmods:\n ModDownloader(bobmod).download()","sub_path":"build_bobmods.py","file_name":"build_bobmods.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285563096","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nclass EPLTableItem(scrapy.Item):\n\n Season = scrapy.Field() # season year for league table\n Club = scrapy.Field() # club's name for position in the league table\n Goal_Difference = scrapy.Field() # each club's goal difference for the season\n Points = scrapy.Field() # each club's total point tally for the season\n Position = scrapy.Field() # club's final position in the league table","sub_path":"Scrapping/Spiders/EPL_table/EPL_table/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"230306322","text":"import os\nimport contextlib\nimport unittest\nfrom tests import fixtures\nfrom vai.models import EditorState\n\nclass TestConfiguration(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls._editor_state_path_orig = EditorState.editorStatePath\n\n @classmethod\n def tearDownClass(cls):\n EditorState.editorStatePath = cls._editor_state_path_orig\n\n def tearDown(self):\n EditorState._instance = None\n\n def testInit(self):\n f = fixtures.tempFile(\"not_there_editor_state\")\n EditorState.editorStatePath = lambda x : f\n state = EditorState.instance()\n self.assertEqual(state._state, {})\n\n def testSaveBufferCursorPos(self):\n f = fixtures.tempFile(\"editor_state\")\n EditorState.editorStatePath = lambda x : f\n state = EditorState.instance()\n\n state.setCursorPosForPath(\"foobar\", (1,2))\n self.assertEqual(state.cursorPosForPath('foobar'), (1,2))\n\n state.save()\n self.assertTrue(os.path.exists(f))\n with contextlib.closing(open(f, \"r\")) as tmp:\n # Just checking lengths. 
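# Illustrative use of EPLTableItem above (assuming scrapy is installed and
# the item class is importable): scrapy Items behave like dicts whose keys
# are restricted to the declared Fields.
item = EPLTableItem(Season='2018-19', Club='Liverpool', Points=97)
item['Position'] = 2
# item['Manager'] = '...'   # would raise KeyError: field not declared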
the order is arbitrary.\n self.assertEqual(len(tmp.read()),\n len(\"{'buffers': [{'absolute_path': 'foobar', 'cursor_pos': [1, 2]}]}\"))\n\n def testGetBufferCursorPos(self):\n f = fixtures.get(\"editor_state\")\n EditorState.editorStatePath = lambda x : f\n state = EditorState.instance()\n\n self.assertEqual(state.cursorPosForPath('foobar'), (1,2))\n self.assertEqual(state.cursorPosForPath('notpresent'), None)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/vai/models/test_EditorState.py","file_name":"test_EditorState.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277956084","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef policy(state, theta):\n \"\"\"\n\n Parameters\n ----------\n state : numpy array\n contains state of cartpole environment.\n theta : numpy array\n contains parameters of linear features\n\n Returns\n -------\n numpy array\n return output of softmax function\n\n \"\"\"\n z = state.dot(theta)\n exp = np.exp(z)\n return exp/np.sum(exp)\n\n\n\ndef generate_episode(env, theta, display=False):\n \"\"\" enerates one episode and returns the list of states, the list of rewards and the list of actions of that episode \"\"\"\n state = env.reset()\n states = [state]\n actions = []\n rewards = []\n for t in range(500):\n if display:\n env.render()\n \n p = policy(state, theta)\n action = np.random.choice(len(p), p=p)\n state, reward, done, info = env.step(action)\n rewards.append(reward)\n actions.append(action)\n if done:\n break\n states.append(state)\n\n return states, rewards, actions\n\n\ndef REINFORCE(env):\n \n \n # policy parameters\n alpha = 0.025\n gamma = 0.99\n n_episodes = 800\n theta = np.random.rand(4, 2) \n \n # init lists to store rewards of each episode and means of last 100 episodes \n last_100_episodes = []\n episodes = []\n means = []\n \n for e in range(n_episodes):\n \n # render env every x steps\n if e % 100 == 0:\n states, rewards, actions = generate_episode(env, theta, True)\n else:\n states, rewards, actions = generate_episode(env, theta, False)\n\n \n # keep track of previous 100 episode lengths\n if e < 100:\n last_100_episodes.append(sum(rewards))\n else:\n last_100_episodes.append(sum(rewards))\n last_100_episodes.pop(0)\n \n # compute mean\n mean = np.mean(last_100_episodes)\n means.append(mean)\n episodes.append(e)\n \n # learning rate decay\n if e % 200 == 0:\n # alpha = alpha/2\n if mean > 495:\n alpha = 0.00001 # slow down learning if mean of last 100 episodes is 500\n if mean < 495:\n alpha = 0.025\n # print mean every 100 episodes \n if e % 100 == 0 or e == (n_episodes - 1):\n print(\"episode: \" + str(e) + \" Mean of last 100 episodes: \" + str(mean)) \n \n # REINFORCE Algorithm\n steps = len(states) # length of episode\n G_t = np.zeros([steps]) # init G_t\n for t in range(steps):\n # MC sampling of G_t\n for k in range(t+1,steps+1):\n G_t[t] += np.power(gamma,k-t-1) * rewards[k-1]\n pi = policy(states[t], theta)\n action = actions[t]\n # update rule\n theta[:,action] = theta[:,action] + alpha * np.power(gamma, t) * G_t[t] * (states[t] * (1 - pi[action]))\n \n # create plot\n plt.plot(episodes,means,'b')\n plt.xlabel(\"Episodes\")\n plt.ylabel(\"Mean of last 100 episodes\")\n plt.title(\"REINFORCE\")\n\ndef main():\n env = gym.make('CartPole-v1')\n REINFORCE(env)\n env.close()\n\n\nif __name__ == \"__main__\":\n 
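# Quick property check of policy() above (illustrative): the softmax of
# state.dot(theta) is always a valid probability distribution, which is
# what lets generate_episode sample actions from it directly.
import numpy as np
state, theta = np.random.rand(4), np.random.rand(4, 2)
z = np.exp(state.dot(theta))
p = z / np.sum(z)
assert p.shape == (2,) and abs(p.sum() - 1.0) < 1e-12 and (p > 0).all()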
main()\n","sub_path":"ex08-pg/ex08-pg-tutor.py","file_name":"ex08-pg-tutor.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592220202","text":"# -*- coding: utf-8 -*-\n\nfrom kay.utils import forms\n\n\nclass AddNewThumb(forms.Form):\n img = forms.FileField(\n label=u\"Картинка\",\n required=True,\n help_text=u'Выберите картинку для загрузки')\n\n def __init__(self, **kwargs):\n super(AddNewThumb, self).__init__(\n action = '/upload_new_thumb', **kwargs)\n","sub_path":"apps/gift/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509175909","text":"from fairseq.models.roberta import RobertaModel\nimport torch.nn.functional as F\nimport torch\nimport argparse\nimport numpy as np\ndef predict():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--output_dir\", default=None,type=str, required=True,\n help=\"\")\n parser.add_argument(\"--task\", default=None,type=str, required=True,\n help=\"\")\n parser.add_argument(\"--data_dir\", default=None,type=str, required=True,\n help=\"\")\n args = parser.parse_args()\n # print(args)\n\n roberta = RobertaModel.from_pretrained(\n args.output_dir, \n # './outputs/RTE/7/',\n checkpoint_file='checkpoint_best.pt',\n data_name_or_path=args.data_dir\n )\n \n label_fn = lambda label: roberta.task.label_dictionary.string(\n [label + roberta.task.target_dictionary.nspecial]\n )\n # print(label_fn)\n ncorrect, nsamples = 0, 0\n roberta.cuda()\n roberta.eval()\n with open('../data-superglue-csv/'+args.task+'/val.tsv') as fin:\n fin.readline()\n logits=np.array([])\n num_classes =2\n for index, line in enumerate(fin):\n tokens = line.strip().split('\\t')\n sent1, sent2 = tokens[0], tokens[1]\n tokens = roberta.encode(sent1, sent2)\n logit = roberta.predict('sentence_classification_head', tokens).item()\n logits = np.append(logits,logit)\n print(logit)\n logits = logits.reshape((-1, num_classes))\n preds = np.argmax(logits, -1)\n\n print(preds)\n \n with open(args.output_dir+'eval_results2', \"w\") as writer:\n # print(label_list)\n for i in range(len(preds)):\n # json_i= \"\\\"idx: %d, \\\"label\\\": \\\"label_i\\\"\"\n writer.write(\"{\\\"idx\\\": %d, \\\"label\\\": \\\"%s\\\"}\\n\"%(i,preds[i]))\n # print(preds)\n\n\nif __name__ == '__main__':\n\n predict()\n","sub_path":"eval2_copa.py","file_name":"eval2_copa.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"308808398","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 16 17:49:59 2018\n\n@author: Prodipta\n\"\"\"\n\nimport os\nimport pandas as pd\nimport zipfile\nimport shutil\nimport requests\nfrom bs4 import BeautifulSoup\nimport bisect\nimport numpy as np\n\ndef touch(fname, fpath, times=None):\n with open(os.path.join(fpath,fname), 'a'):\n os.utime(os.path.join(fpath,fname), times)\n\ndef unzip_to_directory(zippath, extractpath):\n with zipfile.ZipFile(zippath) as z:\n for f in z.namelist():\n if f.endswith('.csv'):\n filename = os.path.basename(f)\n source = z.open(f)\n target = file(os.path.join(extractpath, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n\ndef copy_to_directory(inputpath, outputpath):\n pass\n\ndef clean_up(strpath):\n items = os.listdir(strpath)\n folders = [f for f in items if 
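# Portability note on unzip_to_directory above: the file() builtin it uses
# is Python 2 only. The same loop in Python 3 (illustrative sketch):
import os
import shutil
import zipfile

def unzip_csvs(zippath, extractpath):
    with zipfile.ZipFile(zippath) as z:
        for name in z.namelist():
            if name.endswith('.csv'):
                target_path = os.path.join(extractpath, os.path.basename(name))
                with z.open(name) as source, open(target_path, 'wb') as target:
                    shutil.copyfileobj(source, target)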
os.path.isdir(os.path.join(strpath,f))]\n files = [f for f in items if os.path.isfile(os.path.join(strpath,f))]\n for f in files:\n shutil.os.remove(os.path.join(strpath,f))\n for folder in folders:\n shutil.rmtree(os.path.join(strpath,folder))\n\ndef if_csvs_in_dir(strpath):\n items = os.listdir(strpath)\n files = [f for f in items if f.endswith(\".csv\")]\n if len(files) > 0:\n return True\n return False\n\ndef get_ohlcv(dfr, ticker=None):\n if ticker:\n dfr = dfr[ticker]\n return dfr.loc[:,['open','high','low','close','volume']]\n\ndef split_csvs(dfr, strpath, maps=None, OHLCV=True):\n cols = list(dfr.columns.values)\n #cols.remove('ticker')\n syms = set(dfr['ticker'].tolist())\n for s in syms:\n dfs = dfr.loc[dfr['ticker']==s,cols].set_index('date')\n dfs.index = pd.to_datetime(dfs.index)\n if maps is not None:\n start_date = pd.to_datetime(maps.loc[maps.symbol==s,'start_date'].tolist())[-1]\n end_date = pd.to_datetime(maps.loc[maps.symbol==s,'end_date'].tolist())[-1]\n dfs = dfs[start_date:end_date]\n if OHLCV:\n dfs = get_ohlcv(dfs)\n\n dfs.to_csv(os.path.join(strpath,s+\".csv\"))\n\ndef update_csvs(dfr,strpath, OHLCV=True):\n items = os.listdir(strpath)\n syms = [f.split(\".csv\")[0] for f in items if f.endswith(\".csv\")]\n for s in syms:\n dfs_o = pd.read_csv(os.path.join(strpath,s+\".csv\"),parse_dates=[0],index_col=0).sort_index()\n dfs_n = dfr.loc[dfr['ticker']==s].set_index('date')\n if OHLCV:\n dfs_n = get_ohlcv(dfs_n)\n dfs = pd.concat([dfs_o,dfs_n])\n dfs.to_csv(os.path.join(strpath,s+\".csv\"))\n\ndef read_big_csv(strpath,tickers, pattern=\"\", header = 0, ticker_col=0):\n items = os.listdir(strpath)\n files = [f for f in items if f.endswith(\".csv\") and pattern in f]\n\n ts = [os.stat(os.path.join(strpath,f)).st_mtime for f in files]\n idx = ts.index(max(ts))\n\n datafile = files[idx]\n print(\"reading {}\".format(datafile))\n reader = pd.read_csv(os.path.join(strpath,datafile), header=header, iterator=True, chunksize=1000)\n dfr = pd.concat([chunk[chunk.iloc[:,ticker_col].isin(tickers)] for chunk in reader])\n\n print(\"read total {} rows\".format(len(dfr)))\n\n return dfr\n\ndef download_spx_changes(wiki_url):\n req = requests.get(wiki_url)\n soup = BeautifulSoup(req.content, 'lxml')\n table_classes = {\"class\": [\"sortable\", \"wikitable\", \"jquery-tablesorter\"]}\n wikitables = soup.findAll(\"table\", table_classes)\n tickertable = wikitables[0]\n changetable = wikitables[1]\n\n # get the current ticker\n rows = [item.get_text() for item in tickertable.find_all('tr')]\n col_names = rows[0].split('\\n')[1:-1]\n tickers = pd.DataFrame(columns=col_names)\n for i in range(1,len(rows)):\n row = rows[i].split('\\n')[1:-1]\n try:\n tickers.loc[len(tickers)] = tuple(row)\n except:\n pass\n #print(row)\n tickers.columns = ['symbol','name','filing','sector','sub_industry','address','date','CIK', 'Founded']\n\n # now get the ticker change table\n rows = [item.get_text() for item in changetable.find_all('tr')]\n col_names = rows[1].split('\\n')[1:-1]\n running_reason = ''\n\n tabs = pd.DataFrame(columns=col_names)\n for i in range(2,len(rows)):\n row = rows[i].split('\\n')[1:-1]\n if len(row) > 6:\n row = row[:5]\n try:\n dt = pd.to_datetime(row[0],format='%B %d, %Y')\n if len(row) < 6:\n row = row + [\"\" for f in range(len(row),6)]\n row[-1] = running_reason\n\n tabs.loc[len(tabs)] = (dt,) + tuple(row[1:])\n running_dt = dt\n running_reason = row[-1]\n except ValueError:\n if len(row) < 5:\n row = row + [\"\" for f in range(len(row),5)]\n row[-1] = running_reason\n 
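# The chunked-filter pattern at the heart of read_big_csv above, reduced to
# its core (illustrative; 'big.csv' and the ticker set are placeholders):
import pandas as pd
reader = pd.read_csv('big.csv', header=0, iterator=True, chunksize=1000)
wanted = {'AAPL', 'MSFT'}
dfr = pd.concat([chunk[chunk.iloc[:, 0].isin(wanted)] for chunk in reader])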
tabs.loc[len(tabs)] = (running_dt,) + tuple(row)\n\n tabs.columns = ['date','add','name_added','delete','name_deleted','reason']\n tabs = tabs.sort_values('date')\n return {\"tickers\": tickers, \"change\":tabs}\n\ndef find_interval(x, lst):\n try:\n idx = lst.index(x)\n except:\n idx = max(0,bisect.bisect_left(lst,x)-1)\n return idx\n\ndef upsert_pandas(dfr, sym_col, sym, date_col, date, names_dict):\n if sym in dfr[sym_col].tolist():\n dfr.loc[dfr[sym_col]==sym,date_col] = date.strftime(\"%Y-%m-%d\")\n else:\n dfr.loc[len(dfr),:] = sym,names_dict.get(sym,sym),date.strftime(\"%Y-%m-%d\"),date.strftime(\"%Y-%m-%d\")\n\ndef update_ticker_change(membership_maps,tickers_list):\n membership_maps['start_date'] = pd.to_datetime(membership_maps['start_date'])\n membership_maps['end_date'] = pd.to_datetime(membership_maps['end_date'])\n\n if len(tickers_list) == 0:\n return membership_maps\n\n old_tickers = tickers_list['old'].tolist()\n new_tickers = tickers_list['new'].tolist()\n new_names = tickers_list['name'].tolist()\n ticker_maps = dict(zip(old_tickers,new_tickers))\n names_maps = dict(zip(old_tickers,new_names))\n\n for t in old_tickers:\n old_entry = membership_maps[membership_maps['symbol']==t]\n new_entry = membership_maps[membership_maps['symbol']==ticker_maps[t]]\n\n if len(old_entry)==0:\n continue\n if len(old_entry)>1:\n raise ValueError(\"Duplicate entries in membership maps\")\n update_idx = old_entry.index\n membership_maps.loc[membership_maps['symbol']==t,'asset_name'] = names_maps[t]\n membership_maps.loc[membership_maps['symbol']==t,'symbol'] = ticker_maps[t]\n\n if len(new_entry) ==0:\n continue\n if len(new_entry)>1:\n raise ValueError(\"Duplicate entries in membership maps\")\n\n remove_idx = new_entry.index\n start_date = min(old_entry['start_date'].values,new_entry['start_date'].values)\n end_date = max(old_entry['end_date'].values,new_entry['end_date'].values)\n membership_maps.iloc[update_idx,2] = start_date\n membership_maps.iloc[update_idx,3] = end_date\n membership_maps.iloc[remove_idx] = np.nan\n\n membership_maps = membership_maps.dropna()\n return membership_maps\n\ndef ensure_data_between_dates(fname,start_date, end_date):\n if not os.path.isfile(fname):\n raise IOError(\"file does not exists\")\n\n dfr = pd.read_csv(fname,parse_dates=[0], infer_datetime_format=True,\n index_col=0).sort_index()\n try:\n dfr = dfr[start_date:end_date]\n except:\n print(\"sym {}, start {}, end {}\".format(fname,start_date,end_date))\n os.remove(fname)\n\n if len(dfr) == 0:\n os.remove(fname)\n return\n\n dfr.to_csv(fname)\n","sub_path":"zipline/data/bundles/ingest_utilities.py","file_name":"ingest_utilities.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"253850350","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5 import QtCore\r\nimport AddSpecialList\r\nimport Specialmail\r\nimport imap\r\nimport NewSign\r\nimport WarningBox\r\n\r\n\r\nclass SpecialListUi(QDialog):\r\n def __init__(self, im, mails):\r\n super(SpecialListUi, self).__init__()\r\n self.mails = mails\r\n self.im = im\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setWindowTitle(\"特别关心\")\r\n self.setWindowIcon(QIcon('images/LOGO.png'))\r\n self.resize(380, 500)\r\n self.setFixedSize(self.width(), self.height())\r\n layout = QVBoxLayout()\r\n self.setWindowOpacity(0.9)\r\n self.setStyleSheet('''\r\n QDialog{\r\n background:#DCDCDC;\r\n font-family: \"Microsoft 
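# What find_interval above computes for values not in the list
# (illustrative): the index of the interval [lst[i], lst[i+1]) containing
# x, clamped at 0 below the first edge.
import bisect
lst = [0, 10, 20, 30]
assert max(0, bisect.bisect_left(lst, 15) - 1) == 1   # 15 lies in [10, 20)
assert max(0, bisect.bisect_left(lst, -5) - 1) == 0   # clamped below lst[0]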
Yahei\";} \r\n ''')\r\n\r\n self.addBtn = QPushButton(\"添加\")\r\n self.addBtn.clicked.connect(self.onBtnAdd)\r\n self.addBtn.setStyleSheet('''QPushButton\r\n {text-align : center;\r\n background-color : white;\r\n font: bold;\r\n font-family: \"Microsoft Yahei\";\r\n border-color: gray;\r\n border-width: 2px;\r\n border-radius: 10px;\r\n padding: 6px;\r\n height : 14px;\r\n border-style: outset;\r\n font : 14px;}\r\n QPushButton:pressed\r\n {text-align : center;\r\n background-color : light gray;\r\n font: bold;\r\n border-color: gray;\r\n border-width: 2px;\r\n border-radius: 10px;\r\n padding: 6px;\r\n height : 14px;\r\n border-style: outset;\r\n font : 14px;}''')\r\n\r\n self.tableWidget = QTableWidget()\r\n self.tableWidget.setColumnCount(3)\r\n self.tableWidget.verticalHeader().setVisible(False)\r\n self.tableWidget.horizontalHeader().setVisible(False)\r\n self.tableWidget.setShowGrid(False)\r\n self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)\r\n self.tableWidget.setColumnWidth(0, 170)\r\n self.tableWidget.setColumnWidth(1, 80)\r\n self.tableWidget.setColumnWidth(1, 80)\r\n self.row_count = 0\r\n\r\n self.tableWidget.setStyleSheet('''\r\n QTableWidget{\r\n text-align: center;\r\n background: transparent;\r\n font-family: SimHei;} \r\n ''')\r\n # 建立连接\r\n\r\n # 断开连接\r\n for each in imap.special_list:\r\n self.addLine(each)\r\n\r\n layout.addWidget(self.tableWidget)\r\n layout.addWidget(self.addBtn)\r\n self.setLayout(layout)\r\n\r\n def onBtnAdd(self):\r\n dialog = AddSpecialList.AddSpecialListUi()\r\n dialog.addBtn.clicked.connect(lambda: self.addLine(dialog.memberEdit.text()))\r\n dialog.exec()\r\n\r\n #### 动态添加一行信息\r\n def addLine(self, name):\r\n if '@' in name:\r\n # 添加一空白行\r\n self.row_count = self.tableWidget.rowCount()\r\n self.tableWidget.insertRow(self.row_count)\r\n\r\n # 联系人\r\n member = QLineEdit()\r\n member.setFocusPolicy(QtCore.Qt.NoFocus)\r\n member.setText(name)\r\n member.setStyleSheet('''\r\n QLineEdit{\r\n color:#4F4F4F;\r\n font-family:\"Segoe UI\";\r\n background: #DCDCDC;\r\n border:none}\r\n ''')\r\n\r\n # 查看按钮\r\n style = '''QPushButton\r\n {text-align : center;\r\n background-color : white;\r\n font: bold;\r\n font-family: \"Microsoft Yahei\";\r\n border-color: gray;\r\n border-width: 2px;\r\n border-radius: 10px;\r\n padding: 6px;\r\n height : 14px;\r\n border-style: outset;\r\n font : 14px;}\r\n QPushButton:pressed\r\n {text-align : center;\r\n background-color : light gray;\r\n font: bold;\r\n border-color: gray;\r\n border-width: 2px;\r\n border-radius: 10px;\r\n padding: 6px;\r\n height : 14px;\r\n border-style: outset;\r\n font : 14px;}'''\r\n checkBtn = QPushButton()\r\n checkBtn.setText(\"查看\")\r\n checkBtn.clicked.connect(lambda: self.onBtnChk(name))\r\n checkBtn.setStyleSheet(style)\r\n\r\n\r\n # 移除按钮\r\n reMovBtn = QPushButton(\"delete\")\r\n reMovBtn.clicked.connect(lambda: self.onBtnReMov(name))\r\n reMovBtn.setStyleSheet(style)\r\n\r\n self.tableWidget.setCellWidget(self.row_count, 0, member)\r\n self.tableWidget.setCellWidget(self.row_count, 1, checkBtn)\r\n self.tableWidget.setCellWidget(self.row_count, 2, reMovBtn)\r\n def onBtnChk(self, name):\r\n dialog = Specialmail.SpecialmailUi(name, self.im, self.mails)\r\n dialog.exec()\r\n\r\n def onBtnReMov(self, name):\r\n button = self.sender()\r\n if button:\r\n r = self.tableWidget.indexAt(button.pos()).row()\r\n row = int(r)\r\n self.tableWidget.removeRow(row)\r\n self.row_count = self.row_count - 1\r\n imap.special_list.remove(name)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
app = QApplication(sys.argv)\r\n dialag = SpecialListUi()\r\n dialag.show()\r\n sys.exit(app.exec_())","sub_path":"Spam/SpecialList.py","file_name":"SpecialList.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"302926006","text":"import pandas as pd\n\ndata = pd.read_csv('rawdatafc.csv', index_col = 0)\nnodelist = []\nwith open('nodelist.txt', 'r') as nodelistf:\n for node in nodelistf:\n nodelist.append(node.strip())\n\ncritab = []\nablist = [ab for ab in data.index]\nfor node in nodelist:\n for ab in ablist:\n if node in ab:\n critab.append(ab)\n\nwith open('critablist.txt', 'w') as f:\n for ab in critab:\n f.write(ab + '\\n')\n ","sub_path":"diary/sugyun/jsk_1801_3_UnseenPert/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419419120","text":"import unittest\nimport json\nimport os\n\n\nclass TestConfigFormat(unittest.TestCase):\n\n def setUp(self):\n self.dirname = os.path.dirname(__file__)\n self.dataset_dir = os.path.join(self.dirname, 'resources/38_sick')\n self.search_config_keys = [\"problem_schema\", \"problem_root\", \"dataset_schema\", \"training_data_root\"]\n self.test_config_keys = [\"problem_schema\", \"problem_root\", \"dataset_schema\", \"test_data_root\"]\n\n def test_config_exist(self):\n json_files = [x for x in os.listdir(self.dataset_dir) if x.endswith(\".json\")]\n\n self.assertIn(\"search_config.json\", json_files, \"search_config.json missing\")\n self.assertIn(\"test_config.json\", json_files, \"test_config.json missing\")\n\n def test_config_keys(self):\n search_config = json.load(open(os.path.join(self.dataset_dir, \"search_config.json\"), 'r'))\n self.assertEqual(len(set(self.search_config_keys).intersection(search_config.keys())), 4, \"search_config missing key\")\n\n test_config = json.load(open(os.path.join(self.dataset_dir, \"test_config.json\"), 'r'))\n self.assertEqual(len(set(self.test_config_keys).intersection(test_config.keys())), 4, \"test_config missing key\")\n\n def test_config_path_exist(self):\n search_config = json.load(open(os.path.join(self.dataset_dir, \"search_config.json\"), 'r'))\n for k, v in search_config.items():\n if k in self.search_config_keys:\n self.assertEqual(os.path.exists(os.path.join(self.dataset_dir, v)), True,\n \"path: {} not exist\".format(os.path.join(self.dataset_dir, v)))\n\n test_config = json.load(open(os.path.join(self.dataset_dir, \"test_config.json\"), 'r'))\n for k, v in test_config.items():\n if k in self.test_config_keys:\n self.assertEqual(os.path.exists(os.path.join(self.dataset_dir, v)), True,\n \"path: {} not exist\".format(os.path.join(self.dataset_dir, v)))\n","sub_path":"unit_tests/test_config_format.py","file_name":"test_config_format.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"113306374","text":"from .parsers import is_primitive, get_primitive_name, NAME_ATTRIBUTE, Parser\nfrom .validators import Validator\n\n'''\nPrints out all the inforamtion of a single argument including its name,\nhelp, short name, and default value, parser type, and position.\n'''\ndef _dump_arg(parser, arg):\n container = parser._argument_container\n if arg.position:\n print('\\t{}'.format(arg.name), end='')\n else:\n print('\\t{}{}'.format(container._prefix, arg.name), end='')\n if arg.short:\n 
print(' | {}{}'.format(container._short_prefix, arg.short), end='')\n\n if arg.help:\n print(' -> {}'.format(arg.help), end='')\n print()\n\n line = '\\t\\t'\n if arg.required:\n line = '{}[Required]'.format(line)\n\n if isinstance(arg.parser, Parser):\n if hasattr(arg.parser, NAME_ATTRIBUTE):\n line = '{}[Type: {}]'.format(line, getattr(arg.parser, NAME_ATTRIBUTE))\n else:\n line = '{}[Type: {}]'.format(line, type(arg.parser).__name__)\n elif is_primitive(arg.parser):\n line = '{}[Type: {}]'.format(line, get_primitive_name(arg.parser))\n\n if arg.position != 0:\n line = '{}[Position: {}]'.format(line, arg.position)\n print(line)\n\n if arg.default is not None:\n print('\\t\\t[Default Value: {}]'.format(str(arg.default)))\n\n if arg.validators is not None and len(arg.validators) > 0:\n found = False\n for validator in arg.validators:\n if not isinstance(validator, Validator) or validator.help_message() == '':\n continue\n if not found:\n print('\\t\\tArgument Restrictions:')\n found = True\n print('\\t\\t\\t- {}'.format(validator.help_message()))\n\n print('')\n\n\n'''\nPrints out the basic usage information.\n'''\ndef dump_usage(parser):\n container = parser._argument_container\n required = [arg for arg in container.all_arguments() if arg.required]\n optional = [arg for arg in container.all_arguments() if not arg.required and arg.position == 0]\n\n usage = ''\n if parser._program:\n usage = 'Usage: {}'.format(parser._program)\n\n position = 1\n argument = container.get_argument_at_position(position)\n while argument is not None:\n if argument.required:\n usage = '{} {}'.format(usage, argument.name)\n else:\n usage = '{} [{}]'.format(usage, argument.name)\n position += 1\n argument = container.get_argument_at_position(position)\n for arg in [req for req in required if req.position == 0]:\n usage = '{} {}{}'.format(usage, container._prefix, arg.name)\n for arg in optional:\n usage = '{} [{}{}]'.format(usage, container._prefix, arg.name)\n print(usage)\n\n\n'''\nPrints out all the help information for a provided ArgumentParser instance.\n'''\ndef dump_help(parser):\n all_arguments = parser._argument_container.all_arguments()\n required = [arg for arg in all_arguments if arg.required and arg.position == 0]\n optional = [arg for arg in all_arguments if not arg.required and arg.position == 0]\n positional = [arg for arg in all_arguments if arg.position != 0]\n dump_usage(parser)\n\n print('')\n if parser._description:\n print('{}\\n'.format(parser._description))\n\n # print out all the required parameter information\n if len(required) > 0 or len(optional) > 0:\n print('Named Arguments')\n for arg in required:\n _dump_arg(parser, arg)\n for arg in optional:\n _dump_arg(parser, arg)\n\n if len(positional) > 0:\n positional = sorted(positional, key=lambda pos: pos.position)\n print('Positional Arguments')\n for arg in positional:\n _dump_arg(parser, arg)\n","sub_path":"pyargparse/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"198741733","text":"\n# coding: utf-8\n\n# In[117]:\n\n\n#This script will search current and child directories for HKL files from XDS\n#and submit these files to dimple pipeline for automate structure solving\n#Developed by Gustavo Lima at MAX IV Laboratory\n#contact: gustavo.lima@maxiv.lu.se\n\nimport argparse\nimport os\nimport sys\nimport time\nimport subprocess\nimport glob\n \nparser = 
argparse.ArgumentParser()\nparser.add_argument('-proposal', action='store', dest='proposal', help='Proposal Number', required=True)\nparser.add_argument('-shift', action='store', dest='shift', help='Shift date' , required=True)\nparser.add_argument('-ptype', action='store', dest='proposal_type', help='Type of proposal: staff or visitors', default=\"visitors\")\nparser.add_argument('--version', action='version', version='%(prog)s 1.0')\n\nresults = parser.parse_args()\nproposal = results.proposal \nshift = results.shift\nproposal_type = results.proposal_type\n\n# In[124]:\n\n\n##User specific inputs\n\n#proposal =\"20180489\"\n#shift =\"20190127\"\n#proposal_type =\"visitors\"\n\npath=\"/data/\"+proposal_type+\"/biomax/\"+proposal+\"/\"+shift\n\n\n# In[125]:\n\n\nclass CytherError(Exception):\n def __init__(self, message):\n Exception.__init__(self, message)\n self.message = 'CytherError: {}'.format(repr(message))\n\ndef where(name, flags=os.F_OK):\n result = []\n extensions = os.environ.get('PATHEXT', '').split(os.pathsep)\n if not extensions:\n raise CytherError(\"The 'PATHEXT' environment variable doesn't exist\")\n\n paths = os.environ.get('PATH', '').split(os.pathsep)\n if not paths:\n raise CytherError(\"The 'PATH' environment variable doesn't exist\")\n\n for path in paths:\n path = os.path.join(path, name)\n if os.access(path, flags):\n result.append(os.path.normpath(path))\n for ext in extensions:\n whole = path + ext\n if os.access(whole, flags):\n result.append(os.path.normpath(whole))\n return result\n\n\n# In[126]:\ndef fspipeline_rearrange(path):\n \n ##Take each folder with mtz2map (last file created after successful run) and\n ##retrieve dataset name. This is usefull to set as exclude list in fsp_run\n \n fsp_list =glob.glob(path+\"/fragmax/results/*fspipeline*\")\n path_list=list()\n for fsp_run in fsp_list:\n for roots, dirs, files in os.walk(fsp_run):\n if \"mtz2map.log\" in files: \n path_list.append(roots)\n \n success_list=[x.split(\"/\")[-2].split(\"_merged\")[0] for x in path_list]\n \n \n ##This function will take the file list from successful fspipeline run\n ##and copy to results folder along other pipelines (dimple, pipedream)\n \n softwares = [\"autoproc\",\"EDNA_proc\",\"dials\",\"fastdp\",\"xdsapp\"]\n for file,fpath in zip(success_list,path_list):\n outdir=path+\"/fragmax/results/\"\n print(file,fpath)\n for sw in softwares:\n if sw in file:\n outp=outdir+file.split(\"_\"+sw)[0]+\"/\"+sw\n copy_string=\"rsync --ignore-existing -raz --progress \"+fpath+\"/* \"+outp+\"/fspipeline\"\n #subprocess.call(copy_string, shell=True)\n print(copy_string)\n \n\ndef fix_fsplistPath(cootpyfile):\n with open(cootpyfile,\"r\") as inp:\n a=inp.readlines()\n outdir=cootpyfile.split(\"coot.py\")[0]\n for j in a:\n if \"make_and_draw_map\" in j: \n oldmtz=j.split('\"')[1] \n newmtz=outdir+oldmtz.split(\"/\")[-1]\n if \"read_pdb\" in j: \n oldpdb=j.split('\"')[1] \n newpdb=outdir+oldpdb.split(\"/\")[-1]\n \n return \"\".join(a).replace(oldmtz,newmtz).replace(oldpdb,newpdb).replace(\"/gpfs/offline0/visitors/\",\"/data/visitors/\")\n\n \ndef fix_runcootPath(runcootfile):\n with open(runcootfile,\"r\") as inp:\n a=inp.readlines()\n\n outdir=runcootfile.split(\"run_coot\")[0]\n old=a[1].split(\"coot --python --script\")[1].split(\"--no-guano &\")[0]\n new=\" \"+outdir+\"coot.py \"\n\n return \"\".join(a).replace(old,new).replace(\"/gpfs/offline0/visitors/\",\"/data/visitors/\")\n \n \ndef find_cootFiles(path):\n coot_path=where(\"coot\")[0]\n\n 
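# The where() helper above reimplements executable lookup by walking PATH
# and PATHEXT; on Python 3.3+ the standard library already covers this
# (illustrative):
import shutil
coot_path = shutil.which('coot')  # None when 'coot' is not on PATH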
dimpleCootshList=glob.glob(path+\"/fragmax/results/*/*/dimple/coot.sh\")\n fspCootpyList=glob.glob(path+\"/fragmax/results/*/*/fspipeline/coot.py\")\n fspRunCootList=glob.glob(path+\"/fragmax/results/*/*/fspipeline/run_coot\")\n \n for cootfile in dimpleCootshList:\n with open (cootfile,\"r\") as inp:\n a=inp.readlines()\n with open (cootfile,\"w\") as inp:\n index=a[0].find(\"coot --no-guano\")\n inp.write(coot_path+a[0][index+4:])\n\n for fspcootpy in fspCootpyList:\n try:\n a = fix_fsplistPath(fspcootpy)\n with open(fspcootpy,\"w\") as inp: \n inp.write(\"\".join(a))\n except:\n print(\"Path ok\")\n\n for fspcootpy in fspRunCootList:\n try:\n a=fix_runcootPath(fspcootpy) \n with open(fspcootpy,\"w\") as inp:\n inp.write(\"\".join(a))\n except:\n print(\"Path ok\")\n \n\n# In[127]:\nfspipeline_rearrange(path)\n\nfind_cootFiles(path)\n\n","sub_path":"Coot_path.py","file_name":"Coot_path.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"482127190","text":"from django.http import HttpResponse, JsonResponse, HttpResponseNotAllowed\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import TGoodsProcessed, TCommentProcessed\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone as datetime\nfrom django.core.serializers import serialize\nfrom django.db.models import Avg\nimport json\nfrom django.db import connection\nfrom django.template.defaulttags import register\n\n\n# 注册一个用于模板的获取字典值的函数\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n\n# SQL查询数据函数\ndef db_query(sql):\n try:\n cursor = connection.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n fields = cursor.description\n data_list = []\n for row in result:\n rowdict = {}\n for i in range(len(fields)):\n rowdict[fields[i][0]] = row[i]\n data_list.append(rowdict)\n return data_list\n except Exception as e:\n print(e)\n return None\n\n# 获取排行信息函数\ndef get_goods_ranking():\n sql = 'select goods_id, name, comment_num from t_goods_processed ORDER BY comment_num desc limit 5'\n comment_ranking = db_query(sql)\n sql = \"\"\"select goods_id,name ,round((worth /(worth + worthless))*100,2) as worth_rate from t_goods_processed where worth+worthless>10 ORDER BY worth_rate desc LIMIT 5\"\"\"\n worth_ranking = db_query(sql)\n sql = \"\"\"select goods_id,name ,round((positive/(positive+neutral+negative))*100,2) as positive_rate from t_goods_processed where comment_num>10 ORDER BY positive_rate desc LIMIT 5\"\"\"\n positive_ranking = db_query(sql)\n return comment_ranking, worth_ranking, positive_ranking\n\n\n# 获取品牌数量排行信息函数\ndef get_brand_summary():\n name_list = []\n data_list = []\n sql = \"\"\"\n SELECT\n brand,\n count(brand) AS num\n FROM\n t_goods_processed\n GROUP BY\n brand\n ORDER BY\n num DESC\n \"\"\"\n try:\n cursor = connection.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n # fields = cursor.description\n total_num = 0\n rank_total_num = 0\n i = 0\n for row in result:\n if i <= 6:\n name_list.append(row[0].split(\"/\")[-1])\n data_list.append(row[1])\n total_num += row[1]\n rank_total_num += 0\n i += 1\n except:\n pass\n print(name_list)\n print(data_list)\n return name_list, data_list\n\n\n# 首页\n@login_required(login_url=\"/user/login\")\ndef index(request):\n return render(request, 'index.html', {\"user\": request.user})\n\n\n# DashBoard 
# Dashboard overview page\n@login_required(login_url=\"/user/login\")\ndef console(request):\n    dt = datetime.datetime.now() - datetime.timedelta(days=1)\n    query_time = dt.strftime('%Y-%m-%d %H:%M:%S') # window: the most recent day\n    name_list, data_list = get_brand_summary() # top seven brand names and counts, for the bar chart\n    goods_num = TGoodsProcessed.objects.all().count() # total number of goods\n    comment_num = TCommentProcessed.objects.all().count() # total number of comments\n    type_num = TGoodsProcessed.objects.values(\"brand\").distinct().count() # number of distinct brands\n    avg_price = f\" {TGoodsProcessed.objects.aggregate(Avg('price'))['price__avg']:0.0f}\" # average price\n    comment_ranking, worth_ranking, positive_ranking = get_goods_ranking() # the ranking lists\n    # print(comment_ranking)\n    return render(request, 'console.html', locals())\n\n\n# Goods listing page\n@login_required(login_url=\"/user/login\")\ndef show_goods_page(request):\n    keyword = request.GET.get('keyword')\n    brand = request.GET.get('brand')\n    start_time = request.GET.get('start_time')\n    end_time = request.GET.get('end_time')\n    if not keyword:\n        keyword = \"\"\n    if not brand:\n        brand = \"\"\n    if not start_time:\n        start_time = \"\"\n    if not end_time:\n        end_time = \"\"\n    return render(request, 'show_goods.html', locals())\n\n\n# Comment listing page\n@login_required(login_url=\"/user/login\")\ndef show_comment_page(request):\n    keyword = request.GET.get('keyword')\n    sentiment = request.GET.get('sentiment')\n    start_time = request.GET.get('start_time')\n    end_time = request.GET.get('end_time')\n    if not keyword:\n        keyword = \"\"\n    if not sentiment:\n        sentiment = \"\"\n    if not start_time:\n        start_time = \"\"\n    if not end_time:\n        end_time = \"\"\n    return render(request, 'show_comment.html', locals())\n\n\n# Goods detail page\n@login_required(login_url=\"/user/login\")\ndef detail(request, goods_id):\n    goods = TGoodsProcessed.objects.filter(goods_id=goods_id).first()\n    if goods:\n        goods.url = '/static/img/' + \"/\".join(goods.url.split(\"/\")[3:]) # swap the remote image URL for a local static path\n        return render(request, 'detail.html', locals())\n    else:\n        return HttpResponse(\"没有找到该商品\")\n\n\n# Comment info API endpoint\n@login_required(login_url=\"/user/login\")\ndef get_comment_info(request, comment_id):\n    total_count = 0\n    datalist = []\n    try:\n        start_time = request.GET.get('start_time')\n        if not start_time:\n            start_time = None\n    except Exception:\n        start_time = None\n    try:\n        end_time = request.GET.get('end_time')\n        if not end_time:\n            end_time = None\n    except Exception:\n        end_time = None\n    try:\n        sentiment = int(request.GET.get('sentiment'))\n    except Exception:\n        sentiment = None\n    try:\n        keyword = request.GET.get('keyword')\n    except Exception:\n        keyword = None\n    try:\n        goods_id = request.GET.get('goods_id')\n    except Exception:\n        goods_id = None\n    try:\n        page = int(request.GET.get('page'))\n        plimit = int(request.GET.get('limit'))\n    except Exception:\n        page = 1\n        plimit = 10\n    sql = \"\"\"\n    SELECT\n        c.comment_id,\n        g.goods_id,\n        g.name,\n        g.brand,\n        c.time,\n        c.text,\n        c.sentiment,\n        c.positive_prob,\n        c.negative_prob,\n        c.confidence\n    FROM\n        t_goods_processed g,\n        t_comment_processed c\n    WHERE\n        g.goods_id = c.goods_id\"\"\"\n    if comment_id != \"0\":\n        sql = sql + f\" and c.comment_id={comment_id}\"\n    if goods_id is not None:\n        sql = sql + f\" and c.goods_id={goods_id}\"\n    if keyword is not None:\n        sql = sql + f\" and c.text like '%{keyword}%'\"\n    if sentiment is not None:\n        sql = sql + f\" and c.sentiment={sentiment}\"\n    if start_time is not None:\n        sql = sql + f\" and c.time>='{start_time}'\"\n    if end_time is not None:\n        sql = sql + f\" and c.time<='{end_time}'\"\n    sql = sql + \" order by c.time desc\"\n\n    
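# NOTE: the filters above are interpolated straight into the SQL text, so a\n    # crafted keyword could break or inject into the query. A safer sketch with\n    # placeholders (hypothetical -- db_query would need to accept parameters):\n    #   sql += \" and c.text like %s\"\n    #   params.append(f\"%{keyword}%\")\n    #   cursor.execute(sql, params)\n    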
try:\n        datalist = db_query(sql)\n        start_num = (page - 1) * plimit\n        end_num = page * plimit\n        total_count = len(datalist)\n        datalist = datalist[start_num:end_num]\n    except Exception:\n        pass\n    res_json = {\n        \"code\": 0,\n        \"msg\": \"ok\",\n        \"count\": total_count,\n        \"data\": datalist\n    }\n    return JsonResponse(res_json)\n\n\n# Goods info API endpoint\n@login_required(login_url=\"/user/login\")\n@csrf_exempt\ndef get_goods_info(request, goods_id):\n    if request.method == 'GET':\n        total_count = 0\n        datalist = []\n        try:\n            start_time = datetime.datetime.strptime(request.GET.get('start_time'), '%Y-%m-%d %H:%M:%S')\n        except Exception:\n            start_time = None\n        try:\n            end_time = datetime.datetime.strptime(request.GET.get('end_time'), '%Y-%m-%d %H:%M:%S')\n        except Exception:\n            end_time = None\n        try:\n            brand = request.GET.get('brand')\n        except Exception:\n            brand = None\n        try:\n            keyword = request.GET.get('keyword')\n        except Exception:\n            keyword = None\n        try:\n            page = int(request.GET.get('page'))\n            plimit = int(request.GET.get('limit'))\n        except Exception:\n            page = 1\n            plimit = 10\n        queryset = TGoodsProcessed.objects.order_by(\"-time\").all()\n        if goods_id != \"0\":\n            queryset = queryset.filter(goods_id=goods_id)\n        if keyword is not None:\n            queryset = queryset.filter(name__contains=keyword)\n        if brand is not None:\n            queryset = queryset.filter(brand__contains=brand)\n        if start_time is not None:\n            queryset = queryset.filter(time__gte=start_time)\n        if end_time is not None:\n            queryset = queryset.filter(time__lte=end_time)\n        if len(queryset) > 0:\n            data = serialize(\"json\", queryset)\n            json_data = json.loads(data)\n            for info in json_data:\n                info['fields'][\"goods_id\"] = info['pk']\n                datalist.append(info['fields'])\n        # check that the requested page is in range, then slice out that page of rows\n        datapages = float(len(datalist) / plimit + 1)\n        if page >= datapages:\n            return JsonResponse({\"code\": 1, \"msg\": \"没有查到符合要求的数据\"})\n        start_num = (page - 1) * plimit\n        end_num = page * plimit\n        total_count = len(datalist)\n        datalist = datalist[start_num:end_num]\n        res_json = {\n            \"code\": 0,\n            \"msg\": \"ok\",\n            \"count\": total_count,\n            \"data\": datalist\n        }\n        return JsonResponse(res_json)\n    return HttpResponseNotAllowed(['GET'])\n","sub_path":"week10/mysite/apps/smzdm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"625824823","text":"\"\"\"Graphical user interfaces for the circular weighing algorithm,\r\ndata collection, and the final analysis of the masses. The first class\r\nis the GUI and the handling of the circular pop-up window. The second class is for the\r\nmain body of the program and the final least squares analysis.\"\"\"\r\nimport wx\r\nimport wx.xrc\r\nimport wx.grid\r\nimport numpy as np\r\nimport time #Used for a time stamp in the file name.\r\nimport visa\r\n\r\nimport gui_collect #Gui of the main parent table.\r\n\r\nimport weighing #Circular weighing thread.\r\n\r\nclass Collector(gui_collect.MyFrame11):\r\n    def __init__(self, parent):\r\n        gui_collect.MyFrame11.__init__(self, parent)\r\n        self.parent = parent\r\n        self.Show(True)\r\n        self.weigher = None\r\n    \r\n    def setup(self,masses):\r\n        self.masses = masses\r\n        self.m_textCtrl2.SetValue(\",\".join(masses))\r\n        positions = range(1,len(masses)+1)\r\n        positions = [str(p) for p in positions]\r\n        self.m_textCtrl3.SetValue(\",\".join(positions))\r\n    \r\n    def on_stop(self,event):\r\n        \"\"\"Calls abort() on the thread; the thread can't be killed, but this\r\n        informs it that it is time to stop working. It will stop when it is ready.\"\"\"\r\n        
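# abort() only raises a flag; the worker thread polls it between readings\r\n        # and exits on its own, so it may take a moment to actually stop.\r\n        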
if self.weigher:\r\n            self.weigher.abort()\r\n            self.weigher = None\r\n\r\n    def on_run(self, event):\r\n        \"\"\"Collects the run options from the input widgets and sends\r\n        them to an independent thread. The thread reports its progress\r\n        back and runs on its own from then on.\"\"\"\r\n        port = self.m_comboBox3.GetValue()\r\n        masses = self.m_textCtrl2.GetValue().split(\",\")\r\n        positions = self.m_textCtrl3.GetValue()\r\n        num_cycles = self.m_textCtrl51.GetValue().replace(\" \",\"\")\r\n        centerings = self.m_textCtrl4.GetValue().replace(\" \",\"\")\r\n        simulated = False\r\n        \r\n        if positions in ('',' '):\r\n            positions = range(1,len(masses)+1)\r\n        else:\r\n            positions = positions.split(',')\r\n            positions = [int(p) for p in positions] #Make sure they are all ints\r\n        \r\n        if len(masses) >= 5:\r\n            reads_per_mass = 3\r\n        else:\r\n            reads_per_mass = 7 - len(masses)\r\n        \r\n        parent = self\r\n        \r\n        selection = self.m_comboBox4.GetValue()\r\n        if \"Auto\" in selection:\r\n            run_option = 'AUTO'\r\n            try:\r\n                num_cycles = int(num_cycles)\r\n            except ValueError:\r\n                num_cycles = 1\r\n                self.m_textCtrl51.SetValue('1')\r\n            try:\r\n                centerings = int(centerings)\r\n            except ValueError:\r\n                centerings = 1\r\n                self.m_textCtrl4.SetValue('1')\r\n        else:\r\n            run_option = 'SEMI'\r\n        if \"Sim.\" in selection:\r\n            simulated = True\r\n        if not self.weigher:\r\n            self.weigher = weighing.Thread(port,masses,positions,reads_per_mass,centerings,parent,run_option,num_cycles,simulated)\r\n    \r\n    def on_refresh_adresses(self, event):\r\n        \"\"\"Refresh the connected VISA addresses and add them to the selection box\"\"\"\r\n        #No need for a try block?\r\n        rm = visa.ResourceManager()#new Visa\r\n        resources = rm.list_resources()\r\n        self.m_comboBox3.Clear()\r\n        for address in resources:\r\n            self.m_comboBox3.Append(address)\r\n\r\n    def update_popup(self,text):\r\n        \"\"\"Updates the large instruction text\"\"\"\r\n        self.m_staticText5.SetLabel(text)\r\n\r\n    def report_event(self,text):\r\n        \"\"\"Report something in the text box, used for live updates\"\"\"\r\n        self.m_textCtrl5.AppendText(str(text)+\"\\n\")\r\n    \r\n    def on_char_click(self,event):\r\n        \"\"\"When a key is pressed, if it is the space key, move to the next mass.\r\n        Used in the semi-auto reading mode; the program waits for the user to put a new mass on.\"\"\"\r\n        keycode = event.GetKeyCode()\r\n        if keycode == wx.WXK_SPACE:\r\n            if self.weigher:\r\n                self.weigher.space_pressed = True\r\n\r\n    def recieve_results(self,data,positions):\r\n        \"\"\"Passes the data to the parent window\"\"\"\r\n        self.parent.recieve_results(data,positions)\r\n","sub_path":"modules/main_collect.py","file_name":"main_collect.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"635078424","text":"from .signals import object_viewed_signal\n\n# Custom View Mixin (for Class Based Views)\nclass ObjectViewMixin(object):\n    # A dispatch() override might run .get_object() twice, since some class-based\n    # views already call .get_object() themselves:\n    # def dispatch(self, request, *args,**kwargs):\n    #     try:\n    #         instance = self.get_object()\n    #     except DoesNotExist:\n    #         instance = None\n    #     if instance is not None:\n    #         object_viewed_signal.send(instance.__class__, instance=instance, request=request)\n    #     return super(ObjectViewMixin, self).dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, *args, **kwargs):\n        context = super(ObjectViewMixin, self).get_context_data(*args, **kwargs)\n        request = self.request\n        
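# The signal only fires when the view resolved a single object; list views\n        # leave 'object' out of the context, so nothing is sent for them.\n        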
instance = context.get('object')\n        if instance:\n            object_viewed_signal.send(instance.__class__, instance=instance, request=request)\n        return context","sub_path":"analytics/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"19631650","text":"import numpy as np\nfrom scipy import special\nif __name__==\"__main__\":\n    print(np.sqrt(6*np.sum(1/np.arange(1,100000,dtype=np.float)**2)))\n    method='strict'\n    if method=='simulation':\n        p=0.6\n        a,b,c=0,0,0\n        t,T=0,1000000\n        while t<T:\n            a,b=0,0\n            while a<11 and b<11:\n                if np.random.rand()<p:\n                    a+=1\n                else:\n                    b+=1\n            if a>b:\n                c+=1\n            t+=1\n        print(float(c)/float(T))\n    elif method=='simple':\n        answer=0\n        p=0.6\n        N=11\n        for x in np.arange(N):\n            answer +=special.comb(N+x-1,x)*((1-p)**x)*(p**N)\n        print(answer)\n    else:\n        answer=0\n        p=0.6\n        N=11\n        for x in np.arange(N-1):\n            answer +=special.comb(N+x-1,x)*((1-p)**x)*(p**N)\n        p10=special.comb(2*(N-1),N-1)*((1-p)**x)**(N-1)\n        t=0\n        for n in np.arange(100):\n            t+=(2*p*(1-p))**n*p*p\n        answer+=p10*t\n        print(answer)","sub_path":"邹博 机器学习/模拟乒乓球.py","file_name":"模拟乒乓球.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"71339479","text":"from database import DB\n\nclass Catagory:\n\tdef __init__(self, id, name):\n\t\tself.id = id\n\t\tself.name = name\n\t\n\t@staticmethod\n\tdef all():\n\t\twith DB() as db:\n\t\t\trows = db.execute('SELECT * FROM catagories').fetchall()\n\t\t\treturn [Catagory(*row) for row in rows]\n\t\n\t@staticmethod\n\tdef find(catagory):\n\t\twith DB() as db:\n\t\t\trow = db.execute('SELECT * FROM catagories WHERE name = ?', (catagory,)).fetchone()\n\t\t\tif not row:\n\t\t\t\treturn None\n\t\t\treturn Catagory(*row)\n\t\n\t@staticmethod\n\tdef find_by_id(id):\n\t\twith DB() as db:\n\t\t\trow = db.execute('SELECT * FROM catagories WHERE id = ?', (id,)).fetchone()\n\t\t\tif not row:\n\t\t\t\treturn None\n\t\t\treturn Catagory(*row)\n","sub_path":"catagory.py","file_name":"catagory.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"77474211","text":"import discord\nimport os\nimport asyncio\nimport requests\nimport json\nimport random\nimport time\nfrom datetime import datetime\nimport pytz\nimport aiohttp\nimport io\nfrom PIL import Image, ImageDraw, ImageFont\nfrom discord.ext import commands\nfrom staying_alive import staying_alive\nfrom variable_s import *\nfrom react_commands import *\nfrom aiohttp import request\nfrom discord import File\n\n\nintents = discord.Intents.default()\nintents.members = True\nbot = commands.Bot(command_prefix=commands.when_mentioned_or(\"tt\"),case_insensitive=True, intents=intents)\n\n@bot.event\nasync def on_ready():\n\tprint('{0.user} is in! '.format(bot))\n\tawait bot.change_presence(activity=discord.Game('EPIC RPG'))\n\n@bot.event\nasync def on_member_join(member):\n    guild = bot.get_guild(830593281817182208)\n    channel = bot.get_channel(830593281817182212)\n    
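# The guild/channel IDs above are hard-coded to this bot's own server; the\n    # embed built below is the welcome card posted in that channel.\n    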
emb = discord.Embed(title='歡迎新成員既加入', description=f'{member.mention} 你終於來咗啦? 想睇咩都講聲啊!唔洗怕羞既尼度!', colour=0xFF99FF)\n    fields = [(\"歡迎來到\",f'{guild.name}', True),(\"Server現有成員\", f'{len(guild.members)}', True),]\n    for name, value, inline in fields:\n        emb.add_field(name=name, value=value, inline=inline)\n    emb.set_author(name='New Member', icon_url=f'{member.avatar_url}')\n    emb.set_footer(text=\"多d宣傳suck友入來一齊睇片啦~ →_→\")\n    emb.set_thumbnail(url=f'{member.avatar_url}')\n    # emb.set_image(url=f'{member.avatar_url}')\n    msg = await channel.send(embed=emb)\n\n\n@bot.event\nasync def on_voice_state_update(member, before, after):\n    if not member.bot:\n        # after.channel is None when the member leaves voice entirely\n        if after.channel and after.channel.id == 827272740817076235: # voice channel\n            if not before.channel or before.channel.id != 827272740817076235:\n                channel = bot.get_channel(827278669494878278) # text channel \n                await channel.send(f'{member.name} = Here')\n        if after.channel and after.channel.id == 837057086339809360: # voice channel\n            if not before.channel or before.channel.id != 837057086339809360:\n                channel = bot.get_channel(837057315365715969) # text channel\n                if member.id != 809795855988555787:\n                    if member.nick is None:\n                        await channel.send(f'{member.name}入咗來,條友仲未改名 >_>')\n                    else:\n                        await channel.send(f'{member.nick}來咗啦!你今日忍咗未?')\n\n@bot.command(name='ser')\nasync def fetchServerInfo(ctx):\n    guild = ctx.guild\n    emb = discord.Embed(title=\"Server 資料\", description=f\"哩個ser我暫時俾{random.randint(6,10)}分!\", colour=0xFF0000)\n    fields = [(\"Server Size\", f'{len(guild.members)}', True),(\"Server Owner\", f'{guild.owner.display_name}', True),('Server \"起源地\"', f'{guild.region}', False)]\n    for name, value, inline in fields:\n        emb.add_field(name=name, value=value, inline=inline)\n    emb.set_author(name=f'{guild.name}', icon_url=ctx.guild.icon_url)\n    emb.set_footer(text=\"つづく\")\n    emb.set_thumbnail(url=ctx.guild.icon_url)\n    emb.set_image(url=ctx.guild.icon_url)\n    msg = await ctx.channel.send(embed=emb)\n\n# R&M API: random character generator\n@bot.command(name=\"RnM\")\nasync def rick_morty(ctx):\n    character_num = random.randint(1,671)\n    endpoint = f\"https://rickandmortyapi.com/api/character/{character_num}\"\n    response = requests.get(endpoint).json()\n    emb = discord.Embed(title=f'角色:{response[\"name\"]}', description=f\"```狀態:{response['status']}```\", colour=0x33ffff)\n    fields = [(\"人物ID\",f\"{response['id']}\", True),(\"性別\", f\"{response['gender']}\", True),(\"種族\", f\"{response['species']}\", True),(\"起源\", f\"{response['type']}\",True),(\"Origin\", f\"{response['origin']['name']}\",False),(\"位置\", f\"{response['location']['name']}\",False)]\n    for name, value, inline in fields:\n        emb.add_field(name=name, value=value, inline=inline)\n    emb.set_author(name='Rick & Morty 人物卡')#, icon_url=ctx.guild.icon_url)\n    emb.set_footer(text=f\"出現集數:{len(response['episode'])}\")\n    emb.set_thumbnail(url=f'{response[\"image\"]}')\n    emb.set_image(url=f'{response[\"image\"]}')\n    msg = await ctx.channel.send(embed=emb)\n\n@bot.command()\nasync def nick(ctx, *, user: discord.Member):\n    if user.nick is None:\n        await ctx.send(f\"條友仔無d創意,個個ser既名都係: {user.display_name}\")\n    else:\n        await ctx.send(f\"佢係尼個ser既名係: {user.nick}\")\n\n\n@bot.command()\nasync def pl(ctx, url : str):\n    channel = ctx.author.voice.channel\n\n    voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)\n    if voice and not voice.is_connected():\n        await voice.move_to(channel)\n    else:\n        voice = await channel.connect()\n\n    voice.play(discord.FFmpegPCMAudio(f\"./MP3/{url}.mp3\"))\n\n    # wait for playback to finish, give it a second to resume, then leave\n    while voice.is_playing():\n        await asyncio.sleep(1)\n    await asyncio.sleep(1)\n    if not voice.is_playing():\n        await 
voice.disconnect()\n\n\n@bot.command(name='hurt')\nasync def canvas(ctx,*,txt):\n image = Image.open(f'./Pic/dog.png')\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(\"SansTC.ttf\", 18)\n text_width, text_height = draw.textsize(txt, font=font)\n x = 120\n y = 280\n draw.text( (x, y), txt, fill=(128,128,130), font=font)\n buffer = io.BytesIO()\n image.save(buffer, format='PNG') \n buffer.seek(0) \n await ctx.send(file=File(buffer, 'myimage.png'))\n\n\n@bot.command(name = \"whowin\")\nasync def whowin(ctx,*,member: discord.Member = None):\n image = Image.open(f'./Pic/whowin.png')\n print('size:', image.size)\n IMAGE_WIDTH, IMAGE_HEIGHT = image.size\n draw = ImageDraw.Draw(image)\n buffer = io.BytesIO()\n image.save(buffer, format='PNG') \n buffer.seek(0) \n \n AVATAR_SIZE = 128\n avatar_asset = ctx.author.avatar_url_as(format='jpg', size=AVATAR_SIZE)\n buffer_avatar = io.BytesIO()\n await avatar_asset.save(buffer_avatar)\n buffer_avatar.seek(0)\n avatar_image = Image.open(buffer_avatar)\n avatar_image = avatar_image.resize((AVATAR_SIZE + 110, AVATAR_SIZE + 110))\n x = 10\n y = (IMAGE_HEIGHT-AVATAR_SIZE)//2\n image.paste(avatar_image, (x, y))\n\n avatar_asset2 = member.avatar_url_as(format='jpg', size=AVATAR_SIZE)\n buffer_avatar2 = io.BytesIO()\n await avatar_asset2.save(buffer_avatar2)\n buffer_avatar2.seek(0)\n avatar_image2 = Image.open(buffer_avatar2)\n avatar_image2 = avatar_image2.resize((AVATAR_SIZE + 110, AVATAR_SIZE + 110))\n x = 270\n y = (IMAGE_HEIGHT-AVATAR_SIZE)//2\n image.paste(avatar_image2, (x, y))\n\n buffer_output = io.BytesIO()\n image.save(buffer_output, format='PNG') \n buffer_output.seek(0) \n msg = await ctx.send(file=File(buffer_output, 'myimage.png'))\n await msg.add_reaction(\"👈\")\n await msg.add_reaction(\"👉\")\n\n@bot.command(name = \"mstake\")\nasync def whowin(ctx,*,member: discord.Member = None):\n image = Image.open(f'./Pic/mstake.png')\n print('size:', image.size)\n IMAGE_WIDTH, IMAGE_HEIGHT = image.size\n draw = ImageDraw.Draw(image)\n buffer = io.BytesIO()\n image.save(buffer, format='PNG') \n buffer.seek(0) \n \n AVATAR_SIZE = 128\n avatar_asset = member.avatar_url_as(format='jpg', size=AVATAR_SIZE)\n buffer_avatar = io.BytesIO()\n await avatar_asset.save(buffer_avatar)\n buffer_avatar.seek(0)\n avatar_image = Image.open(buffer_avatar)\n avatar_image = avatar_image.resize((AVATAR_SIZE + 350, AVATAR_SIZE + 350))\n x = 480\n y = ((IMAGE_HEIGHT-AVATAR_SIZE)//2)+80\n image.paste(avatar_image, (x, y))\n\n buffer_output = io.BytesIO()\n image.save(buffer_output, format='PNG') \n buffer_output.seek(0) \n await ctx.send(file=File(buffer_output, 'myimage.png'))\n\n@bot.command(name = \"shake\")\nasync def whowin(ctx,*,member: discord.Member = None):\n image = Image.open(f'./Pic/shake.png')\n IMAGE_WIDTH, IMAGE_HEIGHT = image.size\n print('size:', image.size)\n draw = ImageDraw.Draw(image)\n buffer = io.BytesIO()\n image.save(buffer, format='PNG') \n buffer.seek(0) \n \n AVATAR_SIZE = 128\n avatar_asset = ctx.author.avatar_url_as(format='jpg', size=AVATAR_SIZE)\n buffer_avatar = io.BytesIO(await avatar_asset.read())\n avatar_image = Image.open(buffer_avatar)\n avatar_image = avatar_image.resize((AVATAR_SIZE - 30 , AVATAR_SIZE - 30))\n\n circle_image = Image.new('L', (AVATAR_SIZE - 30, AVATAR_SIZE - 30))\n circle_draw = ImageDraw.Draw(circle_image)\n circle_draw.ellipse((0, 0, AVATAR_SIZE - 30, AVATAR_SIZE - 30), fill=255)\n x = 1\n y = 110\n image.paste(avatar_image, (x, y), circle_image)\n\n avatar_asset2 = member.avatar_url_as(format='jpg', 
size=AVATAR_SIZE)\n buffer_avatar2 = io.BytesIO(await avatar_asset2.read())\n avatar_image2 = Image.open(buffer_avatar2)\n avatar_image2 = avatar_image2.resize((AVATAR_SIZE - 30 , AVATAR_SIZE - 30))\n circle_image2 = Image.new('L', (AVATAR_SIZE - 30, AVATAR_SIZE - 30))\n circle_draw2 = ImageDraw.Draw(circle_image2)\n circle_draw2.ellipse((0, 0, AVATAR_SIZE - 30, AVATAR_SIZE - 30), fill=255)\n x = 450\n y = 120\n image.paste(avatar_image2, (x, y), circle_image2)\n\n buffer_output = io.BytesIO()\n image.save(buffer_output, format='PNG') \n buffer_output.seek(0) \n await ctx.send(file=File(buffer_output, 'myimage.png'))\n\n \n@bot.command(name=\"fact\")\nasync def animal_fact(ctx, *, animal: str):\n if animal.lower() in (\"dog\", \"cat\", \"panda\", \"fox\", \"bird\", \"koala\"):\n endpoint = f\"https://some-random-api.ml/facts/{animal.lower()}\"\n response = requests.get(endpoint).json()\n await ctx.channel.send(response[\"fact\"])\n else:\n await ctx.channel.send(\"自己Google啦!\")\n\n@bot.command(name=\"do\")\nasync def expre_ss(ctx, *, action: str):\n if action.lower() in (\"wink\", \"pat\", \"hug\", \"face-palm\"):\n url = f'https://some-random-api.ml/animu/{action.lower()}'\n response = requests.get(url, headers={\"Accept\": \"application/json\"}).json()\n await ctx.channel.send(response[\"link\"])\n else:\n await ctx.channel.send(\"https://tenor.com/view/what-do-you-wanna-do-edward-asner-abe-rifkin-dead-to-me-what-should-we-do-gif-17803589\")\n\n@bot.command(name=\"img\")\nasync def img(ctx, *, img: str):\n if img.lower() in (\"dog\", \"cat\", \"panda\", \"fox\", \"red_panda\", \"koala\", \"birb\", \"racoon\", \"kangaroo\", \"whale\", \"pikachu\"):\n url = f'https://some-random-api.ml/img/{img.lower()}'\n response = requests.get(url, headers={\"Accept\": \"application/json\"}).json()\n await ctx.channel.send(response[\"link\"])\n else:\n await ctx.channel.send(\"Google Image幫到你!\")\n\n@bot.command(name=\"jokea\")\nasync def ichdj(ctx):\n url = 'https://icanhazdadjoke.com/'\n response = requests.get(url, headers={\"Accept\": \"application/json\"}).json()\n await ctx.channel.send(response[\"joke\"])\n\n@bot.command(name=\"jokeb\")\nasync def SRF_joke(ctx):\n endpoint = \"https://some-random-api.ml/joke\"\n response = requests.get(endpoint).json()\n await ctx.channel.send(response[\"joke\"])\n\n@bot.command(name=\"meme\")\nasync def SRF_meme(ctx):\n endpoint = \"https://some-random-api.ml/meme\"\n response = requests.get(endpoint).json()\n await ctx.channel.send(response[\"image\"])\n\n@bot.command()\nasync def yn(ctx, *, message):\n emb = discord.Embed(title=f'投票:{message}',description=f'我覺得係{random.choice(ynchoice)}')\n msg = await ctx.channel.send(embed=emb)\n await msg.add_reaction('👍')\n await msg.add_reaction('👎')\n\n@bot.command(pass_context = True)\n@commands.has_permissions(administrator=True, manage_messages=True)\nasync def clr(ctx, number):\n mgs = [] #Empty list to put all the messages in the log\n number = int(number) #Converting the amount of messages to delete to an integer\n async for x in ctx.channel.history(limit = number):\n mgs.append(x)\n await ctx.message.channel.delete_messages(mgs)\n await ctx.channel.send('還我河蟹社會!')\n\n@bot.command()\nasync def rate(ctx):\n emb = discord.Embed(title=\"你會俾幾分?\",description=f'我唔係逼你,但如果係frd既就比{random.randrange(10)}分!')\n msg = await ctx.channel.send(embed=emb)\n number_of_responses = 10\n emoji_numbers = ['1️⃣', \"2️⃣\", \"3️⃣\", \"4️⃣\", \"5️⃣\", \"6️⃣\", \"7️⃣\", \"8️⃣\", \"9️⃣\", '🔟']\n for i in range(number_of_responses):\n await 
msg.add_reaction(emoji_numbers[i])\n\nasync def react(message):\n await message.add_reaction('🫂') #841102889510371419\n await message.add_reaction('<:LOVEU2:831585270398189600>')\n await message.channel.send(random.choice(starter_encouragements))\n\n\n@bot.command(name= 'lgbtq')\nasync def exp_res(ctx, member: discord.Member=None):\n if not member:\n member = ctx.author\n my_url = f'https://some-random-api.ml/canvas/gay?avatar={member.avatar_url_as(format=\"png\")}' \n async with aiohttp.ClientSession() as session:\n async with session.get(my_url) as resp:\n if resp.status != 200:\n return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n data = io.BytesIO(await resp.read())\n await ctx.channel.send(file=discord.File(data, 'gay.png'))\n \n@bot.command(name= 'wasted')\nasync def exp_res(ctx, member: discord.Member=None):\n if not member:\n member = ctx.author\n my_url = f'https://some-random-api.ml/canvas/wasted?avatar={member.avatar_url_as(format=\"png\")}' \n async with aiohttp.ClientSession() as session:\n async with session.get(my_url) as resp:\n if resp.status != 200:\n return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n data = io.BytesIO(await resp.read())\n await ctx.channel.send(file=discord.File(data, 'wasted.png'))\n\n\n@bot.command (name= 'rps')\nasync def rps(ctx):\n choice = (\"🍞\", \"✂️\", \"👊\")\n pc = random.choice(choice)\n msg = await ctx.channel.send(\"清侵,無教,叉燒包!\")\n await msg.add_reaction(\"🍞\")\n await msg.add_reaction(\"✂️\")\n await msg.add_reaction(\"👊\")\n\n try:\n def check1(reaction, user):\n return user != bot.user \n reaction, user = await bot.wait_for(\"reaction_add\", timeout=5.0, check=check1)\n \n await asyncio.sleep(1)\n\n if str(reaction.emoji) == pc:\n await ctx.channel.send('打和!SUPER!')\n elif str(reaction.emoji) == \"🍞\":\n if pc == \"👊\":\n await ctx.channel.send('哎呀~衰鬼~我居然比你包住咗~ <3')\n elif pc == \"✂️\":\n await ctx.channel.send(\"哼!你個廢柴!果然輸比我個奪命鉸剪腳呢\")\n elif str(reaction.emoji) == \"✂️\":\n if pc == \"🍞\":\n await ctx.channel.send('哎呀~我個法國麵包啊~~~剪細力啦好心你~ <3')\n elif pc == \"👊\":\n await ctx.channel.send(\"超!你個軟腳蟹!睇就知唔夠我硬啦!\")\n elif str(reaction.emoji) == \"👊\":\n if pc == \"✂️\":\n await ctx.channel.send('哼~比你難得贏翻一次甘多多啦!')\n elif pc == \"🍞\":\n await ctx.channel.send(\"垃圾!我求其出個小籠包都贏你啊!\")\n except asyncio.TimeoutError:\n why = [\"你唔好聽日先出?\",\"我寧願你彈弓手好過咯!\",\"你以為bot就無生活可以等你成世?\"]\n await ctx.channel.send(f\"諗甘耐做咩?{random.choice(why)}\")\n\n\n@bot.command(name= 'passed')\nasync def exp_res(ctx, member: discord.Member=None):\n if not member:\n member = ctx.author\n my_url = f'https://some-random-api.ml/canvas/passed?avatar={member.avatar_url_as(format=\"png\")}' \n async with aiohttp.ClientSession() as session:\n async with session.get(my_url) as resp:\n if resp.status != 200:\n return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n data = io.BytesIO(await resp.read())\n await ctx.channel.send(file=discord.File(data, 'passed.png'))\n\n@bot.command(name= 'jail')\nasync def exp_res(ctx, member: discord.Member=None):\n if not member:\n member = ctx.author\n my_url = f'https://some-random-api.ml/canvas/jail?avatar={member.avatar_url_as(format=\"png\")}' \n async with aiohttp.ClientSession() as session:\n async with session.get(my_url) as resp:\n if resp.status != 200:\n return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n data = io.BytesIO(await resp.read())\n await ctx.channel.send(file=discord.File(data, 'jail.png'))\n\n@bot.command(name= 'communism')\nasync def exp_res(ctx, member: discord.Member=None):\n if not member:\n member = ctx.author\n my_url = 
f'https://some-random-api.ml/canvas/comrade?avatar={member.avatar_url_as(format=\"png\")}' \n    async with aiohttp.ClientSession() as session:\n        async with session.get(my_url) as resp:\n            if resp.status != 200:\n                return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n            data = io.BytesIO(await resp.read())\n    await ctx.channel.send(file=discord.File(data, 'comrade.png'))\n\n@bot.command(name= 'pixel')\nasync def exp_res(ctx, member: discord.Member=None):\n    if not member:\n        member = ctx.author\n    my_url = f'https://some-random-api.ml/canvas/pixelate?avatar={member.avatar_url_as(format=\"png\")}' \n    async with aiohttp.ClientSession() as session:\n        async with session.get(my_url) as resp:\n            if resp.status != 200:\n                return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n            data = io.BytesIO(await resp.read())\n    await ctx.channel.send(file=discord.File(data, 'pixelate.png'))\n\n@bot.command(name= 'cm')\nasync def exp_res(ctx, member: discord.Member=None,*, message):\n    if not member:\n        member = ctx.author\n    my_url = f'https://some-random-api.ml/canvas/youtube-comment?avatar={member.avatar_url_as(format=\"png\")}&comment={message}&username={member.display_name}' \n    async with aiohttp.ClientSession() as session:\n        async with session.get(my_url) as resp:\n            if resp.status != 200:\n                return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n            data = io.BytesIO(await resp.read())\n    await ctx.channel.send(file=discord.File(data, 'youtube-comment.png'))\n\n@bot.command(name= 'dog')\nasync def exp_res(ctx, member: discord.Member=None, *, msg):\n    if not member:\n        member = ctx.author\n    my_url = f'https://some-random-api.ml/canvas/its-so-stupid?avatar={member.avatar_url_as(format=\"png\")}&dog={msg}' \n    async with aiohttp.ClientSession() as session:\n        async with session.get(my_url) as resp:\n            if resp.status != 200:\n                return await ctx.channel.send('網絡唔穩定,你遲d再整過')\n            data = io.BytesIO(await resp.read())\n    await ctx.channel.send(file=discord.File(data, 'its-so-stupid.png'))\n\n@bot.command(name='CAtime')\nasync def CAtime(context):\n    tz_CA = pytz.timezone('America/Toronto')\n    CA_T = datetime.now(tz_CA)\n    await context.channel.send(CA_T.strftime(\"%a %d %b,%y %I:%M:%S %p\"))\n\n@bot.command(name='UKtime')\nasync def UK_T(message):\n    tz_UK = pytz.timezone('Europe/London')\n    UK_T = datetime.now(tz_UK)\n    await message.channel.send(UK_T.strftime(\"%a %d %b,%y %I:%M:%S %p\"))\n\n@bot.command(name='HKtime')\nasync def HK_T(message):\n    tz_HK = pytz.timezone('Asia/Hong_Kong')\n    HK_T = datetime.now(tz_HK)\n    await message.channel.send(HK_T.strftime(\"%a %d %b,%y %I:%M:%S %p\"))\n\n\n@bot.event\nasync def on_message(message):\n    await bot.process_commands(message)\n    if message.author == bot.user or message.author.bot:\n        return\n    username = str(message.author).split('#')[0]\n    msg = message.content.lower()\n    \n    if msg.endswith(\".gif\") or msg.startswith(\"http\"): \n        return\n\n    if any(word in msg for word in sad_words):\n        await react(message)\n    \n    if msg.startswith('ping'):\n        await message.channel.send(f'{bot.latency*1000}(毫秒)')\n\n    if any(word in msg for word in stel_la):\n        tmpmsg = await message.channel.send('你講緊Stella?')\n        await asyncio.sleep(3)  # don't block the event loop with time.sleep\n        await tmpmsg.delete()\n\n    if ('比幾分' in msg):\n        await message.channel.send(f'我比較公道,會比{random.randrange(10)}分!')\n\n    if msg.startswith('bling'):\n        await bling(message)\n\n    if any(map(msg.startswith, greet_ings)):\n        await message.add_reaction('<:LOVEU2:831585270398189600>')\n\n    if ('雞湯' in msg):\n        await message.channel.send(random.choice(poison_soup))\n\n    
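# The keyword checks below are independent if-blocks, not an elif-chain, so\n    # a single message can trigger several replies in a row.\n    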
if msg.startswith('挽'):\n        await message.channel.send('https://tenor.com/view/holding-hands-dog-cars-gif-13660273')\n\n    if msg.startswith('say'):\n        tmp = message.content.split(\" \", 1)\n        if len(tmp) == 1:\n            await message.channel.send('https://tenor.com/view/wow-what-say-what-gif-16598538')\n        else:\n            await message.delete()\n            temp_mg = await message.channel.send(tmp[1])\n            await asyncio.sleep(5)  # don't block the event loop with time.sleep\n            await temp_mg.delete()\n\n    if ('joker' in msg):\n        await message.channel.send('https://tenor.com/view/batman-joker-heath-ledger-clap-clapping-gif-11060757')\n\n@bot.event\nasync def on_command_error(ctx, error):\n    if isinstance(error, commands.errors.MissingRequiredArgument):\n        await ctx.channel.send(\"遺失參數\")\n    elif isinstance(error, commands.errors.CommandNotFound):\n        await ctx.channel.send(\"無尼個cmd啊!係咪打錯字啊?\")\n    else:\n        await ctx.channel.send(str(error))\n\nstaying_alive()\nbot.run(os.getenv('TOKEN'))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"462852130","text":"from pathlib import Path\n# tensoroverflow\n# ripple net movie default args\nripple_net_movie_args = {\n    'model':'ripple_net',\n    'dataset_path': Path('../../data'),\n    'dataset':'movie',\n    'dim':16,\n    'n_hop':2,\n    'kge_weight':0.01,\n    'l2_weight':1e-7,\n    'lr':0.02,\n    'n_memory':32,\n    'log_path':Path('../../logs'),\n    'model_path':Path('../../models'),\n    'file_name':'ripple_net_movie',\n    'n_epoch':10,\n    'batch_size':1024,\n    'show_eval':True,\n    'show_loss':True,\n    'test':True,\n    'save_model':True,\n    'show_train_eval':True,\n    'load':True,\n    'max_loss':100\n}\nripple_net_book_args = {\n    'model': 'ripple_net',\n    'dataset_path': Path('../../data'),\n    'dataset': 'book',\n    'dim': 4,\n    'n_hop': 2,\n    'kge_weight': 1e-2,\n    'l2_weight': 1e-5,\n    'lr': 1e-3,\n    'n_memory': 32,\n    'log_path': Path('../../logs'),\n    'model_path': Path('../../models'),\n    'file_name': 'ripple_net_book',\n    'n_epoch': 20,\n    'batch_size': 1024,\n    'show_eval': True,\n    'show_loss': True,\n    'test': True,\n    'save_model': True,\n    'show_train_eval': True,\n    'load':False,\n    'max_loss': 100\n\n}\n# ripple net plus default args\nripple_net_plus_movie_args = {\n    'model':'ripple_net_plus',\n    'dataset_path': Path('../../data'),\n    'dataset':'movie',\n    'dim':16,\n    'n_hop':3,\n    'kge_weight':0.01,\n    'l2_weight':1e-7,\n    'lr':0.02,\n    'n_memory':32,\n    'dropout':0.8,\n    'predict_mode':'dense',\n    'log_path':Path('../../logs'),\n    'model_path':Path('../../models'),\n    'file_name':'ripple_net_plus_movie',\n    'n_epoch':20,\n    'batch_size':1024,\n    'show_eval':True,\n    'show_loss':False,\n    'test':True,\n    'save_model':True,\n    'show_train_eval':False,\n    'load':True,\n    'max_loss': 100\n}\n#run_ripple_net_plus_book_6.9e-05lr_4.2e-02kg_4.8e-08l2_0.50dp_44m_2h_44d_basic\nripple_net_plus_book_args = {\n    'model':'ripple_net_plus',\n    'dataset_path': Path('../../data'),\n    'dataset':'book',\n    'dim':4,\n    'n_hop':2,\n    'kge_weight':0.01,\n    'l2_weight':1e-5,\n    'lr':0.001,\n    'n_memory':32,\n    'dropout':0.8,\n    'predict_mode':'basic',\n    'log_path':Path('../../logs'),\n    'model_path':Path('../../models'),\n    'file_name':'ripple_net_plus_book_1',\n    'n_epoch':20,\n    'batch_size':1024,\n    'show_eval':True,\n    'show_loss':True,\n    'test':True,\n    'save_model':True,\n    'show_train_eval':False,\n    'load':True,\n    'max_loss': 100\n}\n\n\ndef args_convert(args):\n    target_args = dict()\n    target_args['static_args']={\n        'model_args': {\n            'dim': args['dim'],\n            'n_hop': args['n_hop'],\n            'kge_weight': args['kge_weight'],\n            'l2_weight': 
args['l2_weight'],\n 'lr': args['lr'],\n 'n_memory': args['n_memory'],\n },\n 'dataset_args': {\n 'root_path': args['dataset_path'],\n 'dataset': args['dataset'],\n 'dim': args['dim'],\n 'n_hop': args['n_hop'],\n 'n_memory': args['n_memory']\n },\n 'model':args['model'],\n 'log_path': args['log_path'],\n 'model_path': args['model_path']/args['file_name'],\n 'file_name': args['file_name'],\n 'n_epoch': args['n_epoch'],\n 'batch_size': args['batch_size'],\n 'max_loss':args['max_loss']\n }\n target_args['runtime_args']={\n 'show_eval': args['show_eval'],\n 'show_loss': args['show_loss'],\n 'test': args['test'],\n 'save_model': args['save_model'],\n 'show_train_eval': args['show_train_eval']\n }\n if args['model']=='ripple_net_plus':\n target_args['static_args']['model_args']['dropout']=args['dropout']\n target_args['static_args']['model_args']['predict_mode']=args['predict_mode']\n return target_args","sub_path":"src/tensorflow/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593600591","text":"# magic cards module\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nimport os\nimport random\nfrom datetime import datetime\nimport json\nfrom sopel.module import commands, rule\nfrom sopel.bot import Sopel\nimport urllib.request\nimport requests\n\n@commands('draw')\n@rule('$nickname draw')\ndef drawCard(bot, trigger):\n filename = '/var/www/py/sopel/modules/mtgcreatures.txt'\n line_num = 0\n selected_line = ''\n with open(filename) as f:\n while 1:\n line = f.readline()\n if not line: break\n line_num += 1\n if random.uniform(0, line_num) < 1:\n selected_line = line\n card = selected_line.strip()\n cardDrawRaw = requests.get('https://api.deckbrew.com/mtg/cards?&multiverseid=' + card)\n cardDrawRawText = cardDrawRaw.text\n cardDrawRawJson = json.loads(cardDrawRawText)\n cardID = cardDrawRawJson[0]['id']\n cardDrawRawWithID = requests.get('https://api.deckbrew.com/mtg/cards/' + cardID)\n cardDrawRawWithIDText = cardDrawRawWithID.text\n cardDrawRawWithIDJson = json.loads(cardDrawRawWithIDText)\n cardName = cardDrawRawWithIDJson['name']\n creaturePower = cardDrawRawWithIDJson['power'][0]\n creatureToughness = cardDrawRawWithIDJson['toughness'][0]\n creatureColor = cardDrawRawWithIDJson['colors'][0]\n cardURLRaw = cardDrawRawWithIDJson['editions'][0]['image_url']\n cardURLGet = requests.get('https://coinurl.com/api.php?uuid=53759db04170a030904396&url=' + cardURLRaw)\n cardURLGetText = cardURLGet.text\n cardURL = str(cardURLGetText)\n bot.say(str(trigger.nick) + ' has drawn ' + cardName + '. This ' + creatureColor + ' creature has ' + creaturePower + '/' + creatureToughness + '. Say .draw and then .fight to challenge him in a fight. 
To view ' + cardName + ', visit ' + cardURL)\n \n\n ","sub_path":"sopel/modules/mtg.py","file_name":"mtg.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"127554201","text":"\"\"\"\n@brief test log(time=2s)\n\nskip this test for regular run\n\"\"\"\n\nimport sys\nimport os\nimport unittest\n\ntry:\n import src\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n\ntry:\n import pyquickhelper as skip_\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"pyquickhelper\",\n \"src\")))\n if path not in sys.path:\n sys.path.append(path)\n if \"PYQUICKHELPER\" in os.environ and len(os.environ[\"PYQUICKHELPER\"]) > 0:\n sys.path.append(os.environ[\"PYQUICKHELPER\"])\n import pyquickhelper as skip_\n\n\nfrom pyquickhelper.loghelper import fLOG\n\n\nclass TestScriptInstall(unittest.TestCase):\n\n def test_pypi(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n\n if sys.version_info[0] == 2:\n return\n import xmlrpc.client as xmlrpc_client\n module_name = \"version_information\"\n url = 'https://pypi.python.org/pypi'\n functions = []\n with xmlrpc_client.ServerProxy(url) as pypi:\n for f in pypi.system.listMethods():\n fLOG(f)\n sig = pypi.system.methodSignature(f)\n fLOG(\" \", sig)\n h = pypi.system.methodHelp(f)\n fLOG(\" \", h)\n functions.append(f)\n if len(functions) > 1:\n break\n available = pypi.package_releases(module_name, True)\n fLOG(available)\n assert len(functions) > 1\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_packaged/test_script_install.py","file_name":"test_script_install.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"107639645","text":"from googleapiclient.discovery import build\nfrom httplib2 import http\nfrom oauth2client import file, client, tools\nfrom collections import namedtuple\nfrom app import db\nfrom app.models import TillerTransaction, tiller_map\nimport csv\nfrom datetime import date, datetime\nfrom decimal import Decimal\n\n\nclass SeedData(object):\n\n def __init__(self):\n self.sheet_id = '1PY4A10ODPBOx4F8hqjwlQ-aDRIsilCwB1vrk7HCrvx8' #tiller raw feed\n self.range_name = 'Transactions'\n\n def seed_db(self):\n \"\"\" seed_db is going to read data from csv into db \"\"\"\n obj_list = []\n temp_dict = {}\n with open(r'TillerTransaction.csv') as f:\n reader = csv.DictReader(f)\n for row in reader:\n for k, v in row.items():\n if tiller_map[k] is not None:\n temp_dict.update({tiller_map[k]: v})\n #obj_list.append(self._transform_values(temp_dict))\n obj_list.append(TillerTransaction(**self._transform_values(temp_dict)))\n\n for ob in obj_list:\n db.session.add(ob)\n db.session.commit()\n return obj_list\n\n\n def _transform_values(self,temp_dict):\n\n temp_dict['dateadded'] = datetime.strptime(str(temp_dict['dateadded']), '%m/%d/%Y %H:%M:%S')\n temp_dict['trans_date'] = datetime.strptime(str(temp_dict['trans_date']), '%m/%d/%Y' )\n temp_dict['amount'] = Decimal(temp_dict['amount'].replace('$', '').replace(',', ''))\n return temp_dict\n\n def get_new_transactions(self):\n\n \"\"\" gets transactions from Google Spread sheets and inserts them into the 
database \"\"\"\n pass\n\n\n#Check Number\tMonth\tWeek\t\tDup Score\tDup Match\n","sub_path":"dataseed.py","file_name":"dataseed.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503481295","text":"import datetime\nfrom django.shortcuts import redirect\nfrom django.views.generic import TemplateView, FormView, RedirectView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.contrib.auth import login, logout\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom uw_dashboard.forms import UploadFileForm, SetUserPasswordForm, DeleteUserForm\nfrom uw_dashboard.models import Reporting_Service, Search_History\nfrom django.contrib.auth.models import User\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connection, models\nimport json, models\n\nreporting = Reporting_Service(None)\n\nclass Homepage(LoginRequiredMixin, TemplateView):\n template_name = \"homepage.html\"\n\nclass Profile(LoginRequiredMixin, TemplateView):\n template_name = \"profile.html\"\n\n\n def post(self, request, *args, **kwargs):\n models = Search_History.objects.all()\n return render(request, 'profile.html', {'models': models})\n\n def dictfetchall(self, cursor):\n columns = [col[0] for col in cursor.description]\n results = []\n for row in cursor.fetchall():\n results.append(dict(zip(columns, row)))\n return results \n\n def my_custom_sql(self,query):\n with connection.cursor() as cursor:\n cursor.execute(query)\n results = self.dictfetchall(cursor)\n return results\n\nclass UploadView(LoginRequiredMixin, TemplateView):\n template_name = \"upload.html\"\n form_class = UploadFileForm\n\n def upload(self, request):\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n myfile = form.cleaned_data['File_To_Upload']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n file_path = fs.path(filename)\n try:\n fp = str(file_path)\n fy = int(form.cleaned_data['Funding_Year'])\n fo = form.cleaned_data['Overwrite_data']\n ft = str(form.cleaned_data['File_type'])\n result = reporting.import_data(fp, fy, fo, ft)\n self.addUploadHistory(ft, fy, fo, request.user)\n\n except Exception as e:\n fs.delete(filename)\n if 'parsing' in str(e):\n messages.error(request, \"%s, please upload a valid .csv file.\" % (str(e)))\n elif 'overwriting' in str(e):\n messages.error(request, \"%s, please wait for current updates to system to finish.\" % (str(e)))\n elif 'updating' in str(e):\n if 'CSV' in str(e):\n messages.error(request, '%s and try again later.' % (str(e)))\n else:\n messages.error(request, '%s, please try again later.' 
% (str(e)))\n else:\n messages.error(request, \"Something went wrong, %s returned\" % str(e))\n return redirect(reverse_lazy('upload'))\n\n fs.delete(filename)\n messages.success(request, result)\n return redirect(reverse_lazy('homepage'))\n else:\n return HttpResponseRedirect('homepage.html')\n\n def get(self, request, *args, **kwargs):\n form = UploadFileForm()\n history = self.getLastUploaded()\n return render(request, 'upload.html', {'form': form, 'history': history})\n\n def post(self, request, *args, **kwargs):\n return self.upload(request)\n\n def addUploadHistory(self,file_type,year,overwrite, user):\n if file_type == 'postal':\n file_type = 'Program Locations'\n elif file_type == 'output':\n file_type = 'Inventory and Outputs'\n\n year = '%s/%s' % (year, year+1)\n time = timezone.make_aware(datetime.datetime.now())\n\n user = models.User.objects.get(username=user)\n history = models.Upload_History(file_type=file_type,\n overwrite=overwrite,\n year=year,\n user=user,\n upload_time=time)\n history.save()\n\n def getLastUploaded(self):\n if 0 == models.Upload_History.objects.all().count():\n return \"No upload history found\"\n else:\n history = models.Upload_History.objects.latest('upload_time')\n un = history.user\n ft = history.file_type\n ut = history.upload_time.strftime(\"%B %d %Y %H:%M:%S\")\n if ft == \"Inventory and Outputs\":\n fy = ' %s' % history.year\n else:\n fy = ''\n if history.overwrite:\n return \"Last user to upload was %s at %s for %s%s with overwrite\" % (un, ut, ft, fy)\n else:\n return \"Last user to upload was %s at %s for %s%s without overwrite\" % (un, ut, ft, fy)\n\n\nclass LoginView(FormView):\n template_name = \"login.html\"\n form_class = AuthenticationForm\n success_url = reverse_lazy('homepage')\n\n def form_valid(self, form):\n user = form.get_user()\n login(self.request, user)\n reporting = Reporting_Service(user)\n return super(LoginView, self).form_valid(form)\n\n def form_invalid(self, form):\n response = super(LoginView, self).form_invalid(form)\n messages.error(self.request, 'Username or Password invalid. 
Please try again')\n return response\n\n\nclass LogoutView(RedirectView):\n url = reverse_lazy('login')\n\n def get(self, request, *args, **kwargs):\n logout(request)\n return super(LogoutView, self).get(request, *args, **kwargs)\n\n\nclass MapView(LoginRequiredMixin, TemplateView):\n\n def get(self, request, *args, **kwargs):\n location_list = reporting.queryMap([])\n return render(request, 'map.html', {'data_table': location_list})\n \n def post(self, request, *args, **kwargs):\n postalcodes = request.POST['postalcodes']\n postalcodes = str(postalcodes)\n postlist = postalcodes.split(',')\n location_list = reporting.queryMap(postlist)\n return render(request, 'map.html', {'data_table': location_list})\n\n\nclass AddUserView(LoginRequiredMixin, SuccessMessageMixin, CreateView):\n template_name = \"addUser.html\"\n form_class = UserCreationForm\n model = User\n\n success_url = reverse_lazy(\"addUser\")\n success_message = \"%(username)s was created successfully\"\n\n def form_invalid(self, form):\n for field in form:\n for error in field.errors:\n messages.error(self.request, error)\n\n for error in form.non_field_errors():\n messages.error(self.request, error)\n\n return super(AddUserView, self).form_invalid(form)\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.profile.is_admin:\n messages.error(request, \"Require administrator authentication to create new users\")\n return redirect(reverse_lazy('homepage'))\n\n return super(AddUserView, self).dispatch(request, *args, **kwargs)\n\nclass SearchResultsView(LoginRequiredMixin, TemplateView):\n template_name = \"search-results.html\"\n\n def post(self, request, *args, **kwargs):\n context = reporting.query_data(request.POST)\n\n if context.get('results') == []:\n messages.error(request, \"No data for selected filters\")\n return redirect(reverse_lazy('search-page'))\n\n self.addFiltersToDatabase(self.parseFilters(context['filters']), request.user)\n dt = self.getDataTable(context[\"results\"])\n pt = self.getPieTable(context[\"results\"])\n tt = self.getTotalsTable(context[\"totals\"])\n ft = self.getFiltersTable(context[\"filters\"])\n\n context[\"data_table\"] = dt\n context[\"pie_table\"] = pt\n context[\"totals_table\"] = tt\n context[\"filters_table\"] = ft\n\n res = render(request, 'search-results.html', context)\n return res\n\n def getDataTable(self, results):\n keys = [\n \"program_name\",\n \"agency_name\",\n \"allocation\",\n \"funding_stream\",\n \"grant_start_date\",\n \"grant_end_date\",\n \"element_names\",\n \"program_description\",\n \"postal_count\",\n \"postal_codes\"\n ]\n\n\n dataTable = []\n\n for data in results:\n array = [str(data[key]) for key in keys]\n dataTable.append(array)\n\n self.getMapInfo(dataTable)\n return dataTable\n\n def getMapInfo(self, list):\n for thing in list:\n postlist = thing[-1].split(',')\n location_list = reporting.queryMap(postlist)\n thing[-1] = location_list\n\n def getPieTable(self, results):\n keys = [\n \"city\",\n \"city_grouping\"\n ]\n\n dataTable = [[\n \"Allocation\",\n \"City\",\n \"City Grouping\"\n ]]\n\n for data in results:\n array = [data[\"allocation\"]]\n array += [str(data[key]) for key in keys]\n dataTable.append(array)\n\n return json.dumps(dataTable)\n\n def getTotalsTable(self, results):\n keyNames = [\"Seniors\",\n \"Early Years\",\n \"Counselling Sessions\",\n \"Families\",\n \"Programs\",\n \"Mentors/Tutors\",\n \"Workshops\",\n \"Middle Years\",\n \"Agencies\",\n \"Meals/Snacks\",\n \"Money Invested\",\n \"Parent/Caregivers\",\n 
\"Volunteers\"]\n\n data = results[0]\n i =0\n for key in data:\n data[key] = [keyNames[i], str(data[key])]\n i = i + 1\n\n\n results[0] = data\n\n return results\n\n def parseFilters(self, filterList):\n removeList = []\n appendList = []\n if 'gfa' in filterList:\n for gfa in filterList['gfa']:\n if \"Level -\" in gfa:\n removeList.append(gfa)\n appendList.append(gfa.replace(\"Level - \", \"\"))\n elif 'Other:' in gfa:\n removeList.append(gfa)\n gfa = gfa.replace('\"', '')\n olist = gfa.replace(\"Other: \", \"\")\n appendList + olist.split(' + ')\n for x in removeList:\n filterList['gfa'].remove(x)\n filterList['gfa'] = filterList['gfa'] + appendList\n removeList = []\n appendList = []\n\n if 'program_elements' in filterList:\n for pe in filterList['program_elements']:\n if \"Name -\" in pe:\n removeList.append(pe)\n appendList.append(pe.replace('Name - ', \"\"))\n elif '%' in pe:\n removeList.append(pe)\n olist = pe.replace ('%', '-')\n appendList.append(olist)\n for x in removeList:\n filterList['program_elements'].remove(x)\n filterList['program_elements'] = filterList['program_elements'] + appendList\n return filterList\n\n def getFiltersTable(self, results):\n keyNames = {\"funding_year\" : \"Funding Year\",\n \"focus_area\" : \"Focus Area\",\n \"target_population\" : \"Target Population\",\n \"program_elements\" : \"Program Elements\",\n \"city\" : \"City Grouping\",\n \"gfa\" : \"Geographic Focus Area\",\n \"donor\" : \"Donor Engagement\",\n \"money_invested\" : \"Money Invested\"\n }\n\n del results[\"Submit\"]\n del results[\"csrfmiddlewaretoken\"]\n\n # for key in results:\n # results[key] = {keyNames[key]: str(results[key])}\n\n for key in results:\n results[key] = {keyNames[key] : results[key]}\n filterList = results[key][keyNames[key]]\n filterMapKey = keyNames[key]\n j = 0\n for option in filterList:\n filterList[j] = str(option)\n j = j + 1\n results[key][keyNames[key]] = filterList\n\n return results\n\n def addFiltersToDatabase(self, results, user):\n funding_year = ''\n focus_area = ''\n target_population = ''\n program_elements = ''\n city = ''\n gfa = ''\n donor = ''\n money_invested = ''\n\n if \"funding_year\" in results and results[\"funding_year\"]:\n for result in results[\"funding_year\"]:\n funding_year += result+', '\n if \"focus_area\" in results and results[\"focus_area\"]:\n for result in results[\"focus_area\"]:\n focus_area += result+', '\n if \"target_population\" in results and results[\"target_population\"]:\n for result in results[\"target_population\"]:\n target_population += result + ', '\n if \"program_elements\" in results and results[\"program_elements\"]:\n for result in results[\"program_elements\"]:\n program_elements +=result + ', '\n if \"city\" in results and results[\"city\"]:\n for result in results[\"city\"]:\n city += result + ', '\n if \"gfa\" in results and results[\"gfa\"]:\n for result in results[\"gfa\"]:\n gfa += result + ', '\n if \"donor\" in results and results[\"donor\"]:\n for result in results[\"donor\"]:\n donor += result + ', '\n if \"money_invested\" in results and results[\"money_invested\"]:\n for result in results[\"money_invested\"]:\n money_invested += result + ', '\n\n search = models.Search_History( funding_year=funding_year[:-2],\n focus_area=focus_area[:-2],\n target_population=target_population[:-2],\n program_elements=program_elements[:-2],\n city_groupings=city[:-2],\n geographic_focus_area=gfa[:-2],\n donor_engagement=donor[:-2],\n money_invested=money_invested[:-2],\n user = user\n )\n search.save()\n\nclass 
SearchPage(LoginRequiredMixin, TemplateView):\n template_name = \"search-page.html\"\n\n\nclass SetPasswordView(LoginRequiredMixin, SuccessMessageMixin, FormView):\n template_name = \"resetPassword.html\"\n form_class = SetUserPasswordForm\n\n success_url = reverse_lazy(\"homepage\")\n success_message = \"You have reset password for %(username)s successfully\"\n\n def form_valid(self, form):\n try:\n user = User.objects.get(username=form.cleaned_data.get('username'))\n except ObjectDoesNotExist:\n messages.error(self.request, 'User with the username does not exist')\n return redirect(reverse_lazy('resetPassword'))\n\n user.set_password(form.clean_new_password2())\n user.save()\n return super(SetPasswordView, self).form_valid(form)\n\n def form_invalid(self, form):\n for field in form:\n for error in field.errors:\n messages.error(self.request, error)\n\n for error in form.non_field_errors():\n messages.error(self.request, error)\n\n return super(SetPasswordView, self).form_invalid(form)\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.profile.is_admin:\n messages.error(request, \"Require administrator authentication to reset passwords\")\n return redirect(reverse_lazy('homepage'))\n\n return super(SetPasswordView, self).dispatch(request, *args, **kwargs)\n\nclass DeleteUserView(LoginRequiredMixin, SuccessMessageMixin, FormView):\n template_name = \"deleteUser.html\"\n form_class = DeleteUserForm\n\n success_url = reverse_lazy(\"homepage\")\n success_message = \"You have deleted %(username)s successfully\"\n\n def form_valid(self, form):\n try:\n user = User.objects.get(username=form.cleaned_data.get('username'))\n except ObjectDoesNotExist:\n messages.error(self.request, 'User with the username does not exist')\n return redirect(reverse_lazy('deleteUser'))\n\n user.delete()\n return super(DeleteUserView, self).form_valid(form)\n\n def form_invalid(self, form):\n for field in form:\n for error in field.errors:\n messages.error(self.request, error)\n\n for error in form.non_field_errors():\n messages.error(self.request, error)\n\n return super(DeleteUserView, self).form_invalid(form)\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.profile.is_admin:\n messages.error(request, \"Require administrator authentication to reset passwords\")\n return redirect(reverse_lazy('homepage'))\n\n return super(DeleteUserView, self).dispatch(request, *args, **kwargs)\n\n","sub_path":"uw_dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"169221845","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport os\nimport random\nimport pickle\nfrom sklearn.svm import SVC\nfrom sklearn.datasets.samples_generator import make_blobs \nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report, confusion_matrix \ndataset=\"E:/Data set for pattern/tr\"\ntestset=\"E:/Data set for pattern/t\"\ncategories =[\"non-ped_examples\",\"ped_examples\"]\ntraining_data=[] \ntest_data=[] \nXtr=[]\nytr=[]\nXt=[]\nyt=[]\nIMG_SIZE = 50\n\n#E:/Data set for pattern/t/non-ped_examples\ndef create_training_data(training_data,dataset,cat,IMG_SIZE):\n for category in cat:\n path = os.path.join(dataset,category)\n class_num = categories.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array, 
(IMG_SIZE, IMG_SIZE)) \n training_data.append([new_array,class_num])\n except Exception as e:\n pass\n print(e)\n\n\ndef create_test_data(test_data,testset,cat,IMG_SIZE):\n for category in cat:\n path = os.path.join(testset,category)\n class_num = categories.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) \n test_data.append([new_array,class_num])\n except Exception as e:\n pass\n print(e)\n\n\n\ndef formalize(X,y,data):\n for features , labels in data:\n X.append(features)\n y.append(labels)\n\ndef SVM_Test(Xtr,Xt,ytr,yt):\n clf = SVC(kernel='linear',C=1000.0) \n # fitting x samples and y classes \n clf.fit(Xtr,ytr)\n y_pred = clf.predict(Xt)\n print(y_pred)\n\n print(clf.score(Xt,yt))\n \"\"\"\n xfit = np.linspace(-1, 3.5) \n # Y containing two classes \n Xtr, ytr = make_blobs(n_samples=500, centers=2, \n random_state=0, cluster_std=0.40) \n # plot a line between the different sets of data \n plt.scatter(Xtr[:, 0], Xtr[:, 1], c=ytr, s=50, cmap='spring')\n for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]: \n yfit = m * xfit + b \n plt.plot(xfit, yfit, '-k') \n plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', \n color='#AAAAAA', alpha=0.4) \n plt.xlim(-1, 3.5)\n # plotting scatters \n plt.show() \"\"\"\n\ncreate_training_data(training_data,dataset,categories,IMG_SIZE)\ncreate_test_data(test_data,testset,categories,IMG_SIZE)\nformalize(Xtr,ytr,training_data)\nformalize(Xt,yt,test_data)\nXtr=np.array(Xtr).reshape(-1,IMG_SIZE*IMG_SIZE)\nXt=np.array(Xt).reshape(-1,IMG_SIZE*IMG_SIZE)\nSVM_Test(Xtr,Xt,ytr,yt)\n","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"284444759","text":"# coding=utf-8\nimport os\nfrom xlrd import open_workbook\n\n\ndef _get(xlsx_path, sheet, colums):\n if xlsx_path is None:\n return None\n\n if sheet is None:\n return None\n\n if colums is None:\n return None\n\n # open xls file\n file = open_workbook(xlsx_path)\n # get sheet by name\n sheet = file.sheet_by_name(sheet)\n\n row0 = sheet.row_values(0);\n search = []\n for i in range(len(row0)):\n name = row0[i]\n try:\n colums.index(name)\n search.append(i)\n except Exception:\n continue\n # get one sheet's rows\n rows = []\n nrows = sheet.nrows\n for i in range(1, nrows):\n row = []\n for j in search:\n row.append(sheet.row_values(i)[j])\n rows.append(row)\n return rows\n\n\nclass Xlsx:\n _instance = None\n\n def __init__(self):\n pass\n\n @classmethod\n def _get_instance(cls):\n if cls._instance is None:\n cls._instance = Xlsx()\n return cls._instance\n\n @staticmethod\n def get_rows(xlsx_path, sheet, colums):\n return _get(xlsx_path, sheet, colums)\n\n\nif __name__ == \"__main__\":\n print(Xlsx.get_rows(os.path.join(os.getcwd(), \"config\", \"testcase.xlsx\"),\n \"login\",\n (\"case_name\", \"method\", \"token\", \"email\", \"password\")))\n","sub_path":"framework/util/xlsx.py","file_name":"xlsx.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9685836","text":"import numpy as np\ntry:\n from .key import *\nexcept ImportError:\n from key import *\n\nLOG_FILE = 'base_left-11.log'\nACTIONS = {'Dash', 'Turn', 'Tackle', 'Kick'}\n\ndef fetch_data(logs):\n \"\"\"Input: Array of log filenames\"\"\"\n assert(isinstance(logs, 
list))\n X = None\n y = None\n for log_file in logs:\n if X is None:\n X, y = fetch_data_helper(log_file)\n else:\n X_new, y_new = fetch_data_helper(log_file)\n X = np.concatenate((X, X_new))\n y = np.concatenate((y, y_new))\n\n return X, y\n\ndef fetch_data_helper(log_file):\n X = []\n y = []\n prev_time = 0\n curr_features = None # string\n curr_actions = None # string\n\n with open(log_file) as f:\n for line in f:\n words = line.strip().split()\n curr_time = int(words[0])\n\n if curr_time != prev_time:\n if curr_features is not None and curr_actions is not None:\n X.append(get_feature_array(curr_features))\n y.append(get_action_array(curr_actions))\n prev_time = curr_time\n curr_features = None\n curr_actions = None\n\n if words[3] == \"StateFeatures\": # States\n curr_features = words[4:]\n elif words[1] == \"8\": # Actions\n curr_actions = words[4]\n\n if curr_features is not None and curr_actions is not None:\n X.append(get_feature_array(curr_features))\n y.append(get_action_array(curr_actions))\n\n return np.array(X), np.array(y)\n\n# def fetch_data_old(log_file):\n# X = []\n# y = []\n# prev_iter = 0\n# features_exist = False\n# action_exists = False\n# prev_valid = False\n\n# with open(log_file) as f:\n# for line in f:\n# words = line.strip().split()\n# curr_iter = int(words[0])\n\n# if curr_iter != prev_iter:\n# if prev_valid:\n# X.append(state_features)\n# y.append(action)\n# prev_valid = False\n\n# if words[3] == 'StateFeatures':\n# state_features = get_feature_array(words[4:])\n# features_exist = True\n# else:\n# action_exists = False\n# for action in ACTIONS:\n# if action in line:\n# action_exists = True\n\n# if action_exists and features_exist:\n# action = get_action_array(words[4])\n# prev_valid = True\n# else:\n# features_exist = False\n\n# prev_iter = curr_iter\n\n# if prev_valid:\n# X.append(state_features)\n# y.append(action)\n\n# return np.array(X), np.array(y)\n\ndef get_feature_array(features):\n feature_array = [float(feat) for feat in features]\n\n for feat in feature_array:\n assert feat >= -1. and feat <= 1.\n\n return feature_array\n\ndef get_action_array(action):\n action_array = [0.] * OUTPUT_SIZE\n # action_array = [0. 
for i in range(OUTPUT_SIZE)]\n    param_str = action[action.index('(')+1:-1]\n    action_type = action.split('(')[0]\n\n    if action_type == 'Dash':\n        params = [float(param) for param in param_str.split(',')]\n        action_array[DASH] = 1.\n        action_array[DASH_POW] = params[0]\n        action_array[DASH_DEG] = params[1]\n    elif action_type == 'Turn':\n        param = float(param_str)\n        action_array[TURN] = 1.\n        action_array[TURN_DEG] = param\n    elif action_type == 'Tackle':\n        params = [float(param) for param in param_str.split(',')]\n        action_array[TACKLE] = 1.\n        action_array[TACKLE_DEG] = params[0]\n    elif action_type == 'Kick':\n        params = [float(param) for param in param_str.split(',')]\n        action_array[KICK] = 1.\n        action_array[KICK_POW] = params[0]\n        action_array[KICK_DEG] = params[1]\n\n    return action_array\n\nif __name__ == '__main__':\n    # fetch_data expects a list of log filenames (it asserts isinstance(logs, list))\n    X, y = fetch_data([LOG_FILE])\n","sub_path":"training/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"453307829","text":"import numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport bisect\n\n\ndef weight_variable(shape):\n    # Xavier initialization\n    stddev = np.sqrt(2.0 / (sum(shape)))\n    initial = tf.truncated_normal(shape, stddev=stddev)\n    weights = tf.Variable(initial)\n    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, weights)\n    return weights\n\n\ndef bias_variable(shape, trainable=True):\n    initial = tf.constant(0.0, shape=shape)\n    return tf.Variable(initial, trainable=trainable)\n\n\ndef conv2d(x, W):\n    return tf.nn.conv2d(x, W, data_format='NCHW', strides=[1, 1, 1, 1], padding='SAME')\n\n\nclass NeuralNet:\n    def __init__(self):\n        self.root_dir = './saved_models/'\n\n        # Network structure\n        self.residual_filter = 64\n        self.residual_blocks = 6\n\n        # For exporting\n        self.weights = []\n        self.session = tf.Session()\n\n        self.training = tf.placeholder(tf.bool)\n        self.global_step = tf.Variable(0, name='global_step', trainable=False)\n        # tf.train.Saver() raises \"No variables to save\" if no variable exists yet,\n        # so create it after global_step; it only tracks variables defined so far.\n        self.saver = tf.train.Saver()\n        self.batch_norm_count = 0\n\n        # Training\n        self.policy_loss_weight = 1.0\n        self.value_loss_weight = 1.0\n        self.learning_rate_boundaries = [100000, 130000]\n        self.learning_rate_values = [0.0005, 0.002, 0.02]\n        self.learning_rate = self.learning_rate_values[0]\n        self.num_steps_train = 200\n        self.num_steps_test = 2000\n        self.total_steps = 140000\n\n        # Summary\n        self.test_writer = tf.summary.FileWriter(os.path.join(os.getcwd(), \"logs/test/\"), self.session.graph)\n        self.train_writer = tf.summary.FileWriter(os.path.join(os.getcwd(), \"logs/train/\"), self.session.graph)\n\n        # Other variables to be defined upon init\n        self.handle = None\n        self.next_batch = None\n        self.train_handle = None\n        self.test_handle = None\n        self.x = None\n        self.y_ = None\n        self.z_ = None\n        self.y_conv = None\n        self.z_conv = None\n        self.policy_loss = None\n        self.mse_loss = None\n        self.reg_term = None\n        self.update_ops = None\n        self.train_op = None\n        self.accuracy = None\n        self.avg_policy_loss = []\n        self.avg_mse_loss = []\n        self.avg_reg_term = []\n        self.time_start = None\n\n    def construct(self, dataset, train_iterator, test_iterator):\n        # TF variables\n        self.handle = tf.placeholder(tf.string, shape=[])\n        self.next_batch = tf.data.Iterator.from_string_handle(self.handle, dataset.output_types, dataset.output_shapes).get_next()\n        self.train_handle = self.session.run(train_iterator.string_handle())\n        self.test_handle = self.session.run(test_iterator.string_handle())\n\n        self.x = self.next_batch[0] # 
tf.placeholder(tf.float32, [None, 112, 8*8])\n self.y_ = self.next_batch[1] # tf.placeholder(tf.float32, [None, 1858])\n self.z_ = self.next_batch[2] # tf.placeholder(tf.float32, [None, 1])\n self.y_conv, self.z_conv = self.construct_net(self.x)\n\n # Calculate loss on policy head\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv)\n self.policy_loss = tf.reduce_mean(cross_entropy)\n\n # Loss on value head\n self.mse_loss = tf.reduce_mean(tf.squared_difference(self.z_, self.z_conv))\n\n # Regularizer\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)\n reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n self.reg_term = tf.contrib.layers.apply_regularization(regularizer, reg_variables)\n\n # For training from a (smaller) dataset of strong players, you will\n # want to reduce the factor in front of self.mse_loss here.\n loss = self.policy_loss_weight * self.policy_loss + self.value_loss_weight * self.mse_loss + self.reg_term\n\n # You need to change the learning rate here if you are training\n # from a self-play training set, for example start with 0.005 instead.\n opt_op = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.9, use_nesterov=True)\n\n self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(self.update_ops):\n self.train_op = opt_op.minimize(loss, global_step=self.global_step)\n\n correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n self.accuracy = tf.reduce_mean(correct_prediction)\n\n self.session.run(tf.global_variables_initializer())\n\n def replace_weights(self, new_weights):\n for e, weights in enumerate(self.weights):\n # Keyed batchnorm weights\n if isinstance(weights, str):\n work_weights = tf.get_default_graph().get_tensor_by_name(weights)\n new_weight = tf.constant(new_weights[e])\n self.session.run(tf.assign(work_weights, new_weight))\n elif weights.shape.ndims == 4:\n # Transpose convolutation weights from [filter_height, filter_width, in_channels, out_channels]\n # to [output, input, filter_size, filter_size]\n s = weights.shape.as_list()\n shape = [s[i] for i in [3, 2, 0, 1]]\n new_weight = tf.constant(new_weights[e], shape=shape)\n self.session.run(weights.assign(tf.transpose(new_weight, [2, 3, 1, 0])))\n elif weights.shape.ndims == 2:\n # Change fully connected layers from [in, out] to [out, in]\n s = weights.shape.as_list()\n shape = [s[i] for i in [1, 0]]\n new_weight = tf.constant(new_weights[e], shape=shape)\n self.session.run(weights.assign(tf.transpose(new_weight, [1, 0])))\n else:\n # Biases, batchnorm etc\n new_weight = tf.constant(new_weights[e], shape=weights.shape)\n self.session.run(weights.assign(new_weight))\n\n def restore(self, file):\n print(f'Restoring from {file}')\n self.saver.restore(self.session, file)\n\n def process(self, batch_size, test_batches):\n if not self.time_start:\n self.time_start = time.time()\n\n # Run training for this batch\n policy_loss, mse_loss, reg_term, _, _ = self.session.run(\n [self.policy_loss, self.mse_loss, self.reg_term, self.train_op, self.next_batch],\n feed_dict={self.training: True, self.learning_rate: self.learning_rate, self.handle: self.train_handle}\n )\n\n steps = tf.train.global_step(self.session, self.global_step)\n\n # Determine learning rate\n steps_total = (steps-1) % self.total_steps\n self.learning_rate = 
self.learning_rate_values[bisect.bisect_right(self.learning_rate_boundaries, steps_total)]\n\n # Keep running averages\n # Google's paper scales MSE by 1/4 to a [0, 1] range, so do the same to\n # get comparable values.\n mse_loss /= 4.0\n self.avg_policy_loss.append(policy_loss)\n self.avg_mse_loss.append(mse_loss)\n self.avg_reg_term.append(reg_term)\n if steps % self.num_steps_train == 0:\n pol_loss_w = self.policy_loss_weight\n val_loss_w = self.value_loss_weight\n time_end = time.time()\n speed = 0\n if self.time_start:\n elapsed = time_end - self.time_start\n speed = batch_size * (self.num_steps_train / elapsed)\n avg_policy_loss = np.mean(self.avg_policy_loss or [0])\n avg_mse_loss = np.mean(self.avg_mse_loss or [0])\n avg_reg_term = np.mean(self.avg_reg_term or [0])\n print(f'Steps {steps},'\n f'lr={self.learning_rate}'\n f'policy={avg_policy_loss}'\n f'mse={avg_mse_loss}'\n f'reg={avg_reg_term}'\n f'total={pol_loss_w * avg_policy_loss + val_loss_w * avg_mse_loss + avg_reg_term}'\n f'({speed} pos/s)'\n )\n train_summaries = tf.Summary(value=[\n tf.Summary.Value(tag=\"Policy Loss\", simple_value=avg_policy_loss),\n tf.Summary.Value(tag=\"MSE Loss\", simple_value=avg_mse_loss)])\n self.train_writer.add_summary(train_summaries, steps)\n self.time_start = time_end\n self.avg_policy_loss, self.avg_mse_loss, self.avg_reg_term = [], [], []\n\n if steps % self.num_steps_test == 0:\n sum_accuracy = 0\n sum_mse = 0\n sum_policy = 0\n for _ in range(0, test_batches):\n test_policy, test_accuracy, test_mse, _ = self.session.run(\n [self.policy_loss, self.accuracy, self.mse_loss,\n self.next_batch],\n feed_dict={self.training: False,\n self.handle: self.test_handle})\n sum_accuracy += test_accuracy\n sum_mse += test_mse\n sum_policy += test_policy\n sum_accuracy /= test_batches\n sum_accuracy *= 100\n sum_policy /= test_batches\n # Additionally rescale to [0, 1] so divide by 4\n sum_mse /= (4.0 * test_batches)\n test_summaries = tf.Summary(value=[\n tf.Summary.Value(tag=\"Accuracy\", simple_value=sum_accuracy),\n tf.Summary.Value(tag=\"Policy Loss\", simple_value=sum_policy),\n tf.Summary.Value(tag=\"MSE Loss\", simple_value=sum_mse)])\n self.test_writer.add_summary(test_summaries, steps)\n print(f'Steps {steps}, policy={sum_policy} training accuracy={sum_accuracy}%, mse={sum_mse}')\n save_path = self.saver.save(self.session, self.root_dir, global_step=steps)\n print(\"Model saved in file: {}\".format(save_path))\n weights_path = self.root_dir + \"-\" + str(steps) + \".txt\"\n self.save_weights(weights_path)\n print(f'Weights saved in file: {weights_path}')\n\n def save_weights(self, filename):\n with open(filename, \"w\") as f:\n for weights in self.weights:\n f.write(\"\\n\")\n # Keyed batchnorm weights\n if isinstance(weights, str):\n work_weights = tf.get_default_graph().get_tensor_by_name(weights)\n elif weights.shape.ndims == 4:\n # Transpose convolution weights [filter_height, filter_width, in_channels, out_channels]\n # to [output, input, filter_size, filter_size]\n work_weights = tf.transpose(weights, [3, 2, 0, 1])\n elif weights.shape.ndims == 2:\n # Change fully connected layers from [in, out] (TF) to [out, in]\n work_weights = tf.transpose(weights, [1, 0])\n else:\n # Biases, batchnorm etc\n work_weights = weights\n wt_str = [str(wt) for wt in np.ravel(work_weights.eval(session=self.session))]\n f.write(\" \".join(wt_str))\n\n def get_batchnorm_key(self):\n result = \"bn\" + str(self.batch_norm_count)\n self.batch_norm_count += 1\n return result\n\n def conv_block(self, inputs, 
filter_size, input_channels, output_channels):\n W_conv = weight_variable([filter_size, filter_size,\n input_channels, output_channels])\n b_conv = bias_variable([output_channels], False)\n self.weights.append(W_conv)\n self.weights.append(b_conv)\n\n weight_key = self.get_batchnorm_key()\n self.weights.append(weight_key + \"/batch_normalization/moving_mean:0\")\n self.weights.append(weight_key + \"/batch_normalization/moving_variance:0\")\n\n with tf.variable_scope(weight_key):\n h_bn = \\\n tf.layers.batch_normalization(\n conv2d(inputs, W_conv),\n epsilon=1e-5, axis=1, fused=True,\n center=False, scale=False,\n training=self.training)\n h_conv = tf.nn.relu(h_bn)\n return h_conv\n\n def residual_block(self, inputs, channels):\n # First convnet\n orig = tf.identity(inputs)\n W_conv_1 = weight_variable([3, 3, channels, channels])\n b_conv_1 = bias_variable([channels], False)\n self.weights.append(W_conv_1)\n self.weights.append(b_conv_1)\n weight_key_1 = self.get_batchnorm_key()\n self.weights.append(weight_key_1 + \"/batch_normalization/moving_mean:0\")\n self.weights.append(weight_key_1 + \"/batch_normalization/moving_variance:0\")\n\n # Second convnet\n W_conv_2 = weight_variable([3, 3, channels, channels])\n b_conv_2 = bias_variable([channels], False)\n self.weights.append(W_conv_2)\n self.weights.append(b_conv_2)\n weight_key_2 = self.get_batchnorm_key()\n self.weights.append(weight_key_2 + \"/batch_normalization/moving_mean:0\")\n self.weights.append(weight_key_2 + \"/batch_normalization/moving_variance:0\")\n\n with tf.variable_scope(weight_key_1):\n h_bn1 = \\\n tf.layers.batch_normalization(\n conv2d(inputs, W_conv_1),\n epsilon=1e-5, axis=1, fused=True,\n center=False, scale=False,\n training=self.training)\n h_out_1 = tf.nn.relu(h_bn1)\n with tf.variable_scope(weight_key_2):\n h_bn2 = \\\n tf.layers.batch_normalization(\n conv2d(h_out_1, W_conv_2),\n epsilon=1e-5, axis=1, fused=True,\n center=False, scale=False,\n training=self.training)\n h_out_2 = tf.nn.relu(tf.add(h_bn2, orig))\n return h_out_2\n\n def construct_net(self, planes):\n # NCHW format\n # batch, 112 input channels, 8 x 8\n x_planes = tf.reshape(planes, [-1, 112, 8, 8])\n\n # Input convolution\n flow = self.conv_block(x_planes, filter_size=3,\n input_channels=112,\n output_channels=self.residual_filter)\n # Residual tower\n for _ in range(self.residual_blocks):\n flow = self.residual_block(flow, self.residual_filter)\n\n # Policy head\n conv_pol = self.conv_block(flow, filter_size=1,\n input_channels=self.residual_filter,\n output_channels=32)\n h_conv_pol_flat = tf.reshape(conv_pol, [-1, 32*8*8])\n W_fc1 = weight_variable([32*8*8, 1858])\n b_fc1 = bias_variable([1858])\n self.weights.append(W_fc1)\n self.weights.append(b_fc1)\n h_fc1 = tf.add(tf.matmul(h_conv_pol_flat, W_fc1), b_fc1, name='policy_head')\n\n # Value head\n conv_val = self.conv_block(flow, filter_size=1, input_channels=self.residual_filter, output_channels=32)\n h_conv_val_flat = tf.reshape(conv_val, [-1, 32*8*8])\n W_fc2 = weight_variable([32 * 8 * 8, 128])\n b_fc2 = bias_variable([128])\n self.weights.append(W_fc2)\n self.weights.append(b_fc2)\n h_fc2 = tf.nn.relu(tf.add(tf.matmul(h_conv_val_flat, W_fc2), b_fc2))\n W_fc3 = weight_variable([128, 1])\n b_fc3 = bias_variable([1])\n self.weights.append(W_fc3)\n self.weights.append(b_fc3)\n h_fc3 = tf.nn.tanh(tf.add(tf.matmul(h_fc2, W_fc3), b_fc3), name='value_head')\n\n return h_fc1, 
h_fc3\n","sub_path":"train/NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":15531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384681585","text":"'''\n@Created by yuhsiang\n@Date : 2018/12/10\n'''\n\nimport unittest\nfrom pprint import pprint\n\nfrom data_config import common_config\nfrom base.HTMLTestReportCN import HTMLTestRunner\nfrom base.httpRequest import HttpRequest\nfrom master_api import system_management\nfrom master_api.account_login import User\n\n\nclass GroupThirdPartyBaseTest(unittest.TestCase):\n \"\"\" 线上支付商户管理 - 相關 API 調用狀態\"\"\"\n\n def setUp(self):\n self.__http = HttpRequest()\n self.user = User(self.__http)\n self.groupThirdParty = system_management.GroupThirdParty(self.__http)\n self.user.login()\n\n def tearDown(self):\n self.user.logout()\n\n def GetGroupThirdPartyId(self):\n response_data = self.groupThirdParty.get_list({})\n for i in range(len(response_data[1]['Settings'])):\n if response_data[1]['Settings'][i]['Name'] == \"QA - 測試API\":\n self.getNewCreateGroupThirdPartyId = response_data[1]['Settings'][i]['Id']\n elif response_data[1]['Settings'][i]['Name'] == \"QA - 測試API-modify\":\n self.getNewCreateGroupThirdPartyId = response_data[1]['Settings'][i]['Id']\n return self.getNewCreateGroupThirdPartyId\n\n def test_GroupThirdParty_relatedApi_status_01(self):\n \"\"\"驗證 线上支付商户管理 - 取得列表頁面\"\"\"\n response_data = self.groupThirdParty.list({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_02(self):\n \"\"\"驗證 线上支付商户管理 - 取得線上支付商戶列表\"\"\"\n response_data = self.groupThirdParty.get_list({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_03(self):\n \"\"\"驗證 线上支付商户管理 - 取得新增頁面\"\"\"\n response_data = self.groupThirdParty.create({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_04(self):\n \"\"\"驗證 线上支付商户管理 - 取得線上商戶類型\"\"\"\n response_data = self.groupThirdParty.get_types({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_05(self):\n \"\"\"驗證 线上支付商户管理 - 取得目前支付種類\"\"\"\n response_data = self.groupThirdParty.get_third_party_type_list({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_06(self):\n \"\"\"驗證 线上支付商户管理 - 新增金流公司商戶資料\"\"\"\n data = {\"AvailableMinutes\": 20,\n \"Name\": \"QA - 測試API\",\n \"Type\": \"96\",\n \"TypeValue\": 4,\n \"Min\": 1,\n \"Max\": 10,\n \"Limit\": 20,\n \"RecommendationMemo\": \"測試\",\n \"Memo\": \"微信測試\",\n \"RecommendationAmountSettings\": [{\n \"Sort\": 1,\n \"Amount\": 5\n },\n {\n \"Sort\": 2,\n \"Amount\": 10\n }],\n \"MemberLevelSettingIds\": [21],\n \"Settings\": [{\n \"key\": \"Account\",\n \"value\": \"201908024\"\n },\n {\n \"key\": \"Password\",\n \"value\": \"GFHGFDGFHDFGHGF\"\n },\n {\n \"key\": \"Gateway\",\n \"value\": \"http://www.baidu.com/\"\n }]}\n response_data = self.groupThirdParty.create_dtpp_submit(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_07(self):\n \"\"\"驗證 线上支付商户管理 - 取得金流公司商戶資料\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 
驗證呼叫該商戶的詳細資料\n data = {\"id\": groupThirdPartyId}\n response_data = self.groupThirdParty.get_dtpp_detail(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_08(self):\n \"\"\"驗證 线上支付商户管理 - 停用金流公司商戶資料\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 驗證呼叫該商戶的停用\n data = {\"id\": groupThirdPartyId}\n response_data = self.groupThirdParty.dtpp_disable(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_09(self):\n \"\"\"驗證 线上支付商户管理 - 啟用金流公司商戶資料\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 驗證呼叫該商戶的啟用\n data = {\"id\": groupThirdPartyId}\n response_data = self.groupThirdParty.dtpp_active(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_10(self):\n \"\"\"驗證 线上支付商户管理 - 歸零目前商戶累計金額\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 驗證呼叫該商戶的累計金額\n data = {\"id\": groupThirdPartyId}\n response_data = self.groupThirdParty.dtpp_reset(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_11(self):\n \"\"\"驗證 线上支付商户管理 - 更新商戶名稱\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 驗證呼叫該商戶的更新資料\n data = {\"id\": groupThirdPartyId,\n \"args\": \"QA - 測試API-modify\"}\n response_data = self.groupThirdParty.update_dtpp_name(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_12(self):\n \"\"\"驗證 線上支付商戶管理 - 新增商戶取得支付商戶類型\"\"\"\n response_data = self.groupThirdParty.getDTPPTypeList({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_13(self):\n \"\"\"驗證 線上支付商戶管理 - 新增商戶取得支付商戶列表\"\"\"\n response_data = self.groupThirdParty.getValidDTPP({})\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_14(self):\n \"\"\"驗證 線上支付商戶管理 - 取得商戶詳細設定資料\"\"\"\n # Step1 取得 支付金流Id\n response_data = self.groupThirdParty.getValidDTPP({})\n getId = response_data[1]['ReturnObject'][0]['Value']\n data = {\"Id\": getId}\n response_data = self.groupThirdParty.getSettingDetail(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_15(self):\n \"\"\"驗證 線上支付商戶管理 - 取得商戶簡體中文限制\"\"\"\n # Step1 取得 支付金流Id\n response_data = self.groupThirdParty.getValidDTPP({})\n getId = response_data[1]['ReturnObject'][0]['Value']\n data = {'Id': getId}\n response_data = self.groupThirdParty.getDepositLimitsCn(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_16(self):\n \"\"\"驗證 線上支付商戶管理 - 更新商戶資料\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 取得 支付金流Id\n response_data = self.groupThirdParty.getValidDTPP({})\n Type = response_data[1]['ReturnObject'][0]['Value'] # 新金流Id\n typeValue = response_data[1]['ReturnObject'][0]['TypeValue'] # 新金流Id\n data = {\"id\": groupThirdPartyId, \"args\": {\n \"Settings\": [{\"key\": \"Account\", \"value\": \"11333\"},\n {\"key\": 
\"Password\", \"value\": \"33\"},\n {\"key\": \"Gateway\", \"value\": \"http://tw\"}],\n \"Type\": Type, \"TypeValue\": typeValue}}\n response_data = self.groupThirdParty.updateDTPPMerchantData(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_18(self):\n \"\"\"驗證 線上支付商戶管理 - 更新更新單次存款限額\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': {'Min': 1, 'Max': 10}}\n response_data = self.groupThirdParty.updateDTPPRange(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_19(self):\n \"\"\"驗證 線上支付商戶管理 - 更新推薦說明\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': '@QA_automation'}\n response_data = self.groupThirdParty.updateDTPPRecommendationMemo(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_20(self):\n \"\"\"驗證 線上支付商戶管理 - 更新推薦金額鎖\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': 'true'}\n response_data = self.groupThirdParty.updateDTPPAmountLock(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_21(self):\n \"\"\"驗證 線上支付商戶管理 - 更新推薦金額\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': [{'Sort': 1, 'Amount': 10}, {'Sort': 2, 'Amount': 20}]}\n response_data = self.groupThirdParty.updateDTPPRecommendationAmountSettings(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_22(self):\n \"\"\"驗證 線上支付商戶管理 - 更新總存款額度\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': 1}\n response_data = self.groupThirdParty.updateDTPPRecommendationAmountSettings(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_23(self):\n \"\"\"驗證 線上支付商戶管理 - 更新有效分鐘數\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': 4}\n response_data = self.groupThirdParty.updateDTPPAvailableMinutes(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_24(self):\n \"\"\"驗證 線上支付商戶管理 - 更新備註\"\"\"\n # Step1 取得商戶Id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n data = {'id': groupThirdPartyId, 'args': 'QA_automation'}\n response_data = self.groupThirdParty.updateDTPPMemo(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_25(self):\n \"\"\"驗證 线上支付商户管理 - 自訂商戶開關 - 開&關 狀態\"\"\"\n types = [\"false\", \"true\"]\n for i in range(len(types)):\n data = {\"id\": self.GetGroupThirdPartyId(),\n \"isShow\": types[i]\n }\n response_data = self.groupThirdParty.UpdateDTPPIsShowCustomMerchant(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_26(self):\n \"\"\"驗證 线上支付商户管理 - 前台顯示順序改為 自訂 狀態\"\"\"\n data = {\"mode\": \"1\"}\n response_data = 
self.groupThirdParty.SetDtppSortingMode(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_27(self):\n \"\"\"驗證 线上支付商户管理 - 前台顯示順序改為 隨機 狀態\"\"\"\n data = {\"mode\": \"2\"}\n response_data = self.groupThirdParty.SetDtppSortingMode(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_28(self):\n \"\"\"驗證 线上支付商户管理 - 刪除金流公司商戶資料\"\"\"\n # Step1 取得欲驗證的金流公司商戶id\n groupThirdPartyId = self.GetGroupThirdPartyId()\n # Step2 驗證呼叫該商戶的刪除\n data = {\"id\": groupThirdPartyId}\n response_data = self.groupThirdParty.dTPPDelete(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_GroupThirdParty_relatedApi_status_29(self):\n \"\"\"驗證 线上支付商户管理 - 线上支付看板-取得商户使用占比与成功率列表 - 昨日 + 近7日 + 近30日\"\"\"\n days = [1, 7, 30]\n for i in range(len(days)):\n data = {\n \"date\": days.pop(),\n \"name\": \"\",\n \"take\": 100,\n \"skip\": 0,\n \"descType\": 1,\n \"isDesc\": 'false'\n }\n response_data = self.groupThirdParty.GetList(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n\nif __name__ == '__main__':\n unittest.main(testRunner = HTMLTestRunner())\n","sub_path":"case/test_api/test_BaseCase/test_Master_Base_API_GroupThirdParty.py","file_name":"test_Master_Base_API_GroupThirdParty.py","file_ext":"py","file_size_in_byte":14863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"153620471","text":"\"\"\"Core Layers\"\"\"\n\nfrom typing import Tuple, List, Union\n\nimport tensorflow as tf\n\nfrom deepr.layers import base\nfrom deepr.utils.broadcasting import make_same_shape\n\n\nclass Sum(base.Layer):\n \"\"\"Sum Layer\"\"\"\n\n def __init__(self, n_in: int = 2, **kwargs):\n super().__init__(n_in=n_in, n_out=1, **kwargs)\n\n def forward(self, tensors, mode: str = None):\n \"\"\"Forward method of the layer\"\"\"\n tensors = make_same_shape(tensors, broadcast=False)\n acc = 0\n for inp in tensors:\n acc += inp\n return acc\n\n\nclass Product(base.Layer):\n \"\"\"Product Layer\"\"\"\n\n def __init__(self, n_in: int = 2, **kwargs):\n super().__init__(n_in=n_in, n_out=1, **kwargs)\n\n def forward(self, tensors, mode: str = None):\n \"\"\"Forward method of the layer\"\"\"\n tensors = make_same_shape(tensors, broadcast=False)\n acc = 1\n for inp in tensors:\n acc *= inp\n return acc\n\n\nclass DotProduct(base.Layer):\n \"\"\"Dot Product on the last dimension of the input vectors.\n\n It will add missing dimensions to the before last dimension. 
For\n example, if\n\n - t1: shape = [batch, num_target, 100]\n - t2: shape = [batch, 100]\n\n It will return\n\n - t: shape = [batch, num_target], where\n t[i, j] = sum_k(t1[i, k] * t2[i, j, k])\n \"\"\"\n\n def __init__(self, n_in: int = 2, **kwargs):\n super().__init__(n_in=n_in, n_out=1, **kwargs)\n\n def forward(self, tensors, mode: str = None):\n \"\"\"Forward method of the layer\"\"\"\n # Add missing dimensions to tensors to make them compatible\n t1, t2 = tensors\n if len(t1.shape) < len(t2.shape):\n t1, t2 = t2, t1\n ndims = len(t1.shape) - len(t2.shape)\n for _ in range(ndims):\n t2 = tf.expand_dims(t2, axis=-2)\n\n # Matmul can be used for dot product if at least one dummy dim\n if ndims:\n return tf.squeeze(tf.matmul(t1, t2, transpose_b=True), axis=[-1])\n else:\n t1 = tf.expand_dims(t1, axis=-2)\n t2 = tf.expand_dims(t2, axis=-1)\n return tf.squeeze(tf.matmul(t1, t2), axis=[-2, -1])\n\n\nclass Dense(base.Layer):\n \"\"\"Dense Layer\"\"\"\n\n def __init__(\n self,\n units: int,\n inputs: Union[str, Tuple[str, ...], List[str]] = None,\n outputs: Union[str, Tuple[str, ...], List[str]] = None,\n name: str = None,\n **kwargs\n ):\n super().__init__(n_in=1, n_out=1, inputs=inputs, outputs=outputs, name=name)\n self.units = units\n self._kwargs = kwargs\n\n def forward(self, tensors, mode: str = None):\n return tf.layers.dense(tensors, units=self.units, **self._kwargs)\n\n\n@base.layer(n_in=2, n_out=1)\ndef Add(tensors):\n \"\"\"Add two tensors of any compatible shapes.\"\"\"\n t1, t2 = make_same_shape(tensors, broadcast=False)\n return t1 + t2\n\n\n@base.layer(n_in=2, n_out=1)\ndef Concat(tensors, axis: int = -1):\n \"\"\"Concatenate tensors on axis\"\"\"\n return tf.concat(tensors, axis=axis)\n\n\n@base.layer(n_in=2, n_out=1)\ndef LogicalAnd(tensors):\n \"\"\"Perform logical_and on two tensors of compatible shapes.\"\"\"\n t1, t2 = make_same_shape(tensors, broadcast=False)\n return tf.logical_and(t1, t2)\n\n\n@base.layer(n_in=2, n_out=1)\ndef LogicalOr(tensors):\n \"\"\"Perform logical_or on two tensors of compatible shapes.\"\"\"\n t1, t2 = make_same_shape(tensors, broadcast=False)\n return tf.logical_or(t1, t2)\n\n\n@base.layer(n_in=1, n_out=1)\ndef ToFloat(tensors):\n \"\"\"Cast tensor to float32\"\"\"\n return tf.cast(tensors, tf.float32)\n\n\n@base.layer(n_in=1, n_out=1)\ndef ExpandDims(tensors, axis: int = -1):\n return tf.expand_dims(tensors, axis=axis)\n\n\n@base.layer(n_in=1, n_out=1)\ndef Scale(tensors: tf.Tensor, multiplier: float):\n \"\"\"Scale tensor by multiplier.\"\"\"\n return tf.multiply(tensors, multiplier)\n\n\nclass Identity(base.Layer):\n \"\"\"Identity Layer\"\"\"\n\n def __init__(self, inputs: Union[str, Tuple[str, ...], List[str]] = None, name: str = None):\n super().__init__(n_in=1, n_out=1, inputs=inputs, outputs=inputs, name=name)\n\n def forward(self, tensors, mode: str = None):\n return tf.identity(tensors, name=self.name)\n\n\nclass Conv1d(base.Layer):\n \"\"\"Conv1d Layer\"\"\"\n\n def __init__(\n self,\n filters: int,\n kernel_size: int,\n use_bias: bool = True,\n activation=None,\n inputs: Union[str, Tuple[str, ...], List[str]] = None,\n outputs: Union[str, Tuple[str, ...], List[str]] = None,\n name: str = None,\n **kwargs\n ):\n super().__init__(n_in=1, n_out=1, inputs=inputs, outputs=outputs, name=name)\n self.filters = filters\n self.kernel_size = kernel_size\n self.use_bias = use_bias\n self.activation = activation\n self._kwargs = kwargs\n\n def forward(self, tensors, mode: str = None):\n return tf.layers.conv1d(\n inputs=tensors,\n 
filters=self.filters,\n kernel_size=self.kernel_size,\n activation=self.activation,\n use_bias=self.use_bias,\n **self._kwargs\n )\n\n\nclass Softmax(base.Layer):\n \"\"\"Apply softmax to the last dimension of tensor with filtering masked values\"\"\"\n\n def __init__(self, n_in: int = 2, n_out=1, **kwargs):\n super().__init__(n_in=n_in, n_out=n_out, **kwargs)\n\n def forward(self, tensors, mode: str = None):\n \"\"\"Forward method of the layer\"\"\"\n tensor, mask = tensors\n mask = tf.cast(mask, tf.float32)\n tensor_exp = tf.exp(tensor - tf.reduce_max(tensor * mask, axis=-1, keepdims=True))\n sum_tensor_exp = tf.reduce_sum(tf.multiply(tensor_exp, mask), axis=-1, keepdims=True)\n return tf.div_no_nan(tensor_exp, sum_tensor_exp) * mask\n","sub_path":"deepr/layers/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"182024014","text":"#!/usr/bin/python3\n\n\n\"\"\" Module tests/test_models/test_base_model\"\"\"\nfrom models.base_model import BaseModel\nimport models\nimport os\nimport unittest\n\n\nclass TestBase_Model(unittest.TestCase):\n \"\"\" class TestBase_Model \"\"\"\n\n def test_docstring(self):\n \"\"\" function test_docstring \"\"\"\n msj = \"Módulo does not has docstring\"\n self.assertIsNotNone(models.base_model.__doc__, msj)\n msj = \"Clase does not has docstring\"\n self.assertIsNotNone(BaseModel.__doc__, msj)\n\n def test_executable_file(self):\n \"\"\" function test_executable_file \"\"\"\n is_read_true = os.access(\"models/base_model.py\", os.R_OK)\n self.assertTrue(is_read_true)\n is_write_true = os.access(\"models/base_model.py\", os.W_OK)\n self.assertTrue(is_write_true)\n is_exec_true = os.access(\"models/base_model.py\", os.X_OK)\n self.assertTrue(is_exec_true)\n\n def test_is_an_instance(self):\n \"\"\" function test_is_an_instance \"\"\"\n my_model = BaseModel()\n self.assertIsInstance(my_model, BaseModel)\n\n def test_id(self):\n \"\"\" function test_id \"\"\"\n my_model = BaseModel()\n my_model1 = BaseModel()\n self.assertNotEqual(my_model.id, my_model1.id)\n\n def test_save(self):\n \"\"\" function test_save \"\"\"\n my_model2 = BaseModel()\n first_updated = my_model2.updated_at\n my_model2.save()\n second_updated = my_model2.updated_at\n self.assertNotEqual(first_updated, second_updated)\n\n def test_to_dict(self):\n \"\"\" function test_to_dict \"\"\"\n my_model3 = BaseModel()\n my_dict_model3 = my_model3.to_dict()\n self.assertIsInstance(my_dict_model3, dict)\n for key, value in my_dict_model3.items():\n flag = 0\n if my_dict_model3[\"__class__\"] == \"BaseModel\":\n flag += 1\n self.assertTrue(flag == 1)\n for key, value in my_dict_model3.items():\n if key == \"created_at\":\n self.assertIsInstance(value, str)\n if key == \"updated_at\":\n self.assertIsInstance(value, str)\n","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381426201","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpRequest\n\n\nfrom Models.Municipio.FormsM import FormsMunicipio\nfrom Models.Municipio.models import Municipio\n\n\nclass FormularioMunicipioView(HttpRequest):\n\n def index(request):\n municipio = FormsMunicipio()\n return render(request,'MunicipioIndex.html',{'form':municipio})\n\n def procesar_formulario(request):\n municipio = 
FormsMunicipio(request.POST)\n if municipio.is_valid():\n municipio.save()\n municipio= FormsMunicipio()\n return render(request,'MunicipioIndex.html', {'form': municipio, \"mensaje\": \"Ok\"})\n\n def listar_municipios(request):\n municipios = Municipio.objects.all()\n return render(request, \"ListaMunicipios.html\", {\"municipios\": municipios})\n\n\n def modificarM(request, id):\n MODM = Municipio.objects.get(id_muni=id)\n data = {\n 'form': FormsMunicipio(instance=MODM)\n }\n if request.method == 'POST':\n formulario = FormsMunicipio(data=request.POST, instance=MODM)\n if formulario.is_valid():\n formulario.save()\n data['mensaje'] = \"Se ha actualizado el registro.\"\n data['form'] = formulario\n return render(request, 'ModificarMunicipio.html', data)\n\n\n def eliminarM(request, id):\n Municipio.objects.filter(id_muni=id).delete()\n return redirect(to=\"listarMunicipios\")\n\n","sub_path":"Models/Municipio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232895829","text":"#!/usr/bin/python\n# filename: hide_html5.py\n\nimport re\nimport blogofile_bf as bf\n\ndef run(content):\n \"\"\"Return an HTML5-free (no HTML5 elements and attributes) web page.\n \"\"\"\n\n enabled = bf.config.filters.hide_html5.enabled\n elements = []\n attributes = ['placeholder']\n\n if enabled:\n for attr in attributes:\n content = re.sub(r'%s=\"(.+?)\"\\s?' % attr, '', content)\n\n return content\n","sub_path":"py/template-blog/mysite/_filters/hide_html5.py","file_name":"hide_html5.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622154440","text":"import euler\n\nsieve = euler.list_to_dic(euler.sieve_of_eratosthenes(1000000))\n\ndef truncatable(n):\n\treturn all([True if int(n[i:]) in sieve and int(n[0:len(n)-i]) in sieve else False for i in range(len(n))])\n\ncount = 0\ntotal = 0\nfor prime in sieve.keys():\n\tif truncatable(str(prime)) and prime not in [2,3,5,7]:\n\t\tcount += 1\n\t\ttotal += prime\n\tif count==11:\n\t\tbreak\nprint (total)\n\n\n\n\n","sub_path":"37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"465226527","text":"#! 
/usr/bin/env python\n# -*- coding: latin-1 -*-\n\nimport sys\nimport os.path\nimport re\n\nfrom planning import parser, problem\n\n\ndef parse_pddl_file(type, filename):\n try:\n #Ignasi review: open instead of file\n return parser.parse_nested_list(open(filename))\n except IOError as inst:\n raise SystemExit(\"Error: Could not read file: %s\\nReason: %s.\" %\n (inst.filename, inst))\n except parser.ParseError:\n raise SystemExit(\"Error: Could not parse %s file: %s\\n\" % (type, filename))\n\n\ndef open_file(task_filename=None, domain_filename=None, actions_on_demand = False):\n #print(task_filename,domain_filename)\n if task_filename is None:\n if len(sys.argv) not in (2, 3, 4):\n raise SystemExit(\"Error: Need exactly one or two command line arguments.\\n\"\n \"Usage: %s [] \" % sys.argv[0])\n #task_filename = sys.argv[-1]\n task_filename = sys.argv[2]\n if len(sys.argv) > 2:\n domain_filename = sys.argv[1]\n\n if not domain_filename:\n dirname, basename = os.path.split(task_filename)\n domain_filename = os.path.join(dirname, \"domain.pddl\")\n if not os.path.exists(domain_filename) and re.match(r\"p[0-9][0-9]\\b\", basename):\n domain_filename = os.path.join(dirname, basename[:4] + \"domain.pddl\")\n if not os.path.exists(domain_filename) and re.match(r\"p[0-9][0-9]\\b\", basename):\n domain_filename = os.path.join(dirname, basename[:3] + \"-domain.pddl\")\n if not os.path.exists(domain_filename) and re.match(r\"p[0-9][0-9]\\b\", basename):\n domain_filename = os.path.join(dirname, \"domain_\" + basename)\n if not os.path.exists(domain_filename) and basename.endswith(\"-problem.pddl\"):\n domain_filename = os.path.join(dirname, basename[:-13] + \"-domain.pddl\")\n if not os.path.exists(domain_filename):\n raise SystemExit(\"Error: Could not find domain file using \"\n \"automatic naming rules.\")\n\n domain_pddl = parse_pddl_file(\"domain\", domain_filename)\n problem_pddl = parse_pddl_file(\"problem\", task_filename)\n #print domain_pddl\n #print problem_pddl\n return problem.Problem.parse(domain_pddl, problem_pddl, actions_on_demand = actions_on_demand)\n #return tasks.Task.parse(domain_pddl, task_pddl)\n\nif __name__ == \"__main__\":\n p = open()\n #p.dump()\n A = p.applicable_actions(p.init)\n for a in A:\n literals = set()\n literals |= a.preconditions\n literals |= a.add_eff\n literals |= a.del_eff\n for l in (literals):\n if l in p.init:\n print('+++')\n else:\n print('---')\n print(l)\n A2 = p.match_applicable_actions(A,p.init)\n print(A == A2)\n #a.dump()\n \n \n \n","sub_path":"planning/pddl_file.py","file_name":"pddl_file.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65282868","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 28 19:53:42 2017\n\n@author: veraqin\n\"\"\"\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n#from sklearn import cross_validation\n\n\ndef load_train_data():\n data = np.empty((10000,30000),dtype= 'uint8')\n label = np.empty((10000,),dtype=\"int\")\n \n img_file = os.listdir('./1/train1')\n i = 0\n j = 2\n for ff in img_file:\n if not ff.startswith('.'):\n j -= 1\n imgs = os.listdir('./1/train1/' + ff)\n for f in imgs:\n if not f.startswith('.'):\n img = Image.open('./1/train1/' + ff +'/'+ f)\n arr = np.asarray(img,dtype='uint8')\n arr = arr.reshape(1,30000)\n data[i,:] = arr\n label[i] = j\n i += 1\n img.close()\n return data,label\n\ndata, 
label = load_train_data()\n\ndef load_test_data():\n data = np.empty((10000,30000), dtype=\"uint8\")\n #imgs = os.listdir('./valid')\n #num = len(imgs)\n for j in range(10000):\n path = './data_test/1_test_deal/'+ str(j+1)+'.png'\n img = Image.open(path)\n arr = np.asarray(img, dtype=\"uint8\")\n arr = arr.reshape(1,30000)\n data[j,:] = arr\n img.close()\n return data\n\ntest_data = load_test_data()\n\n#X_train, X_test, y_train, y_test = cross_validation.train_test_split(data, label, test_size=0.1, random_state=0)\n\n\npca = PCA(n_components=300, svd_solver = 'randomized', whiten = True)\npca.fit(data)\nX_train = pca.transform(data)\n#X_t_test = pca.transform(X_test)\n\ntest = pca.transform(test_data)\n\nclf = SVC()\nclf.fit(X_train, label)\n#print 'score', clf.score(X_t_test, y_test)\n\ntest_label = clf.predict(test)\nfor i in range(10000):\n test_label[i] += 1\n\nnp.savetxt('./data_test/1.csv', test_label, fmt = '%d')\n","sub_path":"Footprint_classification/1_PcaSvm.py","file_name":"1_PcaSvm.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473493372","text":"#!/usr/bin/env python\nimport math\nimport numpy as np\nimport rospy\nfrom message_filters import ApproximateTimeSynchronizer, Subscriber\nimport sensor_msgs.msg\nfrom std_msgs.msg import Float32\nfrom avoid_obstacles.msg import BoundingBoxes\n# Parameters\nPI = math.pi\nKp = 1\nmax_angle = 30\nmax_speed = 200\nnear_arc_angle = 25\nmid_arc_angle = 15 # 9\nfar_arc_angle = 2\nnear_threshold = 0.25\nmid_threshold = 0.8\nfar_threshold = 5\nstart = 0 # rospy.Time.now()\nduration = 0.\ndirection = 0\ncarry_dir = 0.\ncarry_speed = 0\ncarry_angle = 0\n# near_threshold = 2 # 0.3\n# mid_threshold = 4\n# far_threshold = 5\ndirect = 1\nstop = False\n\n\ndef sigmoid(x):\n y = 1 / (1 + np.exp(-x))\n return y\n\n\ndef distanceAt(range_angle, dr):\n # n = (range_angle - 90) / 180 * len(dr)\n # d = dr[int(n)]\n d = dr[range_angle]\n return d\n\n\ndef changeAngleSmooth(current_angle, destination_angle, change_rate):\n global new_angle\n # if current_angle - destination_angle > 15:\n # change_rate = change_rate * 2 #\n if destination_angle - (change_rate + 2) < current_angle < destination_angle + (change_rate + 2):\n new_angle = current_angle\n if destination_angle == 0:\n new_angle = 0\n elif current_angle < destination_angle:\n new_angle = (1 - change_rate) * current_angle + change_rate * destination_angle\n elif current_angle > destination_angle:\n new_angle = (1 - change_rate) * current_angle - change_rate * destination_angle\n if new_angle < -max_angle:\n new_angle = -max_angle\n if new_angle > max_angle:\n new_angle = max_angle\n return new_angle\n\n\ndef changeAngle(current_angle, destination_angle, change_rate):\n global new_angle\n if destination_angle - (change_rate + 2) < current_angle < destination_angle + (change_rate + 2):\n new_angle = current_angle\n if current_angle - destination_angle > 15:\n change_rate = change_rate * 2 #\n # if destination_angle == 0:\n # new_angle = 0\n if current_angle < destination_angle:\n new_angle = current_angle + change_rate\n elif current_angle > destination_angle:\n new_angle = current_angle - change_rate\n if new_angle < -max_angle:\n new_angle = -max_angle\n if new_angle > max_angle:\n new_angle = max_angle\n return new_angle\n\n\ndef changeSpeed(current_speed, destination_speed, change_rate):\n global new_speed\n global carry_speed\n # if destination_speed - (change_rate + 2) < current_speed < 
destination_speed + (change_rate + 3):\n # new_speed = 0.8*current_speed +0.2*destination_speed\n # print(\"00\")\n # if destination_speed == 0:\n # new_speed = 0\n\n if current_speed - destination_speed > 60:\n change_rate = 10 #\n elif current_speed - destination_speed > 30:\n change_rate = 5 #\n if current_speed < destination_speed:\n new_speed = current_speed + change_rate\n elif current_speed > destination_speed:\n new_speed = current_speed - change_rate * 2\n if current_speed < destination_speed + (change_rate + 2) and angle > destination_speed - (change_rate + 2):\n new_speed = current_speed\n if new_speed < 6:\n if new_speed > 0 and carry_speed < 100:\n carry_speed = carry_speed + new_speed\n else:\n carry_speed = 0\n new_speed = 0\n\n if new_speed > max_speed:\n new_speed = max_speed\n return new_speed\n\n\ndef avoid(data, cam):\n global start\n global stop\n global duration\n global carry_dir\n global angle\n global speed\n global direct\n near_check = 0\n mid_check = 0\n mid_left_check = 0\n mid_right_check = 0\n # near = False\n # mid = False\n # free = False\n mid_line = int(len(data.ranges)/2)\n # increaseBy = data.angle_increment*180/PI\n ranges = np.array(data.ranges)\n ranges[np.isnan(ranges)] = 0.\n ranges[np.isinf(ranges)] = 10.\n np.warnings.filterwarnings('ignore')\n # #########~~~~~~~Time~~~~~~~##############----------------------------------\n \"\"\"\n if start != 0:\n duration = (start - rospy.Time.now()).to_sec()\n carry_dir = carry_dir + (angle * speed * duration)\n if carry_dir > 0:\n direct = 1\n elif carry_dir < 0:\n direct = -1\n start = rospy.Time.now()\n print(\"mid line\", data.ranges[mid_line])\n print(\"carry_dir\", carry_dir)\n \"\"\"\n ##########################################\n # scan_lines = np.arange(len(data.ranges))\n # print(\"mid_line\",mid_line)\n near_arc_line = int(len(ranges) * near_arc_angle / 180)\n mid_arc_line = int(len(ranges) * mid_arc_angle / 180)\n left_sum = np.sum(ranges[0:int(len(ranges)/2)])\n right_sum = np.sum(ranges[int(len(ranges)/2):len(ranges)])\n if left_sum < right_sum:\n direct = 1\n else:\n direct = -1\n for line_n in range(mid_line - near_arc_line, mid_line + near_arc_line):\n # print(\"near = \", line_n)\n if ranges[line_n] < near_threshold:\n near_check = near_check+1\n # near = True\n # break\n if near_check > 2 * near_arc_line * 0.1: # if near arc line is blocked by more than 10%\n angle = changeAngle(angle, 0, 1)\n speed = changeSpeed(speed, 0, 1)\n\n print(\"I'm blocked\")\n ##########################################################################################\n else:\n for line_m in range(mid_line - mid_arc_line, mid_line + mid_arc_line):\n # print(\"mid = \", line_m)\n if ranges[line_m] < mid_threshold: # if mid arc line is blocked by more than 10%\n mid_check = mid_check + 1\n if line_m < mid_line:\n mid_left_check = mid_left_check + 1\n elif line_m > mid_line: #\n mid_right_check = mid_right_check + 1\n # mid = True\n # break\n if mid_check > 2 * mid_arc_line * 0.2:\n if mid_left_check < mid_right_check-5:\n direct = -1\n # print(\" -1 \", mid_left_check, mid_right_check)\n elif mid_left_check > mid_right_check+5:\n direct = 1\n # print(\" 1 \", mid_left_check, mid_right_check)\n else:\n # print(\" sum \",left_sum, right_sum)\n if left_sum < right_sum:\n direct = 1\n else:\n direct = -1\n angle = changeAngle(angle, direct * 30, 3)\n # angle = changeAngleSmooth(angle, -direct * 30, 0.1)\n speed = changeSpeed(speed, max_speed/2, 5)\n # carry_dir = angle * speed\n print(\"Obstacle ahead\")\n 
##########################################################################################\n else:\n # if -100 < carry_dir < 100:\n # angle = changeAngle(angle, 0, 1)\n # else:\n # angle = changeAngle(angle, direct * 30, 0.5)\n angle = changeAngleSmooth(angle, 0, 0.4)\n speed = changeSpeed(speed, max_speed, 10)\n print(\"It's a free world\")\n ##########################################################################################\n if stop:\n angle = changeAngle(angle, 0, 1)\n speed = changeSpeed(speed, 0, 5)\n print(\"Stop\")\n global stop, prob, y_max\n stop = False\n num = len(cam.bounding_boxes)\n real_y_max = 0\n for i in range(0, num):\n # b = \"stop sign\"\n c = \"bottle\"\n m = cam.bounding_boxes[i].class_name\n if m == c: # or m == b: # data.bounding_boxes[i].ymax < 400 and\n # Class_name = cam.bounding_boxes[i].class_name\n prob = cam.bounding_boxes[i].probability\n # x_min = cam.bounding_boxes[i].xmin\n # x_max = cam.bounding_boxes[i].xmax\n # y_min = cam.bounding_boxes[i].ymin\n y_max = cam.bounding_boxes[i].ymax\n print(y_max)\n if real_y_max < y_max and prob > 0.4: # and Class_name != \"chair\"\n real_y_max = y_max\n if real_y_max > 100:\n stop = True\n\n\ndef main():\n global angle\n global speed\n global stop, y_max\n angle = 0.0\n speed = 0.0\n rospy.init_node('listener', anonymous=True)\n\n robo_angle_pub = rospy.Publisher('robo_angle', Float32, queue_size=10)\n robo_speed_pub = rospy.Publisher('robo_speed', Float32, queue_size=10)\n laser_sub = Subscriber(\"scan\", sensor_msgs.msg.LaserScan)\n camera_sub = Subscriber(\"/darknet_ros/bounding_boxes\", BoundingBoxes)\n ats = ApproximateTimeSynchronizer([laser_sub, camera_sub], queue_size=5, slop=0.1, allow_headerless=True)\n ats.registerCallback(avoid)\n rospy.spin()\n rate = rospy.Rate(1) # 10hz\n\n while not rospy.is_shutdown():\n msg_angle = angle\n msg_speed = speed\n print(\" s =\", speed, \"a =\", angle, stop, )\n robo_angle_pub.publish(msg_angle)\n robo_speed_pub.publish(msg_speed)\n rate.sleep()\n\n\nif __name__ == '__main__':\n main()\n\"\"\"\n elif False: # carry_dir !=0:\n angle = changeAngle(angle, -direct*30, 1)\n speed = changeSpeed(speed, -direct*50, 5)\n \"\"\"\n","sub_path":"scripts/robo_laser_cam1.py","file_name":"robo_laser_cam1.py","file_ext":"py","file_size_in_byte":9276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507797495","text":"class Util:\r\n def is_out_of_areas(num, areas):\r\n for area in areas:\r\n if area[0] < num and num < area[1]:\r\n return False\r\n return True\r\n\r\n\r\n def format(items):\r\n levels = []\r\n for item in items:\r\n levels.append(item[0])\r\n # --------------------------\r\n\r\n # minimize diff between levels -----\r\n _depths = list(set(levels)) # sort and unique\r\n # replace with depth rank\r\n for i, item in enumerate(levels):\r\n levels[i] = _depths.index(levels[i]) + 1\r\n\r\n\r\n # Force set level of first item to 1 -----\r\n # (first item must be list root)\r\n if len(levels):\r\n diff_to_root = levels[0] - 1\r\n if 0 < diff_to_root:\r\n def pad(level):\r\n return level - diff_to_root\r\n levels = list(map(pad, levels))\r\n\r\n # --------------------------\r\n for i, item in enumerate(items):\r\n item[0] = levels[i]\r\n return items\r\n\r\n\r\n def strtobool(val):\r\n \"\"\"pick out from 'distutils.util' module\"\"\"\r\n if isinstance(val, str):\r\n val = val.lower()\r\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\r\n return 1\r\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\r\n return 0\r\n 
else:\r\n raise ValueError(\"invalid truth value %r\" % (val,))\r\n else:\r\n return bool(val)\r\n\r\n\r\n def within_ranges(target, ranges):\r\n tb = target[0]\r\n te = target[1]\r\n for _range in ranges:\r\n rb = _range[0]\r\n re = _range[1]\r\n if (rb <= tb and tb <= re) and (rb <= tb and tb <= re):\r\n return True\r\n return False\r\n","sub_path":"markdowntoc/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"562510256","text":"def dfs(i,j) :\n global block_count\n visited[i][j]=1\n index_list.append([i,j])\n cx = j\n cy = i\n for n in range(4):\n nx = cx+dx[n]\n ny = cy + dy[n]\n if nx<0 or ny<0 or nx>=6 or ny>= 12 :\n continue\n #print(ny,nx)\n if puyo[cy][cx]==puyo[ny][nx] and visited[ny][nx]!=1:\n block_count+=1\n #block_count 를 여기서 계산을 하게 됨\n dfs(ny, nx)\n\n# 뇌절 포인트 4 gravity 하는 방법-> 한 열을 리스트에 다 담아 버리고 아래서 부터 다시 갱신함\ndef gravity():\n for i in range(6):\n plus_list=[]\n for j in range(11,-1,-1):\n if puyo[j][i]!='.':\n plus_list.insert(0,puyo[j][i])\n puyo[j][i]='.'\n for k in range(11, 11-len(plus_list),-1):\n puyo[k][i]=plus_list.pop()\n\n\npuyo = [['.' for _ in range(6)] for _ in range(12)]\n\nblock_count=0;\nvisited = [[0 for _ in range(6)] for _ in range(12)]\ndx = [0,0,-1,1]\ndy = [-1,1,0,0]\nfor i in range(12):\n data = input()\n for j in range(6):\n puyo[i][j]= data[j]\ncount =0\n#알고리즘 시작\nwhile True: # 연쇄 턴\n visited = [[0 for _ in range(6)] for _ in range(12)]\n boom_list = []\n #뇌절 포인트1 . visited 는 한턴에 맵 전체를 탐색할때 중복을 피하기위함으로 한턴에 한번 초기화\n #2 boom_list는 모든 뿌요 한번에 터뜨리기 위해 선언\n flag =0\n for i in range(12):\n for j in range(6):\n # 모든 점을 돌며 문자가 있으면 dfs로 들어간다.\n if puyo[i][j]!='.':\n index_list = []\n block_count=1\n #index_list는 boom_list에 누적할 리스트로 한 중첩포인트(같은색 4개)당 하나이다.\n #뇌절 포인트3 : 네개 이상이면 터뜨리는걸 어떻게 할 것이냐 dfs 내부로 들어가면 도중에 판단할 수 없음\n #dfs를 모두 돌고 나서 계산된 block count가 4가 넘는다면 그 중첩포인트는 터뜨릴 수 있음\n dfs(i, j)\n if block_count>=4:\n flag=1\n boom_list.extend(index_list)\n #4가 넘으므로 while문을 한턴을 더 진행하기 위해 flag를 설정하고 boom_list에 포인트 누적\n if flag==1:\n count+=1\n #결과값(몇턴) 계산\n if flag==0:\n break;\n #boom_count가 4가 넘는 것이 하나도 없다-> 더이상 턴 진행 못함 while문 break;\n for boom in boom_list:\n puyo[boom[0]][boom[1]]= '.'\n #담아놓은 중첩 포인트들 터뜨림\n gravity()\n #아래로 다 내림\n # for ci in range(12):\n # print(puyo[ci])\n\nprint(count)\n","sub_path":"0128_0204/0128_0204_문태현/puyoputo.py","file_name":"puyoputo.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"192751048","text":"# !/Users/nancypham/opt/anaconda3/bin/ python3\n\nimport sys\nimport re\n\ndef reset(make_year):\n make_year = []\n return make_year\n\ndef flush(make_year):\n '''\n Write result to STDOUT. Run at end of every group.\n '''\n print(f'{make_year[0]}, {make_year[1]}, {make_year[2]}')\n\ndef reducer():\n '''\n Reads mapper output. Within each vin_number group, iterate through all the records to find the one that has the make and year available and captures that in group-level master info. Filter accident records and modify by adding the master info before outputing the accident records.\n\n Assumption: mapper output sorted by key (i.e. 
vin) before passed to the reducer.\n '''\n current_vin = None\n make_year = []\n\n for line in sys.stdin:\n vin, record = line.strip().split('\\t', 1)\n \n if current_vin != vin:\n if current_vin is not None and make_year[0] == 'A':\n flush(make_year)\n make_year = reset(make_year)\n\n current_vin = vin\n\n value = record.split(',')\n value = [re.sub('[\\W_]', '', x) for x in value] # Clean stringified list\n\n if value[0] == 'A':\n make_year.append('A')\n else:\n make_year.append('')\n \n if value[0] == 'I':\n make_year[1:2] = [value[1+i] for i in range(2)]\n\n # Output the last group if needed (guarded the same way as above so trailing non-accident groups are not printed)\n if make_year and make_year[0] == 'A':\n flush(make_year)\n\nif __name__ == '__main__': \n reducer()\n\n'''\nExecution log:\nA, Mercedes, 2016\nA, Mercedes, 2015\nA, Mercedes, 2015\nA, Nissan, 2003\n'''","sub_path":"hadoop-mapreduce/src/autoinc_reducer1.py","file_name":"autoinc_reducer1.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"1080194","text":"\"\"\"\n Insert a node into a sorted doubly linked list\n head could be None as well for empty list\n Node is defined as\n\n class Node(object):\n\n return the head node of the updated list\n\"\"\"\n\nclass Node(object):\n def __init__(self, data=None, next_node=None, prev_node=None):\n self.data = data\n self.next = next_node\n self.prev = prev_node\n\ndef SortedInsert(head, data):\n n = Node(data, None, None)\n if head is None: return n\n if data <= head.data:\n head.prev = n\n n.next = head\n return n\n else:\n o = SortedInsert(head.next, data)\n head.next = o\n o.prev = head\n return head","sub_path":"Linked_Lists/InsertANodeIntoASortedDoublyLinkedList.py","file_name":"InsertANodeIntoASortedDoublyLinkedList.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"586661115","text":"################################################\n# The code below downloads a JSON file with the latest and next five days' climate and weather information for every city.\n################################################\n\nimport pandas as pd\nimport numpy as np\nimport urllib.request, json\n\ndata = pd.read_csv(\"allcitylist.csv\")\n\nprint(len(data))\n\nprint(data.head())\n\ndf = pd.DataFrame(data)\n\n# print(type(df[['Country', 'City', 'CityId']]))\n\ncityIdList = df['CityId']\n\ncityIdList = list(cityIdList)\n\nimport os\n\nfor id in cityIdList:\n\tCITY_ID = id\n\tURL = \"https://worldweather.wmo.int/en/json/\"+str(CITY_ID)+\"_en.json\"\n\tfile_name = './tmp/'+str(CITY_ID)+'_en.json'\n\tif(os.path.exists(file_name)):\n\t\tprint(\"File Already Present\")\n\telse:\n\t\tprint('downloading from '+URL)\n\t\tresponse = urllib.request.urlopen(URL) # urllib.urlopen was the Python 2 spelling\n\t\tdata = json.loads(response.read())\n\t\twith open(file_name, 'w') as f:\n\t\t\tjson.dump(data, f)\n\n\n","sub_path":"jsondownloader.py","file_name":"jsondownloader.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"524795350","text":"from collections import namedtuple\nfrom datetime import datetime\nimport mock\nfrom service.updaters.property_by_postcode_updater_v3 import PropertyByPostcodeUpdaterV3\n\nMockTitleRegisterData = namedtuple(\n \"TitleRegisterData\", ['title_number', 'register_data', 'last_modified', 'is_deleted']\n)\n\n\nclass TestPropertyByPostcodeUpdaterV3:\n\n def test_init_sets_index_information(self):\n updater = PropertyByPostcodeUpdaterV3('index123', 'doctype321')\n assert updater.index_name == 'index123'\n 
assert updater.doc_type == 'doctype321'\n\n @mock.patch('service.database.page_reader.get_next_data_page', return_value=[])\n def test_get_next_source_data_page_calls_page_reader_with_right_args(self, mock_get_page):\n last_title_number = 'title123'\n last_modification_date = datetime.now()\n page_size = 123\n\n updater = PropertyByPostcodeUpdaterV3('index', 'doctype')\n updater.last_title_modification_date = last_modification_date\n updater.last_updated_title_number = last_title_number\n\n updater.get_next_source_data_page(page_size)\n\n mock_get_page.assert_called_once_with(\n last_title_number, last_modification_date, page_size\n )\n\n def test_get_next_source_data_page_returns_result_from_page_reader(self):\n title1 = MockTitleRegisterData('TTL1', {'register': 'data1'}, datetime.now(), False)\n title2 = MockTitleRegisterData('TTL2', {'register': 'data2'}, datetime.now(), False)\n\n with mock.patch('service.database.page_reader.get_next_data_page',\n return_value=[title1, title2]):\n updater = PropertyByPostcodeUpdaterV3('index', 'doctype')\n updater.last_title_modification_date = datetime.now()\n updater.last_updated_title_number = 'title123'\n result = updater.get_next_source_data_page(123)\n\n assert result == [title1, title2]\n\n @mock.patch('service.es_utils.get_delete_action', return_value={'delete': 'action1'})\n def test_prepare_elasticsearch_actions_returns_delete_action_when_title_deleted(\n self, mock_get_delete_action):\n\n index_name = 'index_name1'\n doc_type = 'doc_type1'\n register_data = {'address': {'address_string': 'address string SW11 2DR'}}\n\n deleted_title = MockTitleRegisterData('TTL1', register_data, datetime.now(), True)\n title_id = 'TTL1-SW112DR'\n\n updater = PropertyByPostcodeUpdaterV3(index_name, doc_type)\n returned_actions = updater.prepare_elasticsearch_actions(deleted_title)\n\n mock_get_delete_action.assert_called_once_with(index_name, doc_type, title_id)\n\n assert returned_actions == [{'delete': 'action1'}]\n\n @mock.patch('service.es_utils.get_upsert_action', return_value={'upsert': 'action1'})\n def test_prepare_elasticsearch_actions_returns_upsert_action_when_title_open(\n self, mock_get_upsert_action):\n\n entry_datetime = datetime(2015, 4, 20, 12, 23, 34)\n index_name = 'index_name1'\n doc_type = 'doc_type1'\n register_data = {\n 'address': {\n 'house_no': '15',\n 'address_string': 'ADDRESS string 12 SW11 2DR',\n }\n }\n updated_title = MockTitleRegisterData('TTL1', register_data, entry_datetime, False)\n title_id = 'TTL1-SW112DR'\n doc = {\n 'title_number': 'TTL1',\n 'entry_datetime': '2015-04-20T12:23:34.000+0000',\n 'postcode': 'SW112DR',\n 'house_number_or_first_number': 15,\n 'address_string': 'address string 12 sw11 2dr'\n }\n\n updater = PropertyByPostcodeUpdaterV3(index_name, doc_type)\n returned_actions = updater.prepare_elasticsearch_actions(updated_title)\n\n mock_get_upsert_action.assert_called_once_with(index_name, doc_type, doc, title_id)\n\n assert returned_actions == [{'upsert': 'action1'}]\n\n @mock.patch('service.es_utils.get_upsert_action', return_value={'upsert': 'action1'})\n def test_prepare_elasticsearch_actions_returns_upsert_action_without_postcode_numbers(\n self, mock_get_upsert_action):\n\n entry_datetime = datetime(2015, 4, 20, 12, 23, 34)\n index_name = 'index_name1'\n doc_type = 'doc_type1'\n register_data = {\n 'address': {\n 'address_string': 'ADDRESS string (SW11 2DR)',\n }\n }\n updated_title = MockTitleRegisterData('TTL1', register_data, entry_datetime, False)\n title_id = 'TTL1-SW112DR'\n doc = {\n 
'title_number': 'TTL1',\n 'entry_datetime': '2015-04-20T12:23:34.000+0000',\n 'postcode': 'SW112DR',\n 'house_number_or_first_number': None,\n 'address_string': 'address string (sw11 2dr)'\n }\n\n updater = PropertyByPostcodeUpdaterV3(index_name, doc_type)\n returned_actions = updater.prepare_elasticsearch_actions(updated_title)\n\n mock_get_upsert_action.assert_called_once_with(index_name, doc_type, doc, title_id)\n\n assert returned_actions == [{'upsert': 'action1'}]\n\n def test_get_mapping_returns_correct_mapping(self):\n assert PropertyByPostcodeUpdaterV3('index', 'doctype').get_mapping() == {\n 'properties': {\n 'title_number': {'type': 'string', 'index': 'no'},\n 'postcode': {'type': 'string', 'index': 'not_analyzed'},\n 'house_number_or_first_number': {'type': 'integer', 'index': 'not_analyzed'},\n 'address_string': {'type': 'string', 'index': 'not_analyzed'},\n 'entry_datetime': {'type': 'date',\n 'format': 'date_time',\n 'index': 'no'}\n }\n }\n","sub_path":"tests/test_property_by_postcode_updater_v3.py","file_name":"test_property_by_postcode_updater_v3.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394029135","text":"from django.conf import settings\nfrom pytz import timezone\nimport xlsxwriter\n\n\nTZ = timezone(settings.TIME_ZONE)\n\n\ndef change_request_export(fileobj, rfcs):\n with xlsxwriter.Workbook(\n fileobj,\n {\n 'in_memory': True,\n 'default_date_format': 'dd-mmm-yyyy HH:MM',\n 'remove_timezone': True,\n },\n ) as workbook:\n changes = workbook.add_worksheet('Change requests')\n changes.write_row('A1', (\n 'Change ref.', 'Title', 'Change type', 'Requester', 'Endorser', 'Implementer', 'Status',\n 'Test date', 'Planned start', 'Planned end', 'Completed', 'Outage duration',\n 'System(s) affected', 'Incident URL', 'Unexpected issues', 'Created timestamp',\n ))\n row = 1\n for i in rfcs:\n changes.write_row(row, 0, [\n i.pk, i.title, i.get_change_type_display(),\n i.requester.name if i.requester else '',\n i.endorser.name if i.endorser else '',\n i.implementer.name if i.implementer else '',\n i.get_status_display(), i.test_date,\n i.planned_start.astimezone(TZ) if i.planned_start else '',\n i.planned_end.astimezone(TZ) if i.planned_end else '',\n i.completed.astimezone(TZ) if i.completed else '',\n str(i.outage) if i.outage else '', i.systems_affected, i.incident_url,\n i.unexpected_issues, i.created.astimezone(TZ),\n ])\n row += 1\n changes.set_column('A:A', 11)\n changes.set_column('B:B', 44)\n changes.set_column('C:C', 12)\n changes.set_column('D:F', 18)\n changes.set_column('G:G', 26)\n changes.set_column('H:K', 18)\n changes.set_column('L:L', 15)\n changes.set_column('M:N', 30)\n changes.set_column('O:P', 18)\n\n return fileobj\n\n\ndef riskassessment_export(fileobj, it_systems):\n with xlsxwriter.Workbook(\n fileobj,\n {\n 'in_memory': True,\n 'default_date_format': 'dd-mmm-yyyy HH:MM',\n 'remove_timezone': True,\n },\n ) as workbook:\n sheet = workbook.add_worksheet('Risk assessments - IT systems')\n sheet.write_row('A1', (\n 'IT system ID', 'IT system name', 'IT system status', 'Division',\n 'Platform', 'Critical function', 'Traffic', 'Access', 'Backups',\n 'Support', 'Operating System', 'Vulnerability', 'Contingency plan',\n ))\n row = 1\n for i in it_systems:\n sheet.write_row(row, 0, [\n i.system_id,\n i.name,\n i.get_status_display(),\n i.division_name,\n i.platform.name if i.platform else '',\n ])\n\n risks = i.get_risk_category_maxes()\n sheet.write_row(row, 
5, [r.rating_desc.capitalize() if r else '' for r in risks.values()])\n row += 1\n sheet.set_column('B:B', 50)\n sheet.set_column('C:C', 18)\n sheet.set_column('D:D', 40)\n sheet.set_column('E:M', 19)\n\n return fileobj\n","sub_path":"registers/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"217748446","text":"import sparse_csr_io as spi\nfrom scipy import sparse\nimport csv\n\n\ncmap = dict()\namap = dict()\n\nwith open('id_map_comp.csv') as ifile:\n reader = csv.reader(ifile)\n next(reader)\n for row in reader:\n cmap[int(row[1])] = row[0]\nwith open('id_map_comp.csv') as ifile:\n reader = csv.reader(ifile)\n next(reader)\n for row in reader:\n amap[row[0]] = int(row[1])\nadj_comp = spi.load_sparse_csr(\"dyad_adj_comp\") # renamed from adj_com: the lookups below use adj_comp, so the old name raised NameError\nadj = spi.load_sparse_csr(\"dyad_adj\")\n\nfor i in range(len(cmap)):\n if adj_comp[i].nnz != adj[amap[cmap[i]]].nnz:\n print(\"Dun goofed\")\n break\n\n","sub_path":"code/deg_req.py","file_name":"deg_req.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"48325164","text":"# django imports\nfrom django.urls import path\nfrom ticket.views import GetTicketAPIView\nfrom ticket.views import TicketCreateAPIView, TicketDeleteAPIView\nfrom ticket.views import TicketExpireAPIView, TicketUpdateAPIView\n\nurlpatterns = [\n path('create/',\n TicketCreateAPIView.as_view({\"post\": \"ticket_create\"})),\n path('update/',\n TicketUpdateAPIView.as_view({\"post\": \"ticket_update\"})),\n path('delete/',\n TicketDeleteAPIView.as_view({\"put\": \"ticket_delete\"})),\n path('expire/',\n TicketExpireAPIView.as_view({\"post\": \"ticket_expire\"})),\n path('get/',\n GetTicketAPIView.as_view({\"get\": \"get_ticket\"})),\n # 'view/' is an assumed route name: the original repeated 'get/', which left this second entry unreachable\n path('view/',\n GetTicketAPIView.as_view({\"get\": \"view_ticket\"}))\n]\n","sub_path":"ticket/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"3693695","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import auth\nfrom . import forms\nfrom . 
import models\nfrom django.core.urlresolvers import reverse\nfrom stream3.settings import STRIPE_PUBLISHABLE, STRIPE_SECRET\nfrom django.contrib.auth import decorators\nfrom django.contrib import messages\nfrom django.utils import timezone\nimport stripe\nimport arrow\nfrom stream3.helpers import is_subscribed\n\n\ndef register(request):\n\n if request.method == 'POST':\n form = forms.RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n user = auth.authenticate(email=request.POST['email'], password=request.POST['password1'])\n new_profile = models.Profile(user=user)\n new_profile.save()\n if user.is_active:\n auth.login(request, user)\n return redirect(reverse('profile', kwargs={'user_id': user.id}))\n else:\n if request.user.is_authenticated():\n return redirect(reverse('profile', kwargs={'user_id': request.user.id}))\n form = forms.RegistrationForm()\n\n context = {\n 'form': form,\n 'url': 'register'\n }\n\n return render(request, 'accounts/register.html', context)\n\n\n@decorators.login_required(login_url='login')\ndef profile(request, user_id=None):\n if user_id:\n profile = get_object_or_404(models.Profile, user_id=user_id)\n else:\n profile = models.Profile.objects.get(user=request.user)\n\n context = {\n 'subscribed': is_subscribed(request.user),\n 'url': 'profile',\n 'profile': profile\n }\n return render(request, 'accounts/profile.html', context)\n\n\n@decorators.login_required(login_url='login')\ndef change_profile(request):\n profile = models.Profile.objects.get(user=request.user)\n if request.method == 'POST':\n form = forms.ChangeProfileForm(request.POST, request.FILES, instance=profile)\n if form.is_valid():\n form.save()\n return redirect(reverse('profile', kwargs={'user_id': request.user.id}))\n\n form = forms.ChangeProfileForm({\n 'name': profile.name,\n 'bio': profile.bio,\n 'interests': profile.interests,\n 'contact': profile.contact\n })\n context = {\n 'url': 'profile',\n 'form': form\n }\n return render(request, 'accounts/changeprofile.html', context)\n\n\ndef login(request):\n\n if request.method == 'POST':\n form = forms.LoginForm(request.POST)\n if form.is_valid():\n email = request.POST['email']\n password = request.POST['password']\n user = auth.authenticate(email=email, password=password)\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n return redirect(reverse('profile', kwargs={'user_id': user.id}))\n\n else:\n if request.user.is_authenticated():\n return redirect(reverse('profile', kwargs={\"user_id\": request.user.id}))\n form = forms.LoginForm()\n\n context = {\n 'form': form,\n 'url': 'login'\n }\n return render(request, 'accounts/login.html', context)\n\n\ndef logout(request):\n auth.logout(request)\n return redirect(reverse('login'))\n\n\n@decorators.login_required(login_url='login')\ndef subscribe(request):\n\n if request.method == 'POST':\n stripe.api_key = STRIPE_SECRET\n print(request.POST)\n try:\n customer = stripe.Customer.create(\n email=request.POST['stripeEmail'],\n card=request.POST['stripeToken'],\n plan='stream3_monthly',\n )\n request.user.stripe_id = customer.stripe_id\n request.user.subscription_end = arrow.now().replace(months=+1).datetime\n request.user.save()\n except stripe.CardError:\n messages.error(request, 'Your card was declined.')\n else:\n if request.user.check_subscription():\n return redirect(reverse('home'))\n\n context = {\n 'subscribed': is_subscribed(request.user),\n 'stripe_publishable_key': STRIPE_PUBLISHABLE,\n 'url': 'home',\n }\n return render(request, 'subscribe.html', 
context)\n\n\n@decorators.login_required(login_url='login')\ndef cancel_subscription(request):\n try:\n subscribed = request.user.check_subscription()\n except AttributeError:\n subscribed = False\n\n if not subscribed:\n return redirect(reverse('home'))\n\n try:\n customer = stripe.Customer.retrieve(request.user.stripe_id)\n customer.cancel_subscription(at_period_end=False)\n request.user.subscription_end = timezone.now()\n request.user.save()\n messages.success(request, 'Your membership has been cancelled.')\n except Exception:\n messages.error(request, 'Failed to cancel subscription.')\n\n return redirect(reverse('profile', kwargs={\"user_id\": request.user.id}))\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"231481951","text":"# -*- coding: utf-8 -*-\n\"\"\" respawn handler for objects \"\"\"\n\nfrom evennia import DefaultScript\nfrom typeclasses.objects import Object\nfrom typeclasses.weapons import AcidBottle, Knife, Pistol\nfrom django.conf import settings\nfrom evennia import create_object, search_object\nimport random\n\nclass RespawnHandler(DefaultScript):\n\t\"\"\"object respawn handler\n\t typeclass - the object's typeclass\n\t name - object name\n\t locations - list of locations where the items will be placed\n\t desc - object description, used if none was set at creation\n\t world_count - how many such objects should exist in the world\n\t\"\"\"\n\t# instruction dict: what to create, where to create it, and how many there should be\n\tinstruction = {\"1\" : {\"typeclass\" : settings.BASE_OBJECT_TYPECLASS, \"name\": \"Дилдо твоей мамки\", \"locations\":[\"1-test\",\"1-test2\"], \"desc\": \"Кто-то выкинул во двор дилдо товоей мамки.\", \"world_count\": 2},\n\t\t\t \t \"2\" : {\"typeclass\" : Knife, \"name\": \"Картонный нож\", \"locations\":[\"test3\"], \"desc\": None, \"world_count\": 1}\n\t\t\t \t }\n\n\n\tdef at_script_creation(self):\n\t\t# script name\n\t\tself.key = \"spawn_script\"\n\t\t# description\n\t\tself.desc = \"Спаунит вещи в локациях каждую минуту.\"\n\t\t# interval (seconds) between at_repeat calls\n\t\tself.interval = 30 \n\t\t# Whether the script survives a server reload, i.e. whether it auto-starts again.
\n\t\tself.persistent = True\n\n\tdef at_repeat(self):\n\t\tif not self.instruction:\n\t\t\treturn\n\n\t\tinst_list = self.instruction.values()\n\n\t\tif not inst_list:\n\t\t\treturn\n\n\t\tfor inst in inst_list:\n\t\t\t# look up the current item\n\t\t\titems = search_object(inst[\"name\"])\n\n\t\t\t# if no such items exist\n\t\t\tif not items:\n\t\t\t\t#self.obj.msg_contents(\"не нашел %s\" % inst[\"name\"])\n\t\t\t\tfor count in range(inst[\"world_count\"]):\n\t\t\t\t\tself.obj.msg_contents(\"создаю %s\" % inst[\"name\"])\n\t\t\t\t\t# pick a random location from the location list\n\t\t\t\t\tlocation = random.choice(inst[\"locations\"])\n\t\t\t\t\tself.obj.msg_contents(\"выбрал %s\" % location)\n\t\t\t\t\t# look it up\n\t\t\t\t\tloc = search_object(location)\n\t\t\t\t\tself.obj.msg_contents(\"нашел %s\" % loc)\n\t\t\t\t\t# if there are several (like kitchens), pick one of them at random\n\t\t\t\t\tcreation_loc = random.choice(loc)\n\t\t\t\t\tself.obj.msg_contents(\"выбрал %s\" % creation_loc.key)\n\t\t\t\t\t# create the object there\n\t\t\t\t\tobj = create_object(inst[\"typeclass\"],inst[\"name\"], creation_loc)\n\t\t\t\t\tself.obj.msg_contents(\"создал %s\" % obj.key)\n\t\t\t\t\t# will revisit this once there are items that share the same name\n\t\t\t\t\tobj.db.respawnable = True\n\t\t\t\t\t# set the description unless a default one exists (like the knife's)\t\n\t\t\t\t\tif inst[\"desc\"]:\n\t\t\t\t\t\tif not obj.db.desc:\n\t\t\t\t\t\t\tobj.db.desc = inst[\"desc\"]\n\n\t\t\t# if items were found\t\t\t\t\n\t\t\telse:\n\t\t\t\t#self.obj.msg_contents(\"нашел %s\" % items)\n\t\t\t\t# if there are fewer of them than world_count requires\n\t\t\t\tif len(items) < inst[\"world_count\"]:\n\t\t\t\t\tself.obj.msg_contents(\"добвляю не достоющие %s\" % inst[\"name\"])\n\t\t\t\t\t# create the missing number of objects\n\t\t\t\t\tfor count in range(inst[\"world_count\"] - len(items)):\n\t\t\t\t\t\t# pick a random location from the location list\n\t\t\t\t\t\tlocation = random.choice(inst[\"locations\"])\n\t\t\t\t\t\tself.obj.msg_contents(\"выбрал %s\" % location)\n\t\t\t\t\t\t# look it up\n\t\t\t\t\t\tloc = search_object(location)\n\t\t\t\t\t\tself.obj.msg_contents(\"нашел %s\" % loc)\n\t\t\t\t\t\t# if there are several (like kitchens), pick one of them at random\n\t\t\t\t\t\tcreation_loc = random.choice(loc)\n\t\t\t\t\t\tself.obj.msg_contents(\"выбрал %s\" % creation_loc.key)\n\t\t\t\t\t\t# create the object there\n\t\t\t\t\t\tobj = create_object(inst[\"typeclass\"],inst[\"name\"], creation_loc)\n\t\t\t\t\t\tself.obj.msg_contents(\"создал %s\" % obj.key)\n\t\t\t\t\t\t# will revisit this once there are items that share the same name\n\t\t\t\t\t\tobj.db.respawnable = True\n\t\t\t\t\t\t# set the description unless a default one exists (like the knife's)\t\n\t\t\t\t\t\tif inst[\"desc\"]:\n\t\t\t\t\t\t\tif not obj.db.desc:\n\t\t\t\t\t\t\t\tobj.db.desc = inst[\"desc\"]\n\n","sub_path":"typeclasses/item_respawn.py","file_name":"item_respawn.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"511380857","text":"__author__ = 'sjai013'\n\n# Get list of all words in the NZ dictionary\nf = open(\"nz_dict/compiled/all_modified_words.txt\", 'r')\nall_modified_words_temp = f.readlines()\nf.close()\n\nall_modified_words = dict()\n\nfor word in all_modified_words_temp:\n word = word.split('\\t')\n word[0] = word[0].translate(None,\"(\\\")\")\n all_modified_words[word[0]] = word[2:]\n\ndel f, all_modified_words_temp\n\n# Get a list of all words in the alveo MAUS dictionary\nf = 
open(\"complete.txt\")\nalveo_temp = f.readlines()\nf.close()\n\nalveo = dict()\nfor word in alveo_temp:\n line = word.split('\\t')\n alveo[line[0]] = line[1].strip()\n\ndel f, alveo_temp\n\n\n# Get list of all unique words\nf = open(\"all_unique_words.txt\",'r')\nunique_words_temp = f.readlines()\nf.close()\n\nunique_words = dict()\n\nfor word in unique_words_temp:\n word = word.split('\\t')\n unique_words[word[0]] = word[1].strip()\n\ndel f, unique_words_temp\n# Compare unique words and NZ dictionary, and get all unique words NOT in the NZ or MAUS dictionary\nnz_maus_words = list(set(all_modified_words) | set(alveo))\nmissing_corpus_words_temp = list(set(unique_words) - set(nz_maus_words))\nmissing_corpus_words = dict()\n\nfor word in missing_corpus_words_temp:\n words = word.split(\"-\")\n for word in words:\n missing_corpus_words[word] = \"\"\n\n\n# Get all words in the 5000 most common word list that aren't in the NZ/MAUS lexicon\nf = open(\"nz_dict/5000_most_common.txt\")\nmost_common_words_temp = f.readlines()\nf.close()\n\nmost_common_words = dict()\nfor line in most_common_words_temp:\n word = line.split()[1]\n most_common_words[word] = \"\"\n\n\nmissing_common_words_temp = list(set(most_common_words) - set(nz_maus_words))\nmissing_common_words = dict()\n\nfor item in missing_common_words_temp:\n # For hyphenated words, only put in the words that are not already in the lexicon, or missing words list\n words = item.split(\"-\")\n for word in words:\n if (word not in missing_common_words) | (word not in nz_maus_words):\n missing_common_words[word] = \"\"\n\n\n\n\nf = open(\"nz_dict/compiled/words_to_add.txt\", \"w\")\nfor word in sorted(missing_common_words):\n f.write(word + '\\n')\n\nf.close()\n\n\n# Recreate list of missing corpus words by removing all common words, and breaking hyphenated words into separate words\nmissing_corpus_words = list(set(missing_corpus_words) - set(missing_common_words))\n\n\n# Write out all words that are in the corpus but are not included in the missing_common_words list\nf = open(\"nz_dict/compiled/missing_corpus_words.txt\", \"w\")\nfor word in sorted(missing_corpus_words):\n f.write(word + '\\n')\n\nf.close()\ndel f\n","sub_path":"python/get_missing_words.py","file_name":"get_missing_words.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"520779655","text":"import time\nimport mysql.connector as mysql\nimport RPi.GPIO as gpio\nimport os\n\nold = 0.0\n\ncnx = mysql.connect(user = 'root', password = 'citofono98', host = '127.0.0.1', database = 'cantina')\n\ngpio.setmode(gpio.BCM)\ngpio.setwarnings(False)\ngpio.setup(18,gpio.OUT) # fan relay\n\ncursor = cnx.cursor()\n\nwhile True:\n\n\ttemp = os.popen(\"vcgencmd measure_temp\").readline() # run a shell command to read the CPU temperature and keep the value\n\ttemp = temp.replace(\"temp=\",\"\")\n\ttemp = temp.replace(\"'C\",\"\")\n\tcursor.execute(\"update temp set value = %(temp)s where id = 1\", {\"temp\":temp})\n\tcnx.commit() # persist the update; mysql.connector does not autocommit by default\n\n\tcursor.execute(\"select en from fan where id=1;\")\n\tforce = list(cursor.fetchone())\n\n\tif force[0] == 1:\n\t\tgpio.output(18,gpio.LOW)\n\telif force[0] == 0:\n\t\tgpio.output(18,gpio.HIGH)\n\telif force[0] == 2:\n\t\ttime.sleep(380)\n\t\tif float(temp) >= 3.5:\n\t\t\tgpio.output(18,gpio.LOW)\n\t\telif float(temp) < 
3.5:\n\t\t\tgpio.output(18,gpio.HIGH)\n","sub_path":"python/ventola.py","file_name":"ventola.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"190532069","text":"wordsHandle = open(\"words.txt\")\nwordsDict = dict()\ncontador = 0\nfor wlines in wordsHandle:\n wordsKeys = wlines.split()\n for words in wordsKeys:\n if words in wordsDict:\n continue\n wordsDict[words] = str(contador)\n contador = contador + 1\nprint(wordsDict, type(wordsDict))","sub_path":"diccionario_word.py","file_name":"diccionario_word.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"231708799","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport requests, os, json\nfrom django.shortcuts import render\nfrom django.http import HttpResponse,JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom smartapi.models import ProblemRecord\nfrom smartapi.serializers import SmartAPISerializer\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# Create your views here.\n@csrf_exempt\ndef snippet_list(request):\n \"\"\"\n List all existing snippets, or create a new snippet\n \"\"\"\n if request.method == 'GET':\n snippets = ProblemRecord.objects.all()\n serializer = SmartAPISerializer(snippets, many=True)\n #return JsonResponse(serializer, safe=False, content_type='application/json; charset=utf-8')\n DataResponse = HttpResponse(json.dumps(serializer.data, ensure_ascii=False), content_type='application/json; charset=utf-8')\n return DataResponse\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = SmartAPISerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef snippet_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a single code snippet\n \"\"\"\n try:\n snippet = ProblemRecord.objects.get(pk=pk)\n except ProblemRecord.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = SmartAPISerializer(snippet)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = SmartAPISerializer(snippet, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n snippet.delete() # was ProblemRecord.delete(): delete() must be called on the fetched instance, not the model class\n return HttpResponse(status=204)\n","sub_path":"devops/smartapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"633541843","text":"# -*- coding: utf-8 -*-\n\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport re\n\nimport m10s_util as ut\n\"\"\"↑ an assortment of handy helper commands:\nut.ondevicon(Member)\n Returns device-icon text based on the member's online status.\nut.getEmbed(title,description,color,(name,value)...)\n Quick Embed construction. Existing code already uses it, so it cannot be removed.\nawait ut.opendm(Member/User)\n Returns a DM channel; you will never be stuck just because one does not exist yet.\nawait wait_message_return(ctx, prompt text, where to send it, time to wait):\n Simplifies waiting for input. Don't forget to catch the timeout exception.\nut.get_vmusic(bot,member)\n Returns the track that member is listening to through 思惟奈ちゃん's music playback feature.\n\"\"\"\n\n\nclass m10s_role_panel(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n 
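# note: Bot.get_emoji only reads the client's emoji cache, so it can return None if this cog loads before the cache is populated (an assumption worth verifying for this bot)\n 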
self.e_check = self.bot.get_emoji(653161518103265291)\n\n @commands.command(name=\"paneledit\")\n @commands.has_permissions(administrator=True)\n @commands.bot_has_permissions(manage_messages=True, manage_roles=True)\n async def _setting(self, ctx, mid=None, *reactions):\n if mid is None:\n m = await ctx.send(\"> パネル発行の確認\\nこのチャンネルにパネルを発行してもよろしいですか?\")\n await m.add_reaction(self.e_check)\n try:\n r, u = await self.bot.wait_for(\"reaction_add\", check=lambda r, u: r.message.id == m.id and u.id == ctx.author.id, timeout=60) # timeout value assumed; without one, wait_for never raises TimeoutError\n except asyncio.TimeoutError:\n await m.edit(content=\"> パネルは発行されていません!\\n時間内に応答がなかったため、作成はキャンセルされました。\") # Message.edit takes keyword-only arguments\n else:\n if r.emoji == self.e_check:\n pd = {}\n pm = await ctx.send(embed=discord.Embed())\n await pm.edit(embed=ut.getEmbed(\"思惟奈ちゃん役職パネル\",f\"made by {ctx.author.mention}\",self.bot.ec,f\"内容の変更は`s-paneledit {pm.id} (絵文字と役職idをスペース区切りで繰り返す)`\"))\n self.bot.cursor.execute(\"INSERT INTO role_panels(id,roles) VALUES(?,?)\", (pm.id,pd))\n else:\n await m.edit(content=\"> パネルは発行されていません!\\n作成はキャンセルされました。\")\n else:\n try:\n mid = int(mid)\n except:\n await ctx.send(\"> メッセージIDが数字ではありません!\")\n return\n if self.bot.cursor.execute(\"select * from role_panels where id = ?\",(mid,)).fetchone():\n await ctx.send(\"該当のパネルを上書きます。\")\n emotes = reactions[::2]\n rids = reactions[1::2]\n pd = {}\n for i in range(len(emotes)):\n pd[emotes[i]] = rids[i]\n rt = []\n for i in reactions:\n try:\n i = int(i)\n except:\n rt.append(str(i))\n else:\n try:\n rt.append(ctx.guild.get_role(i).mention)\n except:\n await ctx.send(\"パネルの上書きに失敗しました。\")\n return\n try:\n msg = await ctx.channel.fetch_message(mid)\n except:\n await ctx.send(\"> パネルが見つかりません\\nパネルのあるチャンネルで更新を行ってください。削除されている場合は再発行してください。\")\n else:\n for i in emotes:\n try:\n await msg.add_reaction(i)\n except Exception as e:\n try:\n eid = re.match(\n \"<:[a-zA-Z0-9_-]+:([0-9]+)>\", i).group(1)\n ej = self.bot.get_emoji(int(eid))\n await msg.add_reaction(ej)\n except:\n await ctx.send(\"付与できていないリアクションがあります。該当の役職はリアクションでの付与ができません。\")\n await msg.edit(embed=ut.getEmbed(\"思惟奈ちゃん役職パネル\",f\"made by {ctx.author.mention}\",self.bot.ec,*rt))\n self.bot.cursor.execute(\"UPDATE role_panels SET roles = ? 
WHERE id = ?\", (pd, mid))\n else:\n await ctx.send(\"> パネルが見つかりません。\\nパネルではないIDが指定されています。\")\n\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, pr):\n self.bot.cursor.execute(\"select roles from role_panels where id = ?\",(pr.message_id,))\n rs = self.bot.cursor.fetchone()\n if rs:\n rid = rs[\"roles\"].get(str(pr.emoji),None)\n if rid:\n g = self.bot.get_guild(pr.guild_id)\n ch = g.get_channel(pr.channel_id)\n m = await ch.fetch_message(pr.message_id)\n rl = g.get_role(int(rid))\n member = g.get_member(pr.user_id)\n await m.remove_reaction(pr.emoji,member)\n try:\n if int(rid) in [i.id for i in member.roles]:\n await member.remove_roles(rl)\n await ch.send(\"> 役職を除去しました!\",delete_after=5)\n else:\n await member.add_roles(rl)\n await ch.send(\"> 役職を付与しました!\",delete_after=5)\n except:\n await ch.send(\"> 役職の付与に失敗しました\",delete_after=5)\n \n\n\ndef setup(bot):\n bot.add_cog(m10s_role_panel(bot))\n","sub_path":"cogs/m10s_role_panel.py","file_name":"m10s_role_panel.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"348344314","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n# in an n-gon, each interior angle is 180*(n-2)/n\n\nfrom turtle import *\nfrom random import randint\nimport time\nstart_time = time.time()\n\nspeed(0)\nsetup( width = 1280, height = 720, startx = None, starty = None)\nbgcolor(\"black\")\nsetheading(0)\n\ndef mkfig(argu, leng):\n\tfor i in range(n):\n\t\tforward(leng)\n\t\tleft(180 - argu)\n\n\n\n# ---------------------------------------------------------------------------\n\nn = 6\narg = 120\nlength = 40 # renamed from 'len', which shadowed the built-in\n\ncolor('blue')\n# begin_fill()\nfor k in range(96):\n\n mkfig(arg, length)\n setheading(0 + 15*(k+1))\n length = length * 1.03\n# end_fill()\npenup()\nsetposition(-10000,0)\n\nfin_time = time.time() - start_time\nprint(fin_time)\n\ninput('type to exit')","sub_path":"shapes5.py","file_name":"shapes5.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"554934694","text":"\"\"\"empty message\n\nRevision ID: 967600ee8bbf\nRevises: afd82ca5d0da\nCreate Date: 2018-06-06 12:11:27.695705\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '967600ee8bbf'\ndown_revision = 'afd82ca5d0da'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('parser', sa.Column('last_movie_update', sa.DateTime(), nullable=True))\n op.add_column('parser', sa.Column('last_tv_update', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('parser', 'last_tv_update')\n op.drop_column('parser', 'last_movie_update')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/967600ee8bbf_.py","file_name":"967600ee8bbf_.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"41702776","text":"#!/usr/local/bin/python3\n\n# -*- coding: utf-8 -*-\n\n# To behave like a list and fetch elements by index, implement the __getitem__() method:\n\nclass Fib(object):\n\t\n\t# supports plain indexing only, not slicing\n\tdef __getitem__(self, n):\n\t\ta, b = 1, 1\n\t\tfor x in range(n):\n\t\t\ta, b = b, a + b\n\t\treturn a\n\t\t\n\t\t\nf = Fib()\nprint(f[0])\nprint(f[1])\nprint(f[2])\nprint(f[3])\nprint(f[4])\n\nprint(list(range(100))[5:10])\n\nprint('--------- 支持切片的类 ----------')\n\nclass Fib2(object):\n\t\n\tdef __getitem__(self, n):\n\t\tif isinstance(n, int):\t\t# n is an index\n\t\t\ta, b = 1, 1\n\t\t\tfor x in range(n):\n\t\t\t\ta, b = b, a + b\n\t\t\treturn a\n\n\t\tif isinstance(n, slice):\t# n is a slice (step and negative indices are not handled here)\n\t\t\tstart = n.start\n\t\t\tstop = n.stop\n\t\t\tif start is None:\n\t\t\t\tstart = 0\n\t\t\ta, b = 1, 1\n\t\t\tL = []\n\t\t\tfor x in range(stop):\n\t\t\t\tif x >= start:\n\t\t\t\t\tL.append(a)\n\t\t\t\ta, b = b, a + b \n\t\t\treturn L\n\n\n\n# try out slicing\nf2 = Fib2()\nprint(f2[0:5])\n\nprint(f2[:10:2])\n\n\n\n\n\n\t\t\t\n\t\t\n\t\t","sub_path":"Class/special_getitem.py","file_name":"special_getitem.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"197023493","text":"# -*- coding: utf-8 -*-\n\n# Author: Gael Varoquaux \n# License: BSD 3 clause\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\n# The digits dataset\ndigits = datasets.load_digits()\n# The data is a set of 8x8 digit images; let's look at the first three.\n# The images are stored in the dataset's `images` attribute. If we were\n# loading image files instead, we could use pylab.imread; note that every\n# image would have to be the same size. We know which digit each image\n# represents: it is stored in the dataset's target attribute.\nimages_and_labels = list(zip(digits.images, digits.target))\nfor index, (image, label) in enumerate(images_and_labels[:4]):\n plt.subplot(2, 4, index + 1)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Training: %i' % label)\n# To apply a classifier to this data, we need to flatten the images,\n# turning the data into a 2-D matrix:\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1))\n# Create a classifier: a support vector classifier\nclassifier = svm.SVC(gamma=0.001)\n# We learn the digits on the first half of the data (// is integer division, required for slicing in Python 3)\nclassifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])\n# Now predict the value of the digit on the second half:\nexpected = digits.target[n_samples // 2:]\npredicted = classifier.predict(data[n_samples // 2:])\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\nimages_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))\nfor index, (image, prediction) in enumerate(images_and_predictions[:4]):\n plt.subplot(2, 4, index + 5)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Prediction: %i,Expected: %i' % (prediction,expected[index]))\nplt.show() # moved out of the loop so the figure is shown once, after all subplots are drawn","sub_path":"plot_digits_classification.py","file_name":"plot_digits_classification.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"135876354","text":"# https://stackoverflow.com/questions/2627002/whats-the-pythonic-way-to-use-getters-and-setters\n\nclass C(object):\n def __init__(self):\n 
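# _file_name is the private backing attribute for the file_name property defined below\n 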
self._file_name = None\n# self._file_path = None\n\n @property\n def file_name( self ):\n \"\"\"I'm the file_name property.\"\"\"\n print( \"getter of file_name called: self._file_name = \", self._file_name )\n return self._file_name\n # def file_path( self ):\n # \"\"\"I'm the file_path property.\"\"\"\n # print( \"getter of file_path called: self._file_path = \", self._file_path )\n # return self._file_path\n\n @file_name.setter\n def file_name( self, value ):\n print(\"setter of _file_name called: self._file_name = \", value )\n self._file_name = value\n\n @file_name.deleter\n def file_name( self ):\n print( \"deleter of x called\" )\n del self._file_name\n print( \"\\nattempting to print self._file_name: disaster entails\" )\n print( \"self._file_name = \", self._file_name )\n\nc = C()\nc.file_name = \"file.rst\" # setter called\nfoo = c.file_name # getter called\nprint( \"foo = \", foo )\ndel c.file_name # deleter called\n\n# l127914@pn1249300.lanl.gov:class $ python objects-python-A.py\n# setter of x called: self._file_name = foo\n# getter of x called: self._file_name = foo\n# deleter of x called\n#\n# attempting to print self._file_name: disaster entails\n# Traceback (most recent call last):\n# File \"objects-python-A.py\", line 28, in \n# del c.x # deleter called\n# File \"objects-python-A.py\", line 23, in x\n# print( \"self._file_name = \", self._file_name )\n# AttributeError: 'C' object has no attribute '_file_name'\n\n# l127914@pn1249300.lanl.gov:class $ date\n# Wed Nov 21 10:25:02 MST 2018\n\n# l127914@pn1249300.lanl.gov:class $ pwd\n# /Volumes/Tlaltecuhtli/repos/GitHub/topa-development/python/class\n","sub_path":"python/class/rubic.py","file_name":"rubic.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"59640768","text":"class OsebaDna():\n\n def __init__(self, ime, barva_las, oblika_obraza, barva_oci, spol, rasa):\n self.ime = ime\n self.barva_las = barva_las\n self.oblika_obraza = oblika_obraza\n self.barva_oci = barva_oci\n self.spol = spol\n self.rasa = rasa\n\n def preveri_dna(self, dna):\n if self.barva_las in dna and\\\n self.oblika_obraza in dna and\\\n self.barva_oci in dna and\\\n self.spol in dna and\\\n self.rasa in dna:\n return True\n else:\n return False\n\nclass GlavnaMesta():\n\n def __init__(self, drzava, gl_mesto, url_slike):\n self.drzava = drzava\n self.gl_mesto = gl_mesto\n self.url_slike = url_slike\n","sub_path":"NarediObjekt.py","file_name":"NarediObjekt.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"572110478","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 14 20:56:58 2018\n\n@author: justinwu\n\"\"\"\n\n#! python3\n# stamp every image in this directory with the PNG.png logo\n\n\nimport os\nfrom PIL import Image\nS_SIZE = 1800\nLOGO_F = 'PNG.png'\nlogoIm = Image.open(LOGO_F)\nlogoWidth, logoHeight = logoIm.size\n# create the image_a output directory\nos.makedirs('image_a', exist_ok=True)\n# scan every file in the current directory, keeping only .png/.jpg files and skipping LOGO_F itself\nfor filename in os.listdir('.'):\n if not (filename.endswith('.png') or filename.endswith('.jpg')) or filename == LOGO_F:\n continue \n im = Image.open(filename) # open the current image file (the old comment wrongly said this opened LOGO_F)\n width, height = im.size\n # check whether the image size needs adjusting\n if width > S_SIZE and height > S_SIZE:\n if width > height:\n height = int((S_SIZE / width) * height)\n width = S_SIZE\n else:\n width = int((S_SIZE / height) * width)\n height = S_SIZE\n # resize the image\n print('更新圖片大小 %s...' 
% (filename))\n im = im.resize((width, height))\n # paste the logo onto the image.\n print('將圖片加入LOGO_F %s...' % (filename))\n im.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\n # save the image.\n im.save(os.path.join('image_a', filename))\n","sub_path":"pillow_i.py","file_name":"pillow_i.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"429121662","text":"from aiohttp import web\nimport json\n\n\nasync def handle(context):\n data = {}\n article_list = {}\n for an_article in context['article_list']:\n article_list.update({an_article.id: {\n 'title': an_article.title, 'body': an_article.body,\n 'submitted_timestamp': an_article.submitted_timestamp,\n 'updated_timestamp': an_article.updated_timestamp}})\n data.update({'article_list': article_list})\n return web.json_response(data)\n","sub_path":"api_views/index_view.py","file_name":"index_view.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"611109223","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom bulk_mover.move_db.move_provider import MoveProvider\nfrom bulk_mover.move_db.PathProvider import PathProvider\nfrom bulk_mover.move_db.MoverDBs import ProjectID, StoPDB, PtoADB, PtoAFiles, PtoAError\nfrom bulk_mover.bulk_p2a import PMoverBase\nfrom bulk_mover.mover_classes import PathMunger\nfrom tqdm import tqdm\n\n\nclass FileCompare:\n\n def __init__(self, s_loc: Path, d_loc: Path) -> None:\n self._s_loc = s_loc # type: Path\n self._d_loc = d_loc\n self.s_files = []\n self._not_copied = []\n\n def compare(self):\n print(\"Comparing paths.\")\n for root, dirs, files in os.walk(self._s_loc):\n for f in files:\n p = Path(f).stem\n self.s_files.append(p)\n\n for root, dirs, files in os.walk(self._d_loc):\n for f in files:\n p = Path(f).stem\n if p in self.s_files:\n self.s_files.remove(p)\n\n\nclass SqlPtoA(PMoverBase):\n def __init__(self, mp: MoveProvider) -> None:\n self.mp = mp\n self._current_items = [] # type: [StoPDB]\n self._current_item = None # type: StoPDB\n self._current_ptadb = None # type: PtoADB\n self._current_pta_item = None # type: PtoAFiles\n\n def _write_success(self, pm: PathMunger):\n self._current_pta_item.a_file_name = pm.dest_file_path\n self._current_pta_item.a_file_size = os.path.getsize(pm.dest_file_path)\n self._current_pta_item.date_completed = datetime.now()\n self._current_pta_item.completed = True\n self._current_pta_item.save()\n\n def _write_fail(self, pm: PathMunger):\n pte, created = PtoAError.get_or_create(fk=self._current_pta_item, error_msg=pm.get_error())\n if created:\n pte.save()\n\n def move(self):\n for pp in self.get_path_provider(): # type: PathProvider\n pm = PathMunger.PathMunger(str(Path(\"P:\\\\\", pp.item.p_root)), \"A:\")\n pbar = tqdm(total=pp.get_count(), ascii=True, desc=\"Converting: {}\".format(pm.get_source_bag()))\n for ptaf in pp.ptaf_items:\n self._current_pta_item = ptaf\n pi = Path(ptaf.p_file_name)\n pm.set_current_targets(str(pi.parent), pi.parts[-1])\n pm.create_dest_path()\n if self._is_a_blank_path(pm.source_base):\n if self._handle_restricted(pm):\n self._write_success(pm)\n else:\n self._write_fail(pm)\n pbar.update(1)\n continue\n\n ext = pi.suffix\n if len(ext) < 4 or len(ext) > 5: # Checking the length of the file extension (including its dot). 
If it is not 4-5 characters we will not be able to tell what the file type is, so complete the transfer as-is.\n # Extension cannot be determined - go ahead and copy\n if pm.do_a_copy():\n self._write_success(pm)\n pbar.update(1)\n continue\n\n if pm.is_pass_through():\n if pm.is_dest_there():\n self._write_success(pm)\n pbar.update(1)\n continue\n if pm.do_a_copy():\n self._write_success(pm)\n pbar.update(1)\n continue\n self._write_fail(pm)\n else:\n if self._handle_conversion(pm):\n self._write_success(pm)\n else:\n self._write_fail(pm)\n pbar.update(1)\n pbar.close()\n pp.close_item()\n\n def get_path_provider(self) -> [PathProvider]:\n self._current_items = self.mp.set_unfinished_items(self.mp.ATOP)\n for self._current_item in self._current_items:\n # is there PToAdb for this item?\n pp = PathProvider(self._current_item)\n # There is now. Are there files for this entry.\n pp.get_file_entries()\n #pp.add_extra()\n if len(pp.ptaf_items) == 0:\n continue\n # it does now\n yield pp\n\n def has_item_been_converted(self) -> bool:\n print(\"Checking for an existing path:\\t{}\".format(self._current_item.p_root))\n sp = Path(\"P:\\\\\") / self._current_item.p_root / 'data'\n dp = Path(\"A:\\\\\") / self._current_item.p_root\n spf = 0\n dpf = 0\n for root, dirs, files in os.walk(sp):\n spf += len(files)\n\n for root, dirs, files in os.walk(dp):\n dpf += len(files)\n\n if spf == dpf:\n return True\n return False\n\n\ndef new_file_chooser() -> MoveProvider:\n mp = MoveProvider()\n op = mp.set_open_projects(mp.ATOP)\n for i in op: #type: ProjectID\n print(\"{}) {}\".format(i.id, i.project_file))\n val = input(\"Select a project: \")\n mp.set_active_project(int(val))\n return mp\n\n\nif __name__ == '__main__':\n mp = new_file_chooser()\n sqlmvr = SqlPtoA(mp)\n sqlmvr.move()\n if mp.ptoa_complete():\n mp.close_ptoa()\n print()","sub_path":"src/bulk_mover/sql_ptoa.py","file_name":"sql_ptoa.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"295907202","text":"#coding=utf-8\r\n__author__ = 'lzp'\r\n\r\nimport time\r\n\r\nLOG_TYPE_ERROR = 1\r\nLOG_TYPE_WARN = 2\r\nLOG_TYPE_INFO = 3\r\nLOG_TYPE_DEBUG = 4\r\nLOG_TYPE_DETAIL = 5\r\n\r\n\r\n\r\n\r\nclass MyLog:\r\n\r\n filename = \"\"\r\n title = \"\"\r\n LogFp = \"\" # file handle\r\n\r\n# initialize the log file\r\n# filename: log file name; title: header line\r\n def __init__(self,filename,title):\r\n self.filename = filename\r\n self.title = title\r\n self.LogFp = open(self.filename, \"a\")\r\n if title is not None:\r\n structTm = time.localtime()\r\n timeStr = time.strftime(\"%Y-%m-%d %H:%M:%S\", structTm)\r\n values = timeStr + \": \" + str(title) + \"\\n\"\r\n self.LogFp.writelines(values)\r\n self.LogFp.flush()\r\n\r\n def __del__(self):\r\n self.LogFp.close()\r\n\r\n def WriteLog(self, value):\r\n if value is not None:\r\n structTm = time.localtime()\r\n timeStr = time.strftime(\"%Y-%m-%d %H:%M:%S\", structTm)\r\n values = timeStr + \": \" + str(value) + \"\\n\"\r\n self.LogFp.writelines(values)\r\n self.LogFp.flush()\r\n\r\n\r\n\r\n\r\ndef Main():\r\n log = MyLog(\"sa.log\", \"测试日志\")\r\n log.WriteLog(\"2131212121\")\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n Main()\r\n","sub_path":"companyTool/WatchDog/MyLog.py","file_name":"MyLog.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"95782291","text":"#Ex 3\nimport random as rd\nquantidade_lista=int(input('Digite o número de itens que a 
lista deve ter: '))\nlista=[]\nlinha=1\nif quantidade_lista<=0:\n print('O número de itens da lista não pode ser igual ou menor que 0')\nelse:\n while linha<= quantidade_lista:\n lista.append(rd.randrange(0,100))\n linha+=1\n print('Essa é a lista',lista)\n print('Maior número: ',max(lista))\n print('Menor número: ',min(lista))\n\n","sub_path":"aula3_ex3.py","file_name":"aula3_ex3.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574270799","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2005-2013 Mag. Christian Tanzer. All rights reserved\n# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at\n# ****************************************************************************\n#\n# This module is licensed under the terms of the BSD 3-Clause License\n# .\n# ****************************************************************************\n#\n#++\n# Name\n# Apply_All\n#\n# Purpose\n# Class transparently applying method calls to a set of objects\n#\n# Revision Dates\n# 20-Feb-2005 (CT) Creation\n# ««revision-date»»···\n#--\n\nfrom _TFL import TFL\nimport _TFL._Meta.Object\n\nclass Apply_All (TFL.Meta.Object) :\n \"\"\"Class transparently applying method calls to a set of objects.\n\n >>> l1 = list (range (5))\n >>> l2 = [\"f\", \"b\", \"c\", \"a\"]\n >>> all = Apply_All (l1, l2)\n >>> all._receivers\n ([0, 1, 2, 3, 4], ['f', 'b', 'c', 'a'])\n >>> all.sort ()\n >>> all._receivers\n ([0, 1, 2, 3, 4], ['a', 'b', 'c', 'f'])\n >>> all.count (\"a\")\n [0, 1]\n >>> all.reverse ()\n >>> all._receivers\n ([4, 3, 2, 1, 0], ['f', 'c', 'b', 'a'])\n >>> all.pop ()\n [0, 'a']\n >>> all._receivers\n ([4, 3, 2, 1], ['f', 'c', 'b'])\n \"\"\"\n\n def __init__ (self, * receivers) :\n self._receivers = receivers\n # end def __init__\n\n def _apply (self, name, * args, ** kw) :\n result = []\n for r in self._receivers :\n f = getattr (r, name)\n r = f (* args, ** kw)\n if r is not None :\n result.append (r)\n return result or None\n # end def _apply\n\n def __getattr__ (self, name) :\n return lambda * args, ** kw : self._apply (name, * args, ** kw)\n # end def __getattr__\n\n# end class Apply_All\n\nif __name__ != \"__main__\" :\n TFL._Export (\"*\")\n### __END__ Apply_All\n","sub_path":"Functions/venv/lib/python3.6/site-packages/_TFL/Apply_All.py","file_name":"Apply_All.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9081526","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nimport _pickle as pickle\nfrom sklearn import metrics\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nvectorizer = TfidfVectorizer(stop_words='english',\n lowercase=True,\n min_df=2,\n ngram_range=(1, 3))\ntitles = {\n \"0\":[],\n \"1\":[]\n}\nwith open ('Datasets/ClickbaitDataset.txt',encoding='utf8') as f:\n titles[\"1\"] = f.read().splitlines()\nwith open ('Datasets/NonClickbaitDataset.txt',encoding='utf8') as f:\n titles[\"0\"] = f.read().splitlines()\ntraininglabels = [0]*len(titles[\"0\"]) + [1]*len(titles[\"1\"])\ntrainingset = titles[\"0\"] + titles[\"1\"]\nprint(str(len(traininglabels)))\ntrainingset = vectorizer.fit_transform(trainingset, traininglabels)\nprint (str(trainingset.shape))\nparams = {'kernel': 'rbf', 'C': 2, 'gamma': 1}\nprint(\"\\nTraining model\")\nclf = svm.SVC(C=params['C'], kernel=params['kernel'], gamma=params['gamma'], 
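# probability=True enables probability estimates via an internal cross-validated (Platt) calibration, which makes training noticeably slower\n 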
probability=True)\nprint(\"Fitting Model\")\nclf.fit(trainingset, traininglabels)\nwith open(\"vectorizer\", 'wb') as f:\n pickle.dump(vectorizer, f)\nwith open(\"trainedmodel\", 'wb') as f:\n pickle.dump(clf, f)\n\n","sub_path":"azure-files/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"387357410","text":"#\n# preprocessing for GT data\n# input ~ sd_GT_data.csv\n# output ~ sd_GT_processed.csv\n# Fox 2020\n\nimport numpy as np\nimport pandas as pd\nfrom fractions import Fraction\nimport pickle as pkl\n\nimport argparse\nparser = argparse.ArgumentParser()\n\nparser.add_argument('input_filename')\nparser.add_argument('output_filename')\n\nargs = parser.parse_args()\n\nfn_in = args.input_filename\nfn_out = args.output_filename\n#fn_in = 'sd_GT_data.csv'\n#fn_out = 'sd_GT_data.pkl'\n\ndf = pd.read_csv(fn_in,header=1)\n\n# get rid of empty last column\n#df = df.drop(df.columns[[-1]], 1)\n\n# drop any columns with NaNs (should only drop empty columns from conversion of excel format to csv)\ndf = df.dropna(axis=1, how='all')\n\ndf['twoJi'] = df['Ji'].apply(lambda x : int(2*Fraction(x)))\ndf['twoJf'] = df['Jf'].apply(lambda x : int(2*Fraction(x)))\n\n#df = df.drop('B',axis=1) # B is Bexp without accounting for intensity, get rid of it\n\ndf['Bth'] = 0. # add column for theory values\n\n#df['Tmirror'] = np.logical_and(df['Zi']==df['Nf'], df['Zf']==df['Ni'])\ndf['Tmirror'] = (df['Zi']==df['Nf']) & (df['Zf']==df['Ni']) # note here & does element-wise logic\n\ndf['deltaJ'] = 0.5*(df['twoJf']-df['twoJi'])\n\ndf = df[(df['twoJi']!=0) | (df['deltaJ']!=0.0)] #remove 100% Fermi transitions\n\nfor edge in [8,20]:\n df['include'].loc[df['Zi']==edge] = False\n df['include'].loc[df['Ni']==edge] = False\n df['include'].loc[df['Zf']==edge] = False\n df['include'].loc[df['Nf']==edge] = False\n\n#with open(fn_out,'wb') as fh:\n# pkl.dump(df,fh)\n\n\ndf.to_csv(fn_out,index=False)\n\n","sub_path":"shmuq_gt/calculations/preprocessing_gt.py","file_name":"preprocessing_gt.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"509843024","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @Author: shangyameng\n# @Email: shangyameng@datagrand.com\n# @Date: 2020-07-22 11:11:38\n# @LastEditTime: 2020-09-02 14:21:36\n# @FilePath: /SuYan/flaskr/app/extensions/request_ocr/request_ocr.py\n\nimport requests\nimport json\nimport re\nimport os\nimport uuid\nfrom PIL import Image, ImageGrab\nimport copy\n\nfrom app.conf import logger\nfrom app.conf.api_config import orientation\nfrom app.extensions.opencv.open_cv import OpenCv\n\n\nclass RequestOcr(object):\n def __init__(self, save_file_path):\n self.headers = {\n \"User-Agent\":\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36\"\n }\n self.ocr_url = \"http://ysocr.datagrand.cn/ysocr/ocr\"\n self.ocr_file = \"http://ysocr.datagrand.cn/file/\"\n self.bizlicense = \"http://ysocr.datagrand.cn/ysocr/bizlicense_extract\"\n self.uncompress_path = save_file_path\n self.save_path = os.path.join(self.uncompress_path, \"ocr_split_files\")\n self.orientation = orientation\n self.current_file = \"\"\n self.cv = OpenCv()\n self.logger = logger\n self.special_extract_info = {}\n self.check_uncompress_path()\n\n def check_uncompress_path(self):\n if not os.path.exists(self.save_path):\n 
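# first run: create the folder that will hold the split page images; os.makedirs(self.save_path, exist_ok=True) would also avoid a check-then-create race\n 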
os.makedirs(self.save_path)\n\n def _requests_processor(self,\n url,\n method=\"GET\",\n key_id=\"\",\n params=None,\n file=None):\n \"\"\"\n Wrapped HTTP request helper\n Args:\n url:\n method:\n key_id:\n params:\n file:\n\n Returns:\n\n \"\"\"\n try:\n url = f\"{url}{key_id}\"\n if method == \"GET\":\n send_review_request = requests.get(url, headers=self.headers)\n else:\n send_review_request = {\n 'PUT': requests.put,\n 'POST': requests.post,\n 'DELETE': requests.delete,\n }[method](url, headers=self.headers, data=params, files=file)\n if send_review_request.status_code == 401:\n # retry on 401; key_id is already appended to url above, so do not append it a second time\n return self._requests_processor(url=url,\n method=method,\n key_id=\"\",\n params=params,\n file=file)\n else:\n result = send_review_request.content if send_review_request else None\n return result\n except Exception as e:\n self.logger.error(\"error:{}\".format(e))\n return None\n\n def create_url_by_ocr(self, file_abs_path):\n \"\"\"\n Create an OCR recognition request\n Args:\n file_abs_path: file that the request uploads\n\n Returns:\n\n \"\"\"\n try:\n self.logger.info(\"start request OCR API\")\n file = {\"file\": open(file_abs_path, 'rb')}\n data = {\"caller_request_id\": uuid.uuid4()}\n result = self._requests_processor(self.ocr_url,\n method=\"POST\",\n params=data,\n file=file)\n return json.loads(result.decode(\"utf-8\"))\n\n except Exception as e:\n self.logger.error(\"error:{}\".format(e))\n return None\n\n def find_extract_page(self, ocr_result: dict, orientation_dic: dict):\n \"\"\"\n Parse the OCR result and locate, via keywords, the pages that need to be split out\n Args:\n ocr_result: the OCR recognition result\n orientation_dic: names of the pages to extract plus their keyword info\n\n Returns:\n all pages that need extracting and their local save paths\n \"\"\"\n self.logger.info(\"start processing OCR recognition results\")\n page_save_path = \"\"\n need_extract_info = {}\n if orientation_dic and ocr_result and ocr_result[\"code\"] == 200:\n for page_info in ocr_result[\"img_data_list\"]:\n all_page_txt = ''.join([\n text_info[\"text_string\"]\n for text_info in page_info[\"text_info\"]\n ])[:100]\n for key in orientation_dic.keys():\n write_key = orientation_dic[key][0]\n black_key = orientation_dic[key][1]\n\n page_save_path = self.file_page_decision(\n key, all_page_txt, orientation_dic, ocr_result,\n write_key, black_key, page_info, need_extract_info)\n if page_save_path and key == \"leased_line\":\n break\n if page_save_path and \"leased_line\" in page_save_path:\n break\n return need_extract_info\n\n def file_page_decision(self, key, all_page_txt, orientation_dic,\n ocr_result, write_key, black_key, page_info,\n need_extract_info):\n \"\"\"\n Use the keyword white/black lists to decide whether the current page should be extracted\n Args:\n key:\n all_page_txt:\n orientation_dic:\n ocr_result:\n write_key:\n black_key:\n page_info:\n need_extract_info:\n\n Returns:\n\n \"\"\"\n if re.search(rf\"(?:{'|'.join(write_key)})\",\n re.sub(r\"[ ]\", \"\", all_page_txt)):\n if black_key and re.search(\n rf\"(?:{'|'.join(orientation_dic[key][1])})\",\n re.sub(r\"[ ]\", \"\", all_page_txt)):\n return None\n else:\n if key == \"leased_line\":\n image_name = ocr_result[\"out_pdf_name\"]\n page_number = \"\"\n else:\n image_name = page_info[\"detect_img_name\"]\n page_number_info = re.findall(r\"page_(\\d+)_detection\",\n image_name)\n page_number = page_number_info[\n 0] if page_number_info else None\n page_save_path = self.down_page_img(image_name, key,\n page_number)\n\n if key not in need_extract_info.keys():\n need_extract_info[key] = [page_save_path]\n else:\n need_extract_info[key].append(page_save_path)\n return page_save_path\n return None\n\n def down_page_img(self, img_name, rename, page_number):\n \"\"\"\n Download and save an image\n Args:\n img_name: remote path the image is stored under\n rename: new name used when saving the file\n 
page_number: 第几页\n\n Returns:\n 文件保存后路径\n \"\"\"\n url = self.ocr_file + img_name\n self.logger.info(f\"start download image: {url}\")\n\n result = self._requests_processor(url)\n if img_name.rsplit(\".\", 1)[-1] == \"pdf\":\n suffix = \".pdf\"\n else:\n suffix = \".jpeg\"\n image_save_path = self.save_path + \"/\" + self.current_file.rsplit(\n \".\", 1)[0] + \"_\" + rename + str(page_number) + suffix\n self.save_request_result(image_save_path, result, True)\n self.logger.info(f\" image is downloaded! >>> {image_save_path}\")\n return image_save_path\n\n # @staticmethod\n def save_request_result(self, img_save_path, result, img=False):\n \"\"\"\n 将接口请求的数据保存到本地\n Args:\n img_save_path: 保存的文件路径\n result: 接口获取到的数据\n img: 是否是图片\n\n Returns:\n None\n \"\"\"\n self.logger.info(\"start save request result\")\n if not img:\n res_type = \"w\"\n result = json.dumps(result, ensure_ascii=False)\n else:\n res_type = \"wb+\"\n with open(img_save_path, f\"{res_type}\") as f:\n f.write(result)\n\n @staticmethod\n def read_extract_result(json_path):\n with open(json_path, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n if result:\n return json.loads(result)\n return {}\n\n def request_bizlicense(self, all_file_info):\n \"\"\"\n 分类后的文件中请求获取营业执照的数据\n :param all_file_info:\n :return:\n \"\"\"\n bizlicense_result = []\n try:\n if all_file_info:\n all_bizlicense = all_file_info[\"营业执照\"]\n for file in all_bizlicense:\n result = self.create_url_by_ocr(file)\n if result[\"img_data_list\"]:\n res_info = result[\"img_data_list\"][0]\n extract = res_info[\n \"test_info\"] if res_info else res_info\n bizlicense_result.append(extract)\n all_file_info.update({\"营业执照\": bizlicense_result})\n except Exception as e:\n self.logger.exception(e)\n\n # @staticmethod\n def update_result(self, file_info, all_file_info):\n try:\n if file_info:\n for doctype in file_info.keys():\n doctype_files = file_info[doctype]\n if doctype not in all_file_info.keys():\n all_file_info[doctype] = doctype_files\n else:\n all_file_info[doctype].extend(doctype_files)\n except Exception as e:\n self.logger.exception(e)\n\n # @staticmethod\n def combine2pdf(self, all_file_info):\n \"\"\"\n\n :param folder_path: 图片地址\n :return:\n \"\"\"\n new_all_file_info = {}\n try:\n for doctype in all_file_info.keys():\n new_file_list = []\n for file in all_file_info[doctype]:\n if file.rsplit(\".\", 1)[-1] in [\"pdf\"]:\n continue\n if doctype == \"leased_line\":\n new_name = file\n else:\n output = Image.open(file)\n if output.mode != \"RGB\":\n output = output.convert(\"RGB\")\n new_name = file.rsplit(\".\", 1)[0] + \".pdf\"\n output.save(new_name)\n os.remove(file)\n new_file_list.append(new_name)\n new_all_file_info[doctype] = new_file_list\n return new_all_file_info\n except Exception as e:\n self.logger.exception(e)\n return all_file_info\n\n # @staticmethod\n def file_uniq(self, file_info):\n \"\"\"\n 重复文件过滤\n Args:\n file_info:\n\n Returns:\n\n \"\"\"\n try:\n new_file_info = {}\n for key in file_info.keys():\n file_result = file_info[key]\n new_file_info[key] = list(set(file_result))\n return new_file_info\n except Exception as e:\n self.logger.exception(e)\n return file_info\n\n def start(self):\n \"\"\"\n 开始一个图片解析请求\n Args:\n\n Returns:\n\n \"\"\"\n self.logger.info(\"start request_ocr\")\n all_file_info = {}\n for file in os.listdir(self.uncompress_path):\n if file.rsplit(\".\", 1)[-1] in [\"pdf\", \"jpeg\", \"jpg\", \"png\", \"PDF\"\n ] and not file.startswith(\".\"):\n self.logger.info(f\"start dispose file: {file}\")\n self.current_file = 
file\n file_abs_path = os.path.join(self.uncompress_path, file)\n result = self.create_url_by_ocr(file_abs_path)\n file_info = self.find_extract_page(result, self.orientation)\n self.update_result(file_info, all_file_info)\n all_file_info = self.file_uniq(all_file_info)\n all_file_info = self.combine2pdf(all_file_info)\n return all_file_info\n\n\nif __name__ == '__main__':\n # files_path = \"/Users/sarmn/DG/project/suyan/suyan/suyan_file_process/test/files/\"\n files_path = \"/Users/sarmn/DG/project/suyan/files/证件分类\"\n # save_path = \"/Users/sarmn/DG/project/suyan/suyan/suyan_file_process/test/save/\"\n # orientation = {\n # # 需要抽取定位的文件类型:[[定位的关键字段], [黑名单]]\n # \"营业执照\": [[\"营业执照\", \"统一社会信用代码\"], [\"登记通知书\", \"客户基本信息\", \"业务办理表\"]],\n # \"业务受理单\": [[\"业务受(?:理|埋)单\", \"申请业务信息\", \"客户单位付款账号信息\", \"云MAS业务登记表\"], []],\n # \"数据专线\": [[\"电路租用业务协议\", \"跨境专线A类业务协议\", \"电路租用业务服务协议\", \"数据专线业务登记表\", \"数据专线业务登记表\", \"业务办理表\"], []]\n # }\n ocr_obj = RequestOcr(files_path)\n res = ocr_obj.start()\n print(json.dumps(res, indent=4, ensure_ascii=False))\n","sub_path":"crawler/extensions/request_ocr/request_ocr.py","file_name":"request_ocr.py","file_ext":"py","file_size_in_byte":13262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83216414","text":"import argparse\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nsys.path.append('../')\nimport stddraw\nfrom matrixutils import multiplyVM\nimport numpy\n\nfrom ioutils import read_strings\nfrom strutils import words\nimport shelve\nfrom web_analysis import TRANS_MATR_KEY\nfrom numpy import cumsum\nfrom numpy.random import choice\n\n\ndef choose_page(probs):\n pages = list(range(len(probs)))\n return choice(pages, size=1, p=probs)[0]\n\ndef hitting_time(probs, startpage):\n count = 0\n while True:\n count += 1\n go_page = choice(range(len(probs)), size=1, p=probs)[0]\n if go_page == startpage:\n return count\n\ndef avg_hitting_time(probs,startpage, nb_trials):\n total = 0\n for i in range(nb_trials):\n total += hitting_time(probs, startpage)\n return total / nb_trials\n\ndef coverage_time(probs, startpage):\n covered = [False] * len(probs)\n count = 0\n while True:\n count += 1\n go_page = choice(range(len(probs)), size=1, p=probs)[0]\n covered[go_page] = True\n if all(covered):\n return count\n\ndef avg_coverage_time(probs,startpage, nb_trials):\n total = 0\n for i in range(nb_trials):\n total += coverage_time(probs, startpage)\n return total / nb_trials\n\n\n\n\n\nif __name__ == \"__main__\":\n d = shelve.open('transmatrfile')\n trns_matr= d[TRANS_MATR_KEY]\n d.close()\n # print(trns_matr)\n\n parser = argparse.ArgumentParser(description='Simulation of page visits to calculate page rank (using Markov)')\n parser.add_argument('nb_visits', type=int, help='number of page visits allowed for the user')\n parser.add_argument('--histogram', help='show histogram', action='store_true')\n parser.add_argument('--analyse-hitting-time', type=int, help='number of trials for analysing hitting time')\n parser.add_argument('--analyse-coverage-time', type=int, help='number of trials for analysing coverage time')\n args = parser.parse_args()\n nb_visits = args.nb_visits\n # print(row)\n visiting = 0\n count_visits = [0] * len(trns_matr[0])\n for i in range(nb_visits):\n probs_row=trns_matr[visiting]\n visiting = choose_page(probs_row)\n count_visits[visiting] += 1\n count_visits = [ x / nb_visits for x in count_visits]\n print(count_visits)\n\n ranks_vector=[0]*len(trns_matr[0])\n ranks_vector[0]=1\n # 
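the commented lines keep the older matrMult (matrix-shaped) form for reference\n    # pure-numpy equivalent of the power iteration below (sketch; assumes trns_matr is row-stochastic):\n    #   res = numpy.linalg.matrix_power(numpy.array(trns_matr), 21)[0]\n    # 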
ranks_vector=[ranks_vector]\n # res = matrMult(ranks_vector, trns_matr)\n res = multiplyVM(ranks_vector, trns_matr)\n print(\"res\", res)\n for i in range(20):\n res = multiplyVM(res, trns_matr)\n print(\"res\", res)\n\n print('***')\n rank_and_page = [(val,i) for i, val in enumerate(res)]\n print(rank_and_page)\n print('***')\n print(sorted(rank_and_page, reverse=True))\n plt.bar(range(len(res)),res)\n plt.show()\n\n print('eigen')\n print(numpy.linalg.eig(trns_matr)[0])\n print(numpy.linalg.eig(trns_matr)[1])\n print(numpy.linalg.matrix_power(trns_matr,20))\n\n \n\n \n # res = trns_matr\n # for i in range(20):\n # res = matrMult(res, trns_matr)\n # print(res)\n\n # res = trns_matr\n # for i in range(4):\n # res = matrMult(res, res)\n # print(res)\n\n if args.histogram:\n stddraw.setXscale(0,len(count_visits))\n stddraw.setYscale(0,1)\n # for j, val in enumerate(count_visits):\n # stddraw.setPenColor(stddraw.RED)\n # stddraw.filledRectangle(j,0,1,val)\n # stddraw.show()\n plt.bar(range(len(count_visits)),count_visits)\n plt.show()\n if args.analyse_hitting_time:\n NB_TRIALS = args.analyse_hitting_time\n #Hitting time\n stat_hitting_time=[0]*len(trns_matr)\n for i, probs in enumerate(trns_matr):\n stat_hitting_time[i] = avg_hitting_time(probs, i, NB_TRIALS)\n print(stat_hitting_time)\n plt.plot(stat_hitting_time)\n plt.show()\n\n if args.analyse_coverage_time:\n NB_TRIALS = args.analyse_coverage_time\n #Coverage time\n stat_coverage_time=[0]*len(trns_matr)\n for i, probs in enumerate(trns_matr):\n stat_coverage_time[i] = avg_coverage_time(probs, i, NB_TRIALS)\n print(stat_coverage_time)\n\n\n","sub_path":"randomsurfer.py","file_name":"randomsurfer.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"226296336","text":"import os\nimport requests\nimport config\nfrom flask import Flask, request\nfrom aws_xray_sdk.core import xray_recorder, patch_all\nfrom aws_xray_sdk.ext.flask.middleware import XRayMiddleware\n\napp = Flask(__name__)\n\nxray_recorder.configure(\n context_missing='LOG_ERROR',\n service=config.XRAY_APP_NAME,\n)\npatch_all()\nXRayMiddleware(app, xray_recorder)\n\n@app.route('/ping')\ndef ping():\n return 'Pong'\n\n@app.route('/color')\ndef color():\n print(request.headers)\n response = requests.get(f'http://{config.COLOR_HOST}')\n return response.text\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=config.PORT, debug=config.DEBUG_MODE)","sub_path":"walkthroughs/howto-k8s-cross-cluster/feapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"543961120","text":"\"\"\"Utilities for organizations.\"\"\"\n\nimport logging\n\nfrom readthedocs.core.utils import send_email\n\n\n# pylint: disable=invalid-name\nlog = logging.getLogger(__name__)\n\n\ndef send_team_invite_email(invite, request):\n \"\"\"Send an organization team invite email.\"\"\"\n log.info('Sending team invite for %s to %s', invite.team, invite.email)\n send_email(\n invite.email,\n subject='Join your team at Read the Docs',\n template='organizations/email/team_invite.txt',\n template_html='organizations/email/team_invite.html',\n context={\n 'invite_hash': invite.hash,\n 'sender_full_name': request.user.get_full_name(),\n 'sender_username': request.user.username,\n 'organization_name': invite.organization.name,\n },\n request=request,\n )\n\n\ndef send_team_add_email(team_member, 
request):\n \"\"\"Send an organization team add email.\"\"\"\n log.info(\n 'Sending team add notification for %s to %s',\n team_member.team,\n team_member.member.email,\n )\n send_email(\n team_member.member.email,\n subject='Join your team at Read the Docs',\n template='organizations/email/team_add.txt',\n template_html='organizations/email/team_add.html',\n context={\n 'sender_full_name': request.user.get_full_name(),\n 'sender_username': request.user.username,\n 'organization_name': team_member.team.organization.name,\n 'organization_slug': team_member.team.organization.slug,\n },\n request=request,\n )\n","sub_path":"readthedocs/organizations/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228179310","text":"import json\r\nimport random\r\nimport time\r\n\r\n# import json questions\r\nwith open(\"questions.json\") as json_file:\r\n data = json.load(json_file)\r\n q = data[\"questions\"]\r\n\r\nqlength = len(q)\r\nqlist = [0]\r\nnumber = 0\r\nwhile len(qlist) <= qlength - 1:\r\n number += 1\r\n qlist.append(number)\r\nquest3 = random.choice(qlist)\r\nqlist.remove(quest3)\r\nquest4 = random.choice(qlist)\r\nqlist.remove(quest4)\r\nquest5 = random.choice(qlist)\r\nqlist.remove(quest5)\r\n\r\n# question 1 + 2\r\nprint(\"Question 1. How old are you?\")\r\nprint(\"Enter age\")\r\nans1 = int(input())\r\nprint(\"Ok, What do you identify as?\")\r\nprint(\"['1. Man', '2. Woman', '3. Other']\")\r\nprint(\"Enter answer number\")\r\nans2 = int(input())\r\nif ans2 == 1 or ans2 == 2 or ans2 == 3:\r\n print(\"Ok, next question\")\r\nelse:\r\n print(\"Re-enter answer\")\r\n ans2 = int(input())\r\n# question 3\r\nprint(\"Question 3. \" + q[quest3][\"question\"])\r\nprint(q[quest3][\"options\"])\r\nprint(\"Enter answer number\")\r\nans3 = int(input())\r\nif ans3 == 1 or ans3 == 2 or ans3 == 3:\r\n print(\"Ok, next question\")\r\nelse:\r\n print(\"Re-enter answer\")\r\n ans3 = int(input())\r\n# question 4\r\nprint(\"Question 4. \" + q[quest4][\"question\"])\r\nprint(q[quest4][\"options\"])\r\nprint(\"Enter answer number\")\r\nans4 = int(input())\r\nif ans4 == 1 or ans4 == 2 or ans4 == 3:\r\n print(\"Ok, next question\")\r\nelse:\r\n print(\"Re-enter answer\")\r\n ans4 = int(input())\r\n# question 5\r\nprint(\"Question 5. 
\" + q[quest5][\"question\"])\r\nprint(q[quest5][\"options\"])\r\nprint(\"Enter answer number\")\r\nans5 = int(input())\r\nif ans5 == 1 or ans5 == 2 or ans5 == 3:\r\n    print(\"Ok, you're done. Calculating your total\")\r\nelse:\r\n    print(\"Re-enter answer\")\r\n    ans5 = int(input())\r\ntime.sleep(1)\r\nprint(\".\")\r\ntime.sleep(1)\r\nprint(\".\")\r\ntime.sleep(1)\r\nprint(\".\")\r\n\r\n# Scoring\r\ntotal = 0\r\ntotal += ans1 // 10\r\nif ans2 == 1:\r\n    total += 3\r\nelse:\r\n    total += 1\r\npoints = q[quest3][\"points\"]\r\ntotal += points[ans3 - 1]\r\npoints = q[quest4][\"points\"]\r\ntotal += points[ans4 - 1]\r\npoints = q[quest5][\"points\"]\r\ntotal += points[ans5 - 1]\r\nprint(\"You have a total of \" + str(total) + \" manliness points!\")\r\nend = input()\r\n","sub_path":"mantest.py","file_name":"mantest.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"356749979","text":"'An ordered dictionary with attribute-style access.'\n\nfrom collections import OrderedDict\n\n\nclass AttrDict(OrderedDict):\n    '''\n    AttrDict extends OrderedDict to provide attribute-style access.\n\n    Items starting with __ or _OrderedDict__ can't be accessed as attributes.\n    '''\n    __exclude_keys__ = set()\n\n    def __getattr__(self, name):\n        'Getting ad.x gets ad[\"x\"]'\n        if (name.startswith('__') or name.startswith('_OrderedDict__')\n                or name in self.__exclude_keys__):\n            return super(AttrDict, self).__getattr__(name)\n        else:\n            try:\n                return self[name]\n            except KeyError:\n                raise AttributeError(name)\n\n    def __setattr__(self, name, value):\n        'Setting ad.x sets ad[\"x\"]'\n        if (name.startswith('__') or name.startswith('_OrderedDict__')\n                or name in self.__exclude_keys__):\n            return super(AttrDict, self).__setattr__(name, value)\n        self[name] = value\n\n    def __delattr__(self, name):\n        'Deleting ad.x deletes ad[\"x\"]'\n        if (name.startswith('__') or name.startswith('_OrderedDict__')\n                or name in self.__exclude_keys__):\n            return super(AttrDict, self).__delattr__(name)\n        del self[name]\n","sub_path":"orderedattrdict/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"425116694","text":"import requests\r\nfrom bs4 import BeautifulSoup as bs4\r\nfrom urllib.request import urlopen\r\nfrom datetime import datetime\r\nimport pandas as pd\r\n\r\n\r\ndef scrap_internshala(data_dict, df):\r\n    for name, url in data_dict.items():\r\n\r\n        uClient = urlopen(url)\r\n        internshala_page = uClient.read()\r\n        uClient.close()\r\n\r\n        page_beautify = bs4(internshala_page, \"html.parser\")\r\n\r\n        total_no_pages = page_beautify.find(\"span\", {\"id\":\"total_pages\"}).text\r\n\r\n        try:\r\n            for i in range(1, int(total_no_pages)+1):\r\n                next_url = url+\"/page-\"+str(i)\r\n\r\n                next_page_content = requests.get(next_url)\r\n                beautify_nextPage = bs4(next_page_content.text, \"html.parser\")\r\n                big_boxes = beautify_nextPage.find_all(\"div\", {\"class\":\"individual_internship\"})\r\n\r\n                for box in big_boxes:\r\n                    try:\r\n                        now = datetime.now()\r\n                        date_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n                    except :\r\n                        date_time = \"2/11/2020\"\r\n\r\n                    try:\r\n                        profile = box.find(\"div\", {\"class\":\"profile\"}).a.text\r\n                    except:\r\n                        profile = \"Nothing\"\r\n\r\n                    try:\r\n                        company = box.find(\"div\", {\"class\":\"company_name\"}).a.text.strip().replace(\"\\n\", \"\")\r\n                    except :\r\n                        company = \"Nothing\"\r\n                    \r\n                    try:\r\n                        location 
= box.find(\"a\", {\"class\":\"location_link\"}).text\r\n except:\r\n location = \"Nothing\"\r\n\r\n try:\r\n start_date = box.find(\"span\", {\"class\":\"start_immediately_desktop\"}).text\r\n except:\r\n start_date = \"Nothing\"\r\n\r\n try:\r\n stipend = box.find(\"span\", {\"class\":\"stipend\"}).text\r\n except:\r\n stipend = \"Nothing\"\r\n\r\n try:\r\n duration_row = box.find_all(\"div\", {\"class\":\"other_detail_item\"})\r\n duration = duration_row[1].find(\"div\", {\"class\":\"item_body\"}).text.strip().replace(\"\\n\", \"\")\r\n except:\r\n duration = \"Nothing\"\r\n\r\n try:\r\n apply_by = box.find(\"div\", {\"class\":\"apply_by\"})\r\n apply_by_date = apply_by.find(\"div\", {\"class\":\"item_body\"}).text\r\n except:\r\n apply_by_date = \"Nothing\"\r\n\r\n try:\r\n offer = box.find(\"div\", {\"class\":\"label_container label_container_mobile\"}).text.strip().replace(\"\\n\", \"\")\r\n except :\r\n offer = \"Nothing\"\r\n\r\n\r\n \r\n myDict = {\r\n \"Name\":name,\r\n \"Date Time\":date_time,\r\n \"profile\":profile, \r\n \"company\":company,\r\n \"Location\":location,\r\n \"Start Date\":start_date,\r\n \"Stipend\":stipend,\r\n 'Duration':duration,\r\n 'Apply by Date':apply_by_date,\r\n \"Offer\":offer,\r\n }\r\n\r\n df = df.append(myDict, ignore_index=True)\r\n\r\n df.to_csv(f\"scrapped-dataset/{name}.csv\", index=False) \r\n except :\r\n print(\"Next\")\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n df = pd.DataFrame(columns=['Name', 'Date Time', 'profile', 'company', 'Location', 'Start Date', 'Stipend', 'Duration', 'Apply by Date', 'Offer'])\r\n data_dict = {\r\n \"INTERNATIONAL\": \"https://internshala.com/internships/international-internship\",\r\n \"CHENNAI\": \"https://internshala.com/internships/internship-in-chennai\",\r\n \"FRESHERS\":\"https://internshala.com/fresher-jobs\",\r\n \"WORKFROMHOME\": \"https://internshala.com/internships/work-from-home-jobs\",\r\n \"DELHI\": \"https://internshala.com/internships/internship-in-delhi%20ncr\",\r\n \"BANGALORE\": \"https://internshala.com/internships/internship-in-bangalore\",\r\n \"MUMBAI\": \"https://internshala.com/internships/internship-in-mumbai\",\r\n \"HYDERABAD\": \"https://internshala.com/internships/internship-in-hyderabad\",\r\n \"KOLKATA\": \"https://internshala.com/internships/internship-in-kolkata\",\r\n }\r\n\r\n scrap_internshala(data_dict, df)\r\n\r\n\r\n","sub_path":"internshala-without-mongoDB.py","file_name":"internshala-without-mongoDB.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624236959","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport os\nimport keras\nfrom keras import backend as K\nfrom keras.layers import Dense, Activation, Dropout, Flatten, Conv2D\nfrom keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input\nfrom keras.models import Model, Sequential\nimport numpy as np\nimport matplotlib\nimport pandas as pd\nimport cv2\nfrom keras.utils import to_categorical\nfrom keras.layers import Dense, GlobalAveragePooling2D\nimport tensorflow as tf\nfrom keras.callbacks import ModelCheckpoint\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.loss import CrossEntropy\nfrom cleverhans.train import train\nfrom cleverhans.utils import AccuracyReport\nfrom cleverhans.utils_keras import cnn_model\nfrom cleverhans.utils_keras import 
KerasModelWrapper\nfrom cleverhans.utils_tf import model_eval\nfrom cleverhans.utils_tf import model_argmax\nimport functools\nimport tensorflow as tf\nfrom cleverhans import initializers\nfrom cleverhans.model import Model\n#from cleverhans.picklable_model import MLP, Conv2D, ReLU, Flatten, Linear\nfrom cleverhans.picklable_model import Softmax\nimport math\nimport logging\nfrom tensorflow.python.platform import flags\nfrom cleverhans.dataset import MNIST\nfrom cleverhans.utils import AccuracyReport, set_log_level\nfrom cleverhans.augmentation import random_horizontal_flip, random_shift\nfrom cleverhans.dataset import CIFAR10\nfrom cleverhans.model_zoo.all_convolutional import ModelAllConvolutional\nfrom keras.models import load_model\n\nfrom pdb import set_trace as trace\n\nfrom shutil import copyfile\nimport imageio\n\nimport tensorflow as tf\nfrom tensorflow.contrib.layers.python.layers import batch_norm\n\nimport os, csv, keras, math, logging, functools, cv2, sys\n#from keras.applications.vgg19 import VGG19, preprocess_input\nfrom keras.preprocessing import image\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, GlobalAveragePooling2D, ZeroPadding2D, Convolution2D, MaxPooling2D\nimport numpy as np\nimport pandas as pd\nfrom keras.utils import to_categorical\n#from sklearn.preprocessing import OneHotEncoder\nimport tensorflow as tf\nfrom keras.callbacks import ModelCheckpoint\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.loss import CrossEntropy\nfrom cleverhans.train import train\nfrom cleverhans.utils import AccuracyReport, set_log_level\nfrom cleverhans.utils_keras import cnn_model\nfrom cleverhans.utils_keras import KerasModelWrapper\nfrom cleverhans.utils_tf import model_eval, model_argmax\nfrom cleverhans import initializers\nfrom cleverhans.model import Model\nfrom tensorflow.python.platform import flags\n#from cleverhans.model_zoo.all_convolutional import ModelAllConvolutional\n#from vgg import VGG16\n#from vgg19 import VGG19\nfrom keras.datasets import cifar10\n#from sklearn.utils import class_weight\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n\n\n\nclasses = ['nrbc', 'notawbc', 'giant platelet', 'platelet clump', 'basophil',\n 'neutrophil', 'eosinophil', 'lymphocyte', 'monocyte', 'ig', 'atypical-blast']\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n#os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"2\"\ndef toOneHot(a):\n b = np.zeros((a.shape[0], 11))\n for i in range(a.shape[0]):\n for j in range(11):\n if a[i] == classes[j]:\n b[i][j] = 1\n return b\n\ndef del_all_flags(FLAGS):\n flags_dict = FLAGS._flags()\n keys_list = [keys for keys in flags_dict]\n for keys in keys_list:\n FLAGS.__delattr__(keys)\n\n\n\n\n\ndef lrelu(x , alpha = 0.2 , name=\"LeakyReLU\"):\n return tf.maximum(x , alpha*x)\n\ndef conv2d(input_, output_dim,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"conv2d\"):\n\n with tf.variable_scope(name):\n\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n\n return conv\n\ndef de_conv(input_, output_shape,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"deconv2d\", with_w=False):\n\n with tf.variable_scope(name):\n # 
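shape note (sketch): with SAME padding and stride d_h, conv2d_transpose gives out_h = in_h * d_h,\n        # which is how generate1 below upsamples 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64 -> 128x128\n        # 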
filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n\n return deconv, w, biases\n\n else:\n\n return deconv\n\ndef fully_connect(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n with tf.variable_scope(scope or \"Linear\"):\n\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n\n return tf.matmul(input_, matrix) + bias\n\ndef conv_cond_concat(x, y):\n \"\"\"Concatenate conditioning vector on feature map axis.\"\"\"\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n\n return tf.concat(3 , [x , y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2] , y_shapes[3]])])\n\ndef batch_normal(input , scope=\"scope\" , reuse=False):\n return batch_norm(input , epsilon=1e-5, decay=0.9 , scale=True, scope=scope , reuse=reuse , updates_collections=None)\n\ndef instance_norm(x):\n\n epsilon = 1e-9\n mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)\n return tf.div(tf.subtract(x, mean), tf.sqrt(tf.add(var, epsilon)))\n\n# def residual(x, output_dims, kernel, strides, name_1, name_2):\n\n# with tf.variable_scope('residual') as scope:\n\n# conv1 = conv2d(x, output_dims, k_h=kernel, k_w=kernel, d_h=strides, d_w=strides, name=name_1)\n# conv2 = conv2d(tf.nn.relu(conv1), output_dims, k_h=kernel, k_w=kernel, d_h=strides, d_w=strides, name=name_2)\n# resi = x + conv2\n\n# return resi\n\n# def deresidual(x, output_shape, kernel, strides, name_1, name_2):\n\n# with tf.variable_scope('residual_un') as scope:\n\n# deconv1 = de_conv(x, output_shape=output_shape, k_h=kernel, k_w=kernel, d_h=strides, d_w=strides, name=name_1)\n# deconv2 = de_conv(tf.nn.relu(deconv1), output_shape=output_shape, k_h=kernel, k_w=kernel, d_h=strides, d_w=strides, name=name_2)\n# resi = x + deconv2\n\n# return resi\nimport os\nimport errno\nimport numpy as np\nimport scipy\nimport scipy.misc\nfrom keras.models import Model\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\ndef get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale=False):\n return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w)\n\n\n# def transform(image, npx=64, is_crop=False, resize_w=64):\n# # npx : # of pixels width/height of image\n# if is_crop:\n# cropped_image = center_crop(image, npx, resize_w=resize_w)\n# else:\n# cropped_image = image\n# cropped_image = scipy.misc.imresize(cropped_image,\n# [resize_w, resize_w])\n# return np.array(cropped_image) / 127.5 - 1\n\ndef center_crop(x, crop_h , crop_w=None, resize_w=64):\n\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j 
= int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w],\n [resize_w, resize_w])\n\n\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, is_grayscale=False):\n if (is_grayscale):\n return scipy.misc.imread(path, flatten=True).astype(np.float)\n else:\n return scipy.misc.imread(path).astype(np.float)\n\n\ndef imsave(images, size, path):\n return scipy.misc.imsave(path, merge(images, size))\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n size1 = np.int(h * size[0])\n size2 = np.int(w * size[1])\n img = np.zeros((size1,size2, 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[np.int(j * h):np.int(j * h + h), np.int(i * w): np.int(i * w + w), :] = image\n\n return img\n\n\n\n# def inverse_transform(image):\n# return ((image + 1) * 127.5).astype(np.uint8)\n\n\n\nimport tensorflow as tf\n\nfrom cleverhans import initializers\nfrom cleverhans.serial import NoRefModel\n\n\n\nfrom keras.utils.np_utils import to_categorical \nimport PIL\nimport numpy as np\nimport scipy\nfrom tensorflow.python.framework.ops import convert_to_tensor\nimport os\nTINY = 1e-8\nd_scale_factor = 0.25\ng_scale_factor = 1 - 0.75/2\nimport csv\n\ndef getAcc(pred, next_y_images):\n acc = np.zeros([11])\n Tc = np.ones([11])\n for i in range(len(pred)):\n Tc[np.argmax(next_y_images[i])] = Tc[np.argmax(next_y_images[i])] + 1\n if (np.argmax(next_y_images[i]) == np.argmax(pred[i])):\n acc[np.argmax(next_y_images[i])] = acc[np.argmax(next_y_images[i])] + 1\n print(100*np.sum(acc)/(np.sum(Tc)-11))\n return 100*acc/Tc,100*np.sum(acc)/(np.sum(Tc)-11)\nclass ModelAllConvolutional(NoRefModel):\n \"\"\"\n A simple model that uses only convolution and downsampling---no batch norm or other techniques that can complicate\n adversarial training.\n \"\"\"\n def __init__(self, scope, nb_classes, nb_filters, input_shape, **kwargs):\n del kwargs\n NoRefModel.__init__(self, scope, nb_classes, locals())\n self.nb_filters = nb_filters\n self.input_shape = input_shape\n\n # Do a dummy run of fprop to create the variables from the start\n self.fprop(tf.placeholder(tf.float32, [32] + input_shape))\n # Put a reference to the params in self so that the params get pickled\n self.params = self.get_params()\n\n def fprop(self, x, **kwargs):\n del kwargs\n conv_args = dict(\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializers.HeReLuNormalInitializer,\n kernel_size=3,\n padding='same')\n y = x\n\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n log_resolution = int(round(\n math.log(self.input_shape[0]) / math.log(2)))\n for scale in range(log_resolution - 2):\n y = tf.layers.conv2d(y, self.nb_filters << scale, **conv_args)\n y = tf.layers.conv2d(y, self.nb_filters << (scale + 1), **conv_args)\n y = tf.layers.average_pooling2d(y, 2, 2)\n y = tf.layers.conv2d(y, self.nb_classes, **conv_args)\n logits = tf.reduce_mean(y, [1, 2])\n return {self.O_LOGITS: logits,\n self.O_PROBS: tf.nn.softmax(logits=logits)}\n\n\nclass ModelAllConvolutional1(NoRefModel):\n \"\"\"\n A simple model that uses only convolution and downsampling---no batch norm or other techniques that can complicate\n adversarial training.\n \"\"\"\n def __init__(self, scope, nb_classes, nb_filters, input_shape, **kwargs):\n del kwargs\n NoRefModel.__init__(self, scope, nb_classes, locals())\n self.nb_filters = nb_filters\n self.input_shape = input_shape\n\n # Do a dummy run of 
fprop to create the variables from the start\n self.fprop(tf.placeholder(tf.float32, [32] + input_shape))\n # Put a reference to the params in self so that the params get pickled\n self.params = self.get_params()\n\n def fprop(self, x, **kwargs):\n del kwargs\n conv_args = dict(\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializers.HeReLuNormalInitializer,\n kernel_size=3,\n padding='same')\n y = x\n\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n log_resolution = int(round(\n math.log(self.input_shape[0]) / math.log(2)))\n for scale in range(log_resolution - 4):\n y = tf.layers.conv2d(y, self.nb_filters << scale, **conv_args)\n y = tf.layers.conv2d(y, self.nb_filters << (scale + 1), **conv_args)\n y = tf.layers.average_pooling2d(y, 2, 2)\n conv = y\n scale = log_resolution - 4\n y = tf.layers.conv2d(y, self.nb_filters << scale, **conv_args)\n y = tf.layers.conv2d(y, self.nb_filters << (scale + 1), **conv_args)\n y = tf.layers.average_pooling2d(y, 2, 2)\n\n\n scale = log_resolution - 3\n y = tf.layers.conv2d(y, self.nb_filters << scale, **conv_args)\n y = tf.layers.conv2d(y, self.nb_filters << (scale + 1), **conv_args)\n y = tf.layers.average_pooling2d(y, 2, 2)\n y = tf.layers.conv2d(y, self.nb_classes, **conv_args)\n\n logits = tf.reduce_mean(y, [1, 2])\n return {self.O_LOGITS: conv,\n self.O_PROBS: tf.nn.softmax(logits=logits)}\n\n\n\n\n\n\nclass vaegan(object):\n\n #build model\n def __init__(self, batch_size, max_iters, repeat, model_path, latent_dim, sample_path, log_dir, learnrate_init):\n\n self.batch_size = batch_size\n self.max_iters = max_iters\n self.repeat_num = repeat\n self.saved_model_path = model_path\n\n self.latent_dim = latent_dim\n self.sample_path = sample_path\n self.log_dir = log_dir\n self.learn_rate_init = learnrate_init\n\n self.log_vars = []\n\n self.channel = 3\n self.output_size = 128\n\n self.x_input = tf.placeholder(tf.float32, [self.batch_size, self.output_size, self.output_size, 3])\n self.x_true = tf.placeholder(tf.float32, [self.batch_size, self.output_size, self.output_size, self.channel])\n\n\n\n self.labels = tf.placeholder(tf.float32, [self.batch_size, 11])\n\n\n self.ep1 = tf.random_normal(shape=[self.batch_size, self.latent_dim])\n self.zp1 = tf.random_normal(shape=[self.batch_size, self.latent_dim])\n\n self.ep2 = tf.random_normal(shape=[self.batch_size, self.latent_dim])\n self.zp2 = tf.random_normal(shape=[self.batch_size, self.latent_dim])\n self.keep_prob = tf.placeholder_with_default(1.0, shape=())\n \n print('Data Loading Begins')\n \n y_train=[]\n x_train1=[]\n for dirs in os.listdir('/home/manu_kohli/wbc/cam3/trainset/'):\n for files in os.listdir('/home/manu_kohli/wbc/cam3/trainset/'+dirs):\n y_train.append(int(dirs))\n x_train1.append(np.array(PIL.Image.open('/home/manu_kohli/wbc/cam3/trainset/'+dirs+'/'+files)))\n \n #x_train1 =np.asarray(x_train1)/255.0\n \n cam3_train_data=[]\n cam3_train_label=[]\n \n l=list(range(0,len(y_train)))\n l=np.asarray(l)\n np.random.shuffle(l)\n for i in l:\n cam3_train_data.append(x_train1[i])\n cam3_train_label.append(y_train[i])\n \n x_train1=cam3_train_data\n y_train=cam3_train_label\n \n x_train1 = np.asarray(x_train1)/127.5\n x_train1 =x_train1 - 1.\n y_train = np.asarray(y_train)\n #y_train = toOneHot(y_train)\n y_train= to_categorical(y_train, num_classes=11)\n# x_train1 = np.load( '/home/vinay/projects/Sigtuple/CreateData/DataAugmentation/X_Train.npy').astype('float32')\n# y_train = np.load( '/home/vinay/projects/Sigtuple/CreateData/DataAugmentation/Y_Train.npy')\n# 
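legacy absolute .npy paths, kept commented for provenance\n# note: inputs are scaled to [-1, 1] (x/127.5 - 1) to match the generator's tanh range;\n# the classifier branches rescale back with (x + 1) * 0.5 before get_logits\n# 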
x_train1_1 = np.load('/home/vinay/projects/Sigtuple/CreateData/DataAugmentation/X_Test.npy').astype('float32')\n# y_train_1 = np.load('/home/vinay/projects/Sigtuple/CreateData/DataAugmentation/Y_Test.npy')\n\n# x_train1_2 = np.load( '/home/vinay/projects/Sigtuple/CameraInvariance/Cam3Classifier/Data_Augmentation/X_Train_extra.npy').astype('float32')\n# y_train_2 = np.load( '/home/vinay/projects/Sigtuple/CameraInvariance/Cam3Classifier/Data_Augmentation/Y_Train_extra.npy')\n\n# x_train1 = np.append(x_train1, x_train1_2,axis =0)\n# y_train = np.append(y_train, y_train_2,axis =0)\n\n\n# x_train1 = np.concatenate((x_train1, x_train1_1), axis=0)\n# y_train = np.concatenate((y_train, y_train_1), axis=0)\n\n\n x_test1_cam3 = []\n y_test_cam3 = []\n \n for dirs in os.listdir('/home/manu_kohli/wbc/cam3/testset/'):\n for files in os.listdir('/home/manu_kohli/wbc/cam3/testset/'+dirs):\n y_test_cam3.append(int(dirs))\n x_test1_cam3.append(np.array(PIL.Image.open('/home/manu_kohli/wbc/cam3/testset/'+dirs+'/'+files)))\n \n cam3_test_data=[]\n cam3_test_label=[]\n \n l=list(range(0,len(y_test_cam3)))\n l=np.asarray(l)\n np.random.shuffle(l)\n for i in l:\n cam3_test_data.append(x_test1_cam3[i])\n cam3_test_label.append(y_test_cam3[i])\n \n x_test1_cam3 = cam3_test_data\n y_test_cam3 = cam3_test_label\n \n y_test_cam3= to_categorical(y_test_cam3, num_classes=11) \n #y_test_cam3 = toOneHot(np.asarray(y_test_cam3))\n #x_test1_cam3=np.asarray(x_test1_cam3)/255.0\n x_test1_cam3 = np.asarray(x_test1_cam3)/127.5\n x_test1_cam3 =x_test1_cam3 - 1.\n \n x_test1=[]\n y_test =[]\n \n for dirs in os.listdir('/home/manu_kohli/wbc/cam2/combine_train_test_cam2/'):\n for files in os.listdir('/home/manu_kohli/wbc/cam2/combine_train_test_cam2/'+dirs):\n y_test.append(int(dirs))\n x_test1.append(np.array(PIL.Image.open('/home/manu_kohli/wbc/cam2/combine_train_test_cam2/'+dirs+'/'+files)))\n \n# x_test1 = np.load('/home/vinay/projects/Sigtuple/CreateData/cam2_images.npy').astype('float32')/255\n# y_test = np.load('/home/vinay/projects/Sigtuple/CreateData/cam2_labels.npy')\n\n cam2_data=[]\n cam2_label=[]\n\n l=list(range(0,len(y_test)))\n l=np.asarray(l)\n np.random.shuffle(l)\n for i in l:\n cam2_data.append(x_test1[i])\n cam2_label.append(y_test[i])\n \n x_test1 = cam2_data\n y_test = cam2_label\n \n y_test= to_categorical(y_test, num_classes=11)\n #y_test = toOneHot(np.asarray(y_test))\n # x_test1=np.asarray(x_test1)/255.0\n x_test1 = np.asarray(x_test1)/127.5\n x_test1 =x_test1 - 1.\n\n# x_test1_cam3 = np.load('/home/vinay/projects/Sigtuple/CreateData/cam3_images.npy').astype('float32')/255\n# y_test_cam3 = np.load('/home/vinay/projects/Sigtuple/CreateData/cam3_labels.npy')\n# y_test_cam3 = toOneHot(y_test_cam3)\n\n #print(x_train1.shape, y_train.shape)\n #print(x_test1.shape, y_test.shape)\n #x_train = np.zeros([x_train1.shape[0], self.output_size,self.output_size,self.channel])\n #x_test = np.zeros([x_test1.shape[0], self.output_size,self.output_size,self.channel])\n #x_test_cam3 = np.zeros([x_test1_cam3.shape[0], self.output_size,self.output_size,self.channel])\n\n# x_train[:,:,:,0] = x_train1[:,:,:,2]\n# x_train[:,:,:,1] = x_train1[:,:,:,1]\n# x_train[:,:,:,2] = x_train1[:,:,:,0]\n\n# x_test[:,:,:,0] = x_test1[:,:,:,2]\n# x_test[:,:,:,1] = x_test1[:,:,:,1]\n# x_test[:,:,:,2] = x_test1[:,:,:,0]\n\n# x_test_cam3[:,:,:,0] = x_test1_cam3[:,:,:,2]\n# x_test_cam3[:,:,:,1] = x_test1_cam3[:,:,:,1]\n# x_test_cam3[:,:,:,2] = x_test1_cam3[:,:,:,0]\n\n x_train = 
np.float32(x_train1).reshape([-1,self.output_size,self.output_size,self.channel])\n x_test = np.float32(x_test1).reshape([-1,self.output_size,self.output_size,self.channel])\n x_test_cam3 = np.float32(x_test1_cam3).reshape([-1,self.output_size,self.output_size,self.channel])\n\n print(x_train.shape, y_train.shape)\n print(x_test.shape, y_test.shape)\n print(x_test_cam3.shape, y_test_cam3.shape)\n print(np.amin(x_train), np.amin( x_test ), np.amin(x_test_cam3))\n print(np.amax(x_train), np.amax( x_test ), np.amax(x_test_cam3))\n\n\n TrainDataSize = x_train.shape[0]\n TestDataSize = x_test.shape[0]\n self.TrainDataSize = TrainDataSize\n self.TestDataSize = TestDataSize\n self.TestDataSize_cam3 = x_test_cam3.shape[0]\n\n\n self.X_Real_Test = x_test\n self.X_Real_Train = x_train\n self.X_Real_Test_cam3 = x_test_cam3 \n self.Y_train = y_train\n self.Y_test = y_test\n self.Y_test_cam3 = y_test_cam3\n\n\n# self.X_Real_Train = self.X_Real_Train*2 - 1\n# self.X_Real_Test = self.X_Real_Test*2 - 1\n# self.X_Real_Test_cam3 = self.X_Real_Test_cam3*2 - 1\n\n print('Max', np.max(self.X_Real_Train))\n print('Min', np.min(self.X_Real_Train))\n\n print('Data Loading Completed')\n\n\n\n\n\n def build_model_vaegan(self):\n\n self.z1_mean, self.z1_sigm = self.Encode1(self.x_input)\n self.z1_x = tf.add( self.z1_mean, tf.sqrt(tf.exp(self.z1_sigm))*self.ep1)\n self.x_input_sobel = tf.image.sobel_edges(self.x_input)\n self.x_input_sobel = tf.reshape(self.x_input_sobel, [64,128,128,6])\n self.x_out = self.generate1(self.x_input_sobel, self.z1_x, reuse=False)\n\n self.x_filt2 = self.generate1(self.x_input_sobel, self.z1_mean, reuse=True)\n\n self.model_classifier_logits = ModelAllConvolutional('model1', 11, 64, input_shape=[self.output_size,self.output_size,self.channel])\n self.model_classifier_percept = ModelAllConvolutional1('model2', 11, 64, input_shape=[self.output_size,self.output_size,self.channel])\n #tanh o/p -1 to 1\n self.logits_x_true = self.model_classifier_logits.get_logits((self.x_true+1)*0.5)\n self.percept_x_true = self.model_classifier_percept.get_logits((self.x_true+1)*0.5)\n #self.pred_x_true = tf.nn.softmax(self.logits_x_true)\n self.pred_x_true = self.model_classifier_percept.get_probs((self.x_true+1)*0.5)\n\n\n self.logits_x_out = self.model_classifier_logits.get_logits((self.x_out+1)*0.5)\n self.percept_x_out = self.model_classifier_percept.get_logits((self.x_out+1)*0.5)\n self.pred_x_out = tf.nn.softmax(self.logits_x_out)\n\n\n self.logits_x_filt2 = self.model_classifier_logits.get_logits((self.x_filt2+1)*0.5)\n self.pred_x_filt2 = tf.nn.softmax(self.logits_x_filt2)\n\n\n\n self.cl_loss_x_true = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits_x_true, labels = self.labels))\n self.cl_loss_x_out = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits_x_out , labels = self.labels))\n self.cl_loss_x_true = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits_x_true, labels = self.labels))\n\n\n\n self.kl1_loss = self.KL_loss(self.z1_mean, self.z1_sigm)/(self.latent_dim*self.batch_size)\n\n\n self.Loss_vae1_pixel = tf.reduce_mean(tf.square(tf.subtract(self.x_out, self.x_true))) + tf.reduce_mean(tf.abs(tf.subtract(self.x_out, self.x_true))) \n self.Loss_vae1_percept = tf.reduce_mean(tf.square(tf.subtract(self.percept_x_out, self.percept_x_true)))\n self.Loss_vae1_logits = tf.reduce_mean(tf.square(tf.subtract(self.logits_x_out, self.logits_x_true)))\n\n\n\n #For encode\n self.encode1_loss = 1*self.kl1_loss + 
10*self.Loss_vae1_pixel + 0*self.cl_loss_x_out + 0*self.Loss_vae1_logits + 1000*self.Loss_vae1_percept\n\n #for Gen\n self.G1_loss = 10*self.Loss_vae1_pixel + 0*self.cl_loss_x_out + 0*self.Loss_vae1_logits + 1000*self.Loss_vae1_percept\n\n\n t_vars = tf.trainable_variables()\n\n self.log_vars.append((\"encode1_loss\", self.encode1_loss))\n self.log_vars.append((\"generator1_loss\", self.G1_loss))\n\n\n\n self.g1_vars = [var for var in t_vars if 'VAE_gen1' in var.name]\n self.e1_vars = [var for var in t_vars if 'VAE_e1_' in var.name]\n\n\n self.saver = tf.train.Saver()\n for k, v in self.log_vars:\n tf.summary.scalar(k, v)\n\n print('Model is Built')\n\n\n\n\n\n #do train\n def train(self):\n\n global_step = tf.Variable(0, trainable=False)\n add_global = global_step.assign_add(1)\n new_learning_rate = tf.train.exponential_decay(self.learn_rate_init, global_step=global_step, decay_steps=10000,\n decay_rate=0.98)\n\n\n\n\n #for G1\n trainer_G1 = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)\n #trainer_G1 = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate_init)\n #trainer_G1 = tf.train.AdamOptimizer(learning_rate=new_learning_rate)\n gradients_G1 = trainer_G1.compute_gradients(self.G1_loss, var_list=self.g1_vars)\n opti_G1 = trainer_G1.apply_gradients(gradients_G1)\n\n\n\n #for E1\n trainer_E1 = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)\n #trainer_E1 = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate_init)\n #trainer_E1 = tf.train.AdamOptimizer(learning_rate=new_learning_rate)\n gradients_E1 = trainer_E1.compute_gradients(self.encode1_loss, var_list=self.e1_vars)\n opti_E1 = trainer_E1.apply_gradients(gradients_E1)\n\n\n\n\n init = tf.global_variables_initializer()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n\n #changed restoring of weights. 
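Two savers are involved here:\n            # self.saver restores the full graph from saved_model_path, while saver_new below is\n            # built to map the pretrained 'model1' classifier variables onto 'model2'\n            # (its restore call is currently commented out).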
\n ckpt = tf.train.get_checkpoint_state('/home/manu_kohli/vae_classifier_weights/Classifier/checkpoint')\n ckpt_path = ckpt.model_checkpoint_path\n sess.run(init)\n self.saver.restore(sess , self.saved_model_path)\n #print(tf.trainable_variables(),'tf.trainable_variables()')\n #saver = tf.train.Saver([var for var in tf.trainable_variables() if var.name.startswith('model1')])\n #print(ckpt_path)\n #saver.restore(sess, ckpt_path)\n \n ##self.saver.save(sess , self.saved_model_path)\n\n print('Creating a Replica of s1 onto s2')\n s1_vars1 = [var.name for var in tf.trainable_variables() if 'model1' in var.name]\n s2_vars1 = [var for var in tf.trainable_variables() if 'model2' in var.name]\n dictionary = {}\n for i in range(len(s2_vars1)):\n dictionary[s1_vars1[i][0:-2]] = s2_vars1[i]\n saver_new = tf.train.Saver(var_list=dictionary)\n #saver_new.restore(sess, ckpt_path)\n\n\n ##self.saver.save(sess , ckpt.model_checkpoint_path)\n\n\n print('******************')\n print(' ')\n print(' ')\n print('Plain VAE Training Begins')\n print(' ')\n print(' ')\n print('******************')\n\n step = 0\n g_acc=87.0\n batchNum = 0\n step=0\n while step <= 100000:\n next_x_images = self.X_Real_Train[batchNum*self.batch_size:(batchNum+1)*self.batch_size]\n next_y_images = self.Y_train[batchNum*self.batch_size:(batchNum+1)*self.batch_size]\n batchNum = batchNum +1\n #print(batchNum*self.batch_size)\n if(((batchNum+1)%170)==0):\n idx = np.random.permutation(len(self.X_Real_Train))\n self.X_Real_Train,self.Y_train = self.X_Real_Train[idx], self.Y_train[idx]\n batchNum = 0\n print('data exhausted')\n #print(idx)\n #print(self.X_Real_Train.shape, self.Y_train.shape)\n #print(batchNum)\n #print(next_y_images)\n fd ={self.keep_prob:1, self.x_input: next_x_images, self.x_true: next_x_images, self.labels: next_y_images}\n sess.run(opti_E1, feed_dict=fd)\n sess.run(opti_G1, feed_dict=fd)\n\n\n\n new_learn_rate = sess.run(new_learning_rate)\n\n if new_learn_rate > 0.00005:\n sess.run(add_global)\n\n \n if np.mod(step , 100) == 0 and step != 0:\n# for iter in range(200):\n# print('step', step)\n #print('model saved: ', self.saved_model_path)\n #self.saver.save(sess , self.saved_model_path, global_step=step)\n\n print('lr:', new_learn_rate)\n k1, e1, l11, l12, l13, cl, g1 = sess.run([self.kl1_loss , self.encode1_loss,self.Loss_vae1_pixel,self.Loss_vae1_percept, self.Loss_vae1_logits,self.cl_loss_x_out,self.G1_loss],feed_dict=fd)\n print('E1_loss_KL_Loss: ',k1)\n print('E1_loss_Total: ', e1)\n\n print('G1_loss_MSE: ', l11, 10*l11)\n print('G1_loss_Percept: ', l12, 0*l12)\n print('G1_loss_Logits: ', l13, 0*l13)\n print('G1_loss_CL: ', cl, 1*cl)\n print('G1_loss_Total: ', g1)\n\n Preddiction = np.zeros([self.TestDataSize_cam3,11])\n for i in range(np.int(self.TestDataSize_cam3/self.batch_size)):\n next_x_images = self.X_Real_Test_cam3[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_filt2, feed_dict={self.x_input: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n x_filt = sess.run(self.x_filt2, feed_dict={self.x_input: next_x_images, self.keep_prob:1})\n x_filt_percept = sess.run(self.percept_x_out, feed_dict={self.x_input: next_x_images, self.keep_prob:1})\n print('shape:', x_filt_percept.shape)\n if (step == 100):\n np.save('Data/x_cam3_test.npy',next_x_images)\n name = 'Data/x_filt__' + str(step) + '_.npy' \n np.save(name,x_filt)\n# print('Full Filtered Real Train Example Acc = ',getAcc(Preddiction[0:150*64], 
self.Y_test_cam3[0:150*64]))\n# print('Full Filtered Real Test Example Acc = ',getAcc(Preddiction[150*64:], self.Y_test_cam3[150*64:]))\n accs,l_acc = getAcc(Preddiction, self.Y_test_cam3)\n print('Full Filtered Real Test Example Acc = ',accs,l_acc)\n if(l_acc>g_acc):\n print('model saved: ', '/home/manu_kohli/vae_classifier_weights/VAE/itr_model_2/model.cpkt')\n self.saver.save(sess , '/home/manu_kohli/vae_classifier_weights/VAE/itr_model_2/model.cpkt', global_step=step)\n g_acc= l_acc\n\n Preddiction = np.zeros([self.TrainDataSize,11])\n for i in range(np.int(self.TrainDataSize/self.batch_size)):\n next_x_images = self.X_Real_Train[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_filt2, feed_dict={self.x_input: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n print('Full Filtered Real Train Example Acc = ',getAcc(Preddiction, self.Y_train))\n if (step == 100):\n np.save('Data/x_cam3_train.npy',next_x_images)\n\n Preddiction = np.zeros([self.TestDataSize,11])\n for i in range(np.int(self.TestDataSize/self.batch_size)):\n next_x_images = self.X_Real_Test[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_filt2, feed_dict={self.x_input: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n\n print('Full Filtered Real Cam2 Example Acc = ',getAcc(Preddiction, self.Y_test))\n if (step == 100):\n np.save('Data/x_cam2.npy',next_x_images)\n\n Preddiction = np.zeros([self.TestDataSize,11])\n for i in range(np.int(self.TestDataSize/self.batch_size)):\n next_x_images = self.X_Real_Test[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_true, feed_dict={self.x_true: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n print('Full Real Cam2 Example Acc = ',getAcc(Preddiction, self.Y_test))\n\n Preddiction = np.zeros([self.TestDataSize_cam3,11])\n for i in range(np.int(self.TestDataSize_cam3/self.batch_size)):\n next_x_images = self.X_Real_Test_cam3[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_true, feed_dict={self.x_true: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n \n print('Full Real Test Example Acc = ',getAcc(Preddiction, self.Y_test_cam3))\n \n Preddiction = np.zeros([self.TrainDataSize,11])\n for i in range(np.int(self.TrainDataSize/self.batch_size)):\n next_x_images = self.X_Real_Train[i*self.batch_size:(i+1)*self.batch_size]\n pred = sess.run(self.pred_x_true, feed_dict={self.x_true: next_x_images, self.keep_prob:1})\n Preddiction[i*self.batch_size:(i+1)*self.batch_size] = pred.reshape([64,11])\n \n print('Full Real Train Example Acc = ',getAcc(Preddiction, self.Y_train))\n \n# print('Full Filtered Real Train Example Acc = ',getAcc(Preddiction[0:150*64], self.Y_test_cam3[0:150*64]))\n# print('Full Filtered Real Test Example Acc = ',getAcc(Preddiction[150*64:], self.Y_test_cam3[150*64:]))\n\n\n step += 1\n\n def generate1(self, edge, z_var, reuse=False):\n\n with tf.variable_scope('generator1') as scope:\n\n if reuse == True:\n scope.reuse_variables()\n\n d1 = lrelu(fully_connect(z_var , output_size=64*4*4, scope='VAE_gen1_fully1'))\n d2 = lrelu(fully_connect(d1 , output_size=128*4*4, scope='VAE_gen1_fully2'))\n d3 = tf.reshape(d2, [self.batch_size, 4, 4, 128])\n d4 = lrelu(de_conv(d3, output_shape=[self.batch_size, 8, 8, 128], k_h=3, 
k_w=3,name='VAE_gen1_deconv1'))\n d5 = lrelu(de_conv(d4, output_shape=[self.batch_size, 16, 16, 128], k_h=3, k_w=3,name='VAE_gen1_deconv2'))\n d6 = lrelu(de_conv(d5, output_shape=[self.batch_size, 32, 32, 128], k_h=3, k_w=3,name='VAE_gen1_deconv3'))\n d7 = lrelu(de_conv(d6, output_shape=[self.batch_size, 64, 64, 128], k_h=3, k_w=3,name='VAE_gen1_deconv4'))\n d8 = de_conv(d7, output_shape=[self.batch_size, 128, 128, 3] , k_h=3, k_w=3, name='VAE_gen1_deconv5')\n d9 = tf.nn.tanh(d8)\n d10 = tf.concat([d9, edge], 3) \n conv1 = lrelu(conv2d(d10, output_dim=128, k_h=3, k_w=3, d_h=1, d_w=1,name='VAE_gen1_c1'))\n conv2 = lrelu(conv2d(conv1, output_dim=128, k_h=3, k_w=3, d_h=1, d_w=1,name='VAE_gen1_c2'))\n conv3 = conv2d(conv2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1,name='VAE_gen1_c3')\n\n\n return tf.nn.tanh(conv3)\n\n\n\n def Encode1(self, x, reuse=False):\n\n with tf.variable_scope('encode1') as scope:\n\n if reuse == True:\n scope.reuse_variables()\n conv1 = lrelu(conv2d(x, output_dim=128, k_h=3, k_w=3, name='VAE_e1_c1'))\n conv2 = lrelu(conv2d(conv1, output_dim=128, k_h=3, k_w=3,name='VAE_e1_c2'))\n conv3 = lrelu(conv2d(conv2, output_dim=128, k_h=3, k_w=3,name='VAE_e1_c3'))\n conv4 = lrelu(conv2d(conv3, output_dim=128, k_h=3, k_w=3,name='VAE_e1_c4'))\n conv5 = lrelu(conv2d(conv4, output_dim=128, k_h=3, k_w=3,name='VAE_e1_c5'))\n conv6 = tf.reshape(conv5, [self.batch_size, 128 * 4 * 4])\n fc1 = lrelu(fully_connect(conv6, output_size= 64*4*4, scope='VAE_e1_f1'))\n z_mean = fully_connect(fc1, output_size=self.latent_dim, scope='VAE_e1_f2')\n z_sigma = fully_connect(fc1, output_size=self.latent_dim, scope='VAE_e1_f3')\n return z_mean, z_sigma\n\n\n def KL_loss(self, mu, log_var):\n return -0.5 * tf.reduce_sum(1 + log_var - tf.pow(mu, 2) - tf.exp(log_var))\n\n def sample_z(self, mu, log_var):\n eps = tf.random_normal(shape=tf.shape(mu))\n return mu + tf.exp(log_var / 2) * eps\n\n\n def NLLNormal(self, pred, target):\n\n c = -0.5 * tf.log(2 * np.pi)\n multiplier = 1.0 / (2.0 * 1)\n tmp = tf.square(pred - target)\n tmp *= -multiplier\n tmp += c\n\n return tmp\n\n\nflags = tf.app.flags\n\nflags.DEFINE_integer(\"batch_size\" , 64, \"batch size\")\nflags.DEFINE_integer(\"max_iters\" , 10000, \"the maxmization epoch\")\nflags.DEFINE_integer(\"latent_dim\" , 64, \"the dim of latent code\")\nflags.DEFINE_float(\"learn_rate_init\" , 0.0001, \"the init of learn rate\")\nflags.DEFINE_integer(\"repeat\", 10000, \"the numbers of repeat for your datasets\")\nflags.DEFINE_string(\"path\", '/home/?/data/', \"for example, '/home/jack/data/' is the directory of your celebA data\")\nflags.DEFINE_integer(\"op\", 0, \"Training or Test\")\n\nFLAGS = flags.FLAGS\nFLAGS.op = 0\nif (1):\n path123 = '.'\n root_log_dir = path123 + \"/log_dir\"\n vaegan_checkpoint_dir = \"/home/manu_kohli/vae_classifier_weights/VAE/itr_model_1/model.cpkt-5400\"\n sample_path = path123 + \"/sample\"\n\n\n model_path = vaegan_checkpoint_dir\n\n batch_size = FLAGS.batch_size\n max_iters = FLAGS.max_iters\n latent_dim = FLAGS.latent_dim\n data_repeat = FLAGS.repeat\n\n learn_rate_init = FLAGS.learn_rate_init\n #learn_rate_init= 9e-5\n vaeGan = vaegan(batch_size= batch_size, max_iters= max_iters, repeat = data_repeat,\n model_path= model_path, latent_dim= latent_dim,\n sample_path= sample_path , log_dir= root_log_dir , learnrate_init= learn_rate_init)\n\n vaeGan.build_model_vaegan()\n 
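# train() alternates RMSProp updates for the encoder (opti_E1) and generator (opti_G1)\n    # and periodically reports per-class accuracy through getAcc; a hypothetical\n    # post-training use of the learned filter (names as defined above) would be:\n    #   filtered = sess.run(vaeGan.x_filt2, {vaeGan.x_input: batch, vaeGan.keep_prob: 1.0})\n    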
vaeGan.train()\n\n\n","sub_path":"TrainVAE.py","file_name":"TrainVAE.py","file_ext":"py","file_size_in_byte":39157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"458922953","text":"import re\n\n\ndef alternative_strip(user_message, characters=r\"\\s*\"):\n regex_beginning_string = r'(^([' + characters + r']*))'\n stripping_regex = re.compile(regex_beginning_string, re.VERBOSE)\n user_message = re.sub(stripping_regex, '', user_message)\n regex_ending_string = r'(([' + characters + r']*)$)'\n stripping_regex = re.compile(regex_ending_string, re.VERBOSE)\n user_message = re.sub(stripping_regex, '', user_message)\n return user_message\n\n\nmessage = 'this should be stripped --- this will remain --- this should be stripped'\nprint(\" Original message =\", message, '<>')\nprint(\" Original strip with arguments =\", message.strip('thishouldbestriped '), '<>')\nprint(\" Alternative strip with arguments =\", alternative_strip(message, 'thishouldbestriped '), '<>')\nmessage = ' --- this will remain --- '\nprint(\" Original message =\", message, '<>')\nprint(\" Original strip without arguments =\", message.strip(), '<>')\nprint(\"Alternative strip without arguments =\", alternative_strip(message), '<>')\n","sub_path":"students/uss_tomasz/lesson_08_regular_expressions/8_1/regex_version_of_strip.py","file_name":"regex_version_of_strip.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112651092","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-rpc7z9ca/catsoop/catsoop/__UTIL__/time/content.py\n# Compiled at: 2020-01-06 01:44:31\n# Size of source mod 2**32: 867 bytes\nimport time\ncs_handler = 'raw_response'\ncontent_type = 'text/plain'\nresponse = str(int(time.time()))","sub_path":"pycfiles/catsoop-2019.9.5.tar/content.cpython-38.py","file_name":"content.cpython-38.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650057398","text":"\"\"\"\r\nXavier Ladores\r\nxlad198\r\nProgram that uses a dictionary of colours and a pattern text file to draw\r\npixel art.\r\n\"\"\"\r\nfrom tkinter import *\r\n\r\n#-------------------------------------------\r\n#-------------------------------------------\r\n# main() function\r\n#-------------------------------------------\r\ndef main():\r\n prompt1 = \"Enter a palette filename: \"\r\n for_colours_dict = input(prompt1)\r\n prompt2 = \"Enter a pattern filename: \"\r\n for_pattern_list = input(prompt2)\r\n list1 = process_file(for_colours_dict)\r\n list2 = process_file(for_pattern_list)\r\n \r\n size = 50\r\n start_left = size * 2\r\n start_down = size * 2\r\n #replace these two lines in step 8\r\n pattern_list = create_pattern_list(list2)\r\n colours_dictionary = create_colours_dict(list1)\r\n \r\n number_of_rows = len(pattern_list)\t\r\n number_of_columns = len(pattern_list[0])\r\n canvas_width = size * number_of_columns +size * 4\r\n canvas_height = number_of_rows * size + size * 4\r\n window = Tk() \r\n window.title(\"A5 by xlad198\") \r\n geometry_string = str(canvas_width)+\"x\"+str(canvas_height)+\"+10+20\"\r\n window.geometry(geometry_string)\r\n a_canvas = Canvas(window)\r\n a_canvas.config(background=\"white\")\r\n a_canvas.pack(fill = BOTH, expand = True) #Canvas fills the 
whole window \r\n draw_pattern(a_canvas, colours_dictionary, pattern_list, size, start_left, start_down)\r\n window.mainloop()\r\n\r\ndef split_digits(line):\r\n split_list = list(line)\r\n position = 0\r\n for iterations in range(len(split_list)):\r\n split_list[position] = int(split_list[position])\r\n position += 1\r\n return split_list\r\n\r\ndef process_file(filename):\r\n read_file = open(filename, \"r\")\r\n contents = read_file.read()\r\n read_file.close()\r\n\r\n strings_list = contents.split(\"\\n\")\r\n return strings_list\r\n \r\ndef create_colours_dict(lines):\r\n colours_dict = {}\r\n colon = \":\"\r\n for strings in lines:\r\n colon_index = strings.find(colon)\r\n key = int(strings[: colon_index])\r\n value = strings[colon_index + 1 :]\r\n colours_dict[key] = value\r\n return colours_dict\r\n \r\ndef create_pattern_list(lines):\r\n position = 0\r\n position2 = 0\r\n for loops in range(len(lines)):\r\n if position <= len(lines):\r\n lines[position] = list(lines[position])\r\n new_list = lines[position]\r\n for elements in new_list:\r\n elements = int(elements)\r\n new_list[position2] = elements\r\n position2 += 1\r\n position2 = 0\r\n position += 1\r\n return lines\r\n \r\n\r\ndef draw_pattern(a_canvas, colours_dictionary, pattern_list, size, left, top):\r\n possible_digits = \"0123456789\" \r\n down = top\r\n #complete this\r\n fixed_left = left\r\n for elements in pattern_list:\r\n for integers in elements:\r\n a_canvas.create_rectangle(left, top, left + size,\r\n top + size,\r\n fill= colours_dictionary[integers])\r\n left += size\r\n left = fixed_left\r\n top += size\r\n return\r\n \r\nmain()\r\n","sub_path":"xlad198A5.py","file_name":"xlad198A5.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88001759","text":"# creating a tiger class with attributes and methods\n\nclass Tiger(object):\n\n legs = 4\n\n def __init__(self, color='orange_black', age=3, name='Tiger'):\n self.color = color\n self.age = age\n self.name = name\n print('You created Tiger ' + self.name)\n def info_color(self):\n return 'Color:', self.color\n\n\n def roar(self, roar_times=1):\n return self.name + ' Roar!'* roar_times\n\n def aged(self, years=1):\n new_age = self.age + years\n self.age = new_age\n return new_age\n\nclass BengalTiger(Tiger):\n legs = 5\n\n def __init__(self, color='white', age=6, name='Default'):\n Tiger.__init__(self, color, age, name)\n self.color = color\n self.age = age\n self.name = name\n print('You created a Bengal tiger ' + self.name)\n\n #overwrite method of parent class:\n def roar(self, roar_times=2):\n\n return (super(BengalTiger, self).roar(roar_times), self.name + ' Roar!!!!!'* roar_times)\n\n\ntiger_anya = Tiger(color='Red', age=5, name='Anya')\nprint(tiger_anya.roar(roar_times=3))\n\nbengal = BengalTiger(color='white', age=10, name='Umka')\nprint(bengal.roar())\nbengal2 = BengalTiger()\nprint(bengal2.roar())\nprint(BengalTiger.legs)\nprint ('----------back to school 7/21/19-----------')\n\n# back to school 7/21/19\n\nclass Motorcycle(object):\n\n wheels = 4\n headlights = 2\n\n def __init__(self, make, color):\n print('Created general mc')\n self.make = make\n self.color = color\n\n def info(self):\n print('Make: {}'.format(self.make))\n print('Color: {}'.format(self.color))\n\n def drive(self):\n print('Started')\n\n def stop(self):\n print('Stopped')\n\nclass SportMotorcycle(Motorcycle):\n\n def __init__(self, make, color):\n print('Created SportMc')\n 
Motorcycle.__init__(self, make, color)\n\n def drive(self):\n super(SportMotorcycle, self).drive()\n print('Started SportMC')\n\nmc1 = Motorcycle('Honda', 'Red')\nprint(mc1.wheels)\nprint(mc1.make, mc1.color)\nmc1.info()\n\nmc2 = SportMotorcycle('Acura', 'Blue')\nmc2.info()\nmc2.drive()","sub_path":"Udemy_python_anyone_can_code/Scratch_papers/class_methods.py","file_name":"class_methods.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"27183624","text":"import requests\nimport argparse\nimport json\nsourceDict={\"CAMBRIDGE\":\"剑桥高阶英汉双解词典\",\"LONGMAN\":\"朗文当代高级英语词典\",\"COLLINS\":\"柯林斯英汉双解大词典\",\"ONLINE\":\"金山词霸\"}\nparser = argparse.ArgumentParser(description='manual to this script')\nparser.add_argument('word', type=str, help=\"The word you want to query\")\nparser.add_argument('--detail','-d', action='store_true', default=False, help=\"Show the detailed meaning of the word\")\nparser.add_argument('--brief','-b', action='store_true', default=True, help=\"Show the brief meaning of the word\", )\n\nargs = parser.parse_args()\n\ndef parseBrief(brief):\n print (\"输入的单词: \"+ brief['wordIn'])\n print (\"输出的单词: \"+brief['wordOut'])\n if 'relation' in brief['lemma']:\n print (\"%s为%s的%s\"%(brief['wordOut'],brief['wordIn'],brief['lemma']['relation']))\n if 'usPron' in brief or 'ukPron' in brief:\n print (\"\\n音标\")\n if 'usPron' in brief:\n print (\" 美式发音: \"+brief['usPron']['ps'])\n if 'ukPron' in brief:\n print (\" 英式发音: \"+brief['ukPron']['ps'])\n if 'chnDefinitions' in brief:\n print (\"\\n中文释义\")\n for chn_def in brief['chnDefinitions']:\n if 'pos' in chn_def:\n print (\" \"+chn_def['pos']+\" \"+chn_def['meaning'])\n else:\n print (\" \"+chn_def['meaning'])\n if 'engDefinitions' in brief:\n print (\"\\n英文释义\")\n for eng_def in brief['engDefinitions']:\n if 'pos' in eng_def:\n print (\" \"+eng_def['pos']+\" \"+eng_def['meaning'])\n else:\n print (\" \"+eng_def['meaning'])\ndef parseSource(sentenceGroup):\n if 'source' not in sentenceGroup:\n return \"牛津高阶英汉双解词典\"\n else:\n return sourceDict[sentenceGroup['source']]\ndef parseDetail(detail):\n parseBrief(detail['wordBrief'])\n if 'derivatives' in detail:\n print (\"\\n词形变换\")\n for derivative in detail['derivatives']:\n print(\" \"+derivative['relation']+\": \"+derivative['word'])\n if 'sentenceLists' in detail:\n print (\"\\n双语释义\")\n for sentenceGroup in detail['sentenceLists']:\n count=1\n print (\"\".ljust(2)+parseSource(sentenceGroup))\n for sentence in sentenceGroup['sentences']:\n print(\"\".ljust(4)+str(count).ljust(3)+sentence['eng'])\n print(\"\".ljust(4)+\"\".ljust(3)+sentence['chn'])\n count+=1\n\n\nif args.detail:\n detail=json.loads(requests.get(\"https://ireading.site/word/detail?json=true&word=\"+args.word).text)\n parseDetail(detail)\nelse:\n brief=json.loads(requests.get(\"https://ireading.site/word/brief?json=true&word=\"+args.word).text)\n parseBrief(brief)\n","sub_path":"idict.py","file_name":"idict.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"557261299","text":"import requests\nfrom django_cron import CronJobBase, Schedule\nfrom .models import TopNews\nfrom .parser import TopNewsParser\n\n\nclass ParseNewNews(CronJobBase):\n RUN_EVERY_MINS = 5 # every 5 minutes\n\n schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\n code = 'parseNbaUdn.scheduled_jobs' # a unique code\n\n def do(self):\n # parse and save top newses' title, 
link, thumb img link\n        index_url = 'https://nba.udn.com/nba/index?gr=www'\n        base_url = 'https://nba.udn.com'\n        page = requests.get(index_url)\n        p = TopNewsParser()\n        p.feed(page.text)\n        # reversed saving (newest has newest id)\n        for n in reversed(p.news_list):\n            try:\n                TopNews.objects.get(postId=n.postId)\n            except TopNews.DoesNotExist:\n                TopNews(postId=n.postId,\n                        title=n.title,\n                        imgUrl=n.imgUrl,\n                        pageUrl=base_url + n.pageUrl\n                        ).save()\n            finally:\n                pass\n","sub_path":"parseNbaUdn/scheduled_jobs.py","file_name":"scheduled_jobs.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"95018482","text":"import numpy as np\nimport time\n# import the file whose functions you want to plot as a graph\nfrom Tests.sorts import bubble_sort, insertion_sort, merge_sort, quick_sort, selection_sort\nimport matplotlib.pyplot as plt\n\n\nclass TestMain:\n\n    n = 0\n\n    def __time_it(self, type):\n\n        sum = 0\n        maximum = 0\n\n        for i in range(50):\n            test = np.random.randint(0, 100, self.n)\n            start_time = time.time()\n            self.__parser_dict(type)(test)\n            runtime = time.time() - start_time\n            if i == 0:\n                minimum = runtime\n            minimum = min(minimum, runtime)\n            maximum = max(maximum, runtime)\n            sum += runtime\n\n        return minimum, sum/50, maximum\n\n    def __parser_dict(self, type):\n        dict = {\n            1: bubble_sort,\n            2: insertion_sort,\n            3: merge_sort,\n            4: quick_sort,\n            5: selection_sort\n        }\n        return dict[type]\n\n    def __count_elements(self):\n        if self.n < 1100000:\n            while self.n < 100:\n                self.n += 10\n                return self.n\n            while self.n < 1000:\n                self.n += 100\n                return self.n\n            while self.n < 10000:\n                self.n += 1000\n                return self.n\n            while self.n < 100000:\n                self.n += 10000\n                return self.n\n            while self.n <= 1000000:\n                self.n += 100000\n                return self.n\n\n    def plot(self, type, title):\n        avg = []\n        minimum = []\n        maximum = []\n        size = []\n        for i in range(45):\n            self.__count_elements()\n            run = self.__time_it(type)\n            minimum.append(run[0])\n            avg.append(run[1])\n            maximum.append(run[2])\n            size.append(self.n)\n\n        plt.plot(size, avg, 'g')\n        plt.ylabel('Время')\n        plt.xlabel('Длина массива')\n        plt.title(title)\n        plt.tight_layout()\n        plt.plot(size, minimum, 'b')\n        plt.plot(size, maximum, 'r')\n        plt.grid()\n        plt.show()\n        plt.savefig(title)\n\n\na = TestMain()\na.plot(4, title='Быстрая сортировка')\n","sub_path":"Tests/test_sort.py","file_name":"test_sort.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"307007543","text":"import math\nimport json\nimport time\nimport subprocess\nimport pynmea2\nfrom txrx import Utility\nfrom WavePacketBuilder import WAVEPacketBuilder\n\n\nclass GPSVehicle:\n    def __init__(self, vehicle_num, gps_sock, gui_sock, gui_lock):\n        self.vehicle_num = vehicle_num\n        self.gps_sock = gps_sock\n        self.sock = gui_sock\n        self.lock = gui_lock\n        self.wave_builder = WAVEPacketBuilder()\n        self.util = Utility()\n        self.key = \"keys/0/p256.key\"\n\n    def start(self):\n        last_nmea = pynmea2.parse(\"$GPGGA,000000.00,0000.0000,N,00000.0000,E,0,99,1.0,0.0,M,0.0,M,,*5C\")\n\n        while True:\n            gps_loc = self.gps_sock.recv(1024)\n            nmea = pynmea2.parse(gps_loc.split(b\":\")[1].decode().replace(\"GPS_GPGGA\", \"\").strip())\n            print(nmea)\n\n            lat = float(nmea.latitude)\n            lon = float(nmea.longitude)\n            last_lat = float(last_nmea.latitude)\n            last_lon = float(last_nmea.longitude)\n\n            speed = (\n                math.sqrt(math.pow(lat - last_lat, 2) + math.pow(lon 
- last_lon, 2))\n                * 36\n            )\n            heading = self.get_heading(nmea, last_nmea)\n\n            bsm_text = f\"{self.vehicle_num},{nmea.latitude},{nmea.longitude},{heading},{speed}\\n\"\n\n            message = self.build_packet(nmea.latitude, nmea.longitude, heading, speed, self.key)\n\n            last_nmea = nmea\n\n            self.send_to_radio(message)\n            self.send_to_gui(bsm_text)\n\n\n    def get_heading(self, nmea, last_nmea):\n        if nmea.longitude == last_nmea.longitude:\n            # no change\n            if nmea.latitude == last_nmea.latitude:\n                return \"-\"\n            # heading North\n            elif nmea.latitude > last_nmea.latitude:\n                return \"N\"\n            # heading South\n            else:\n                return \"S\"\n        # heading East\n        elif nmea.longitude > last_nmea.longitude:\n            if nmea.latitude > last_nmea.latitude:\n                return \"NE\"\n            elif nmea.latitude < last_nmea.latitude:\n                return \"SE\"\n            else:\n                return \"E\"\n        # heading West\n        else:\n            if nmea.latitude > last_nmea.latitude:\n                return \"NW\"\n            elif nmea.latitude < last_nmea.latitude:\n                return \"SW\"\n            else:\n                return \"W\"\n\n\n    def build_packet(self, lat, lng, heading, speed, key):\n        speed = str(round(speed, 2))\n        bsm_text = f\"{self.vehicle_num},{lat},{lng},{heading},{speed}\\n\"\n        return self.wave_builder.get_wsm_payload(bsm_text, key)\n\n    def send_to_radio(self, message):\n        print(\"Sending BSM to radio\")\n\n        bsm = self.util.inject_time(message)\n\n        loader = subprocess.Popen((\"echo\", \"-n\", \"-e\", bsm), stdout=subprocess.PIPE)\n        sender = subprocess.check_output(\n            (\"nc\", \"-w0\", \"-u\", \"localhost\", \"52001\"), stdin=loader.stdout\n        )\n\n\n    def send_to_gui(self, message):\n        bsm = message.split(\",\")\n\n        decoded_data = {}\n\n        decoded_data['id'] = bsm[0]\n        decoded_data['x'] = bsm[1]\n        decoded_data['y'] = bsm[2]\n        decoded_data['heading'] = bsm[3]\n        decoded_data['speed'] = bsm[4]\n\n        decoded_data['sig'] = True\n        decoded_data['elapsed'] = 0\n        decoded_data['recent'] = True\n        decoded_data['receiver'] = True\n\n        vehicle_data_json = json.dumps(decoded_data)\n\n        with self.lock:\n            self.sock.send(vehicle_data_json.encode())\n","sub_path":"misc_files/GPSVehicle.py","file_name":"GPSVehicle.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"510238380","text":"import sys\n\nimport numpy as np\nfrom scipy import ndimage\nimport cv2\n\ndef HarrisDetector(Img, WindowSize=3, K=0.05):\n\t\t\n\tImg = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY)\n\tCornerList = np.zeros(Img.shape)\n\t#Dy, Dx = np.gradient(Img)\n\tDx = cv2.Sobel(Img, cv2.CV_64F, 1, 0, ksize=3)\n\tDy = cv2.Sobel(Img, cv2.CV_64F, 0, 1, ksize=3)\n\tIxx = Dx**2\n\tIxy = Dx*Dy\n\tIyy = Dy**2\n\n\tHeight, Width = Img.shape\n\tOffset = int(WindowSize/2)\n\t\n\tfor Y in range(Offset, Height-Offset):\n\t\tfor X in range(Offset, Width-Offset):\n\t\t\tSxx = np.sum(Ixx[Y-Offset:Y+Offset+1, X-Offset:X+Offset+1])\n\t\t\tSxy = np.sum(Ixy[Y-Offset:Y+Offset+1, X-Offset:X+Offset+1])\n\t\t\tSyy = np.sum(Iyy[Y-Offset:Y+Offset+1, X-Offset:X+Offset+1])\n\n\t\t\tDet = Sxx*Syy-Sxy**2\n\t\t\tResponse = Det - K*((Sxx+Syy)**2)\n\t\t\tCornerList[Y, X] = Response\n\n\treturn CornerList\n\ndef Main():\n\tImg = cv2.imread(sys.argv[1])\n\tif Img is None:\n\t\tprint(\"Fail to read image\")\n\t\texit()\n\n\tcv2.imshow(\"OriginalImg\", Img)\n\t\n\tCornerList = HarrisDetector(Img.copy())\n\tImg[CornerList>0.01*CornerList.max()] = [0, 0, 255]\n\t\n\tcv2.imshow(\"Corners\", 
Img)\n\t\n\tcv2.waitKey()\n\tcv2.destroyAllWindows()\n\nMain()\n","sub_path":"CV/HW4/HarrisCorner.py","file_name":"HarrisCorner.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234365362","text":"def create_dict(T):\r\n # create dictionary with {key:value, ...}\r\n # as {(temp,n):index, (temp,n):index,...}\r\n # where n is from 0-N (to force uniqueness)\r\n res = {}\r\n n = 0\r\n for (i,t) in enumerate(T):\r\n res[(t,n)] = i\r\n n += 1\r\n return res\r\n\r\nfrom collections import OrderedDict\r\ndef sort_temperatures(D):\r\n # sort dictionary based on key (temperature)\r\n res = OrderedDict()\r\n for tuple in sorted(D.keys()):\r\n res[tuple] = D[tuple]\r\n return res\r\n\r\ndef solution(T):\r\n # create dictionary\r\n temperature_dict = create_dict(T)\r\n\r\n # sort dictionary\r\n sorted_temperature_dict = sort_temperatures(temperature_dict)\r\n\r\n # keep track of anchor temp\r\n anchor = list(sorted_temperature_dict.values())[0]\r\n\r\n # initialize length\r\n length = 0\r\n\r\n # increase length and reset anchor if index <= current anchor or == +1\r\n for k,v in sorted_temperature_dict.items():\r\n if v <= anchor or v == anchor + 1:\r\n length += 1\r\n anchor = v\r\n else:\r\n break\r\n return length\r\n\r\nif __name__ == '__main__':\r\n T = [5, -2, 3, 8, 6]\r\n print(solution(T))\r\n T = [-5, -5, -5, -42, 6, 12]\r\n print(solution(T))\r\n\r\n","sub_path":"carbonlighthouse.py","file_name":"carbonlighthouse.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444055365","text":"from __future__ import annotations\nfrom typing import List, Optional, Dict\nfrom itertools import count\nfrom neat.genotype.genome import Genome\nfrom neat.genotype.compatibility import calculate_compatibility_score\n\n\nclass Specie:\n\n def __init__(self, specie_id, generation):\n self.specie_id = specie_id\n self.representative: Optional[Genome] = None\n self.members: List[Genome] = []\n self.created = generation\n self.last_improved = generation\n self.fitness: Optional[int] = None\n self.adjusted_fitness: Optional[int] = None\n self.fitness_history: List[int] = []\n\n def get_all_fitnesses(self):\n return [genome.fitness for genome in self.members]\n\n def update(self, representative, members):\n self.representative = representative\n self.members = members\n\n\nclass SpeciesContainer:\n\n def __init__(self, config: Config):\n self.species: Dict[int, Specie] = {}\n self.specie_indexer = count(1)\n # a lookup from genome to specie based on the\n # genome_indexer and the specie_indexer as the ids\n self.config: Config = config\n\n def assign_specie(self, population: Dict[int, Genome], curr_gen):\n \"\"\"Assign each genome in the population its proper specie\"\"\"\n unassigned = set(population)\n new_representatives: Dict[int, int] = {}\n new_members: Dict[int, List[Genome]] = {}\n\n # assign new representatives for each specie\n for specie_id, specie in self.species.items():\n best_candidate_score = float(\"inf\")\n best_candidate_id = None\n for genome_id in unassigned:\n genome = population[genome_id]\n compatibility_score = calculate_compatibility_score(specie.representative, genome)\n if compatibility_score < best_candidate_score:\n best_candidate_score = compatibility_score\n best_candidate_id = genome_id\n\n new_representatives[specie_id] = best_candidate_id\n new_members[specie_id] = 
[population[best_candidate_id]]\n unassigned.remove(best_candidate_id)\n\n while unassigned:\n genome_id = unassigned.pop()\n genome = population[genome_id]\n\n best_candidate_score = float(\"inf\")\n best_candidate_id = None\n\n for specie_id, representative_id in new_representatives.items():\n representative = population[representative_id]\n compatibility_score = calculate_compatibility_score(representative, genome)\n if compatibility_score < self.config.species_difference and compatibility_score < best_candidate_score:\n best_candidate_score = compatibility_score\n best_candidate_id = specie_id\n\n # found a specie to assign the genome to\n if best_candidate_id is not None:\n new_members[best_candidate_id].append(genome)\n # Have not found any species to which the genome can be assigned to\n # Create a new specie\n else:\n new_specie_id = next(self.specie_indexer)\n new_representatives[new_specie_id] = genome_id\n new_members[new_specie_id] = [genome]\n\n # Update class instance SpecieCollection\n for specie_id, representative_id in new_representatives.items():\n specie = self.species.get(specie_id)\n\n if specie is None:\n new_specie = Specie(specie_id, curr_gen)\n self.species[specie_id] = new_specie\n\n members = new_members[specie_id]\n self.species[specie_id].update(population[representative_id], members)\n","sub_path":"neat/specie.py","file_name":"specie.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128204887","text":"# count num lines with 'action: ' and make a bucket for each 'user: {user-name}'\n\nusers = {}\n\ndef countUsers():\n with open('../ourRepo.txt') as file:\n for line in file:\n if \"action :\" in line:\n parts = line.split(',')\n for part in parts:\n if \"user :\" in part:\n user = part.split(\"user : \")[1].rstrip()\n if user not in users:\n users[user] = 1\n else:\n users[user] += 1\n\n\n\ncountUsers()\n\nsortedUsers = sorted(users, key=users.get)\nprint(\"The list below identifies how much activity each user had on all issues:\\n\")\n\n\nwith open('uneven-issue-handling.csv', 'w') as file:\n file.write('user, number of issues handled\\n')\n for user in sortedUsers:\n count = str(users[user])\n file.write(user + \", \" + count +'\\n')\n\n #Right now this tool gathers ones that don't have milestones. \n #This is identified as those with a new line behind it. 
\n\nprint(\"\\nThe person to close an issue is usually the one that fixed it.\")\nprint(\"The following list identifies the number of issues that a user was the last person to handle the issue:\")","sub_path":"features/9-issue-per-milestone/IssueHandler.py","file_name":"IssueHandler.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"504719283","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, render_template, request, abort\n\nfrom .permissions import content_preview_permission\nfrom .models import Activity, Video\n\ncontent_bp = Blueprint(\"activity_content\", __name__)\n\n\n@content_bp.route('/activity')\ndef activity():\n    try:\n        page = int(request.args.get('page'))\n    except Exception as e:\n        page = 1\n    pagination = Activity.query.filter_by(publish=True).paginate(page, per_page=20)\n    weekly_popular_top10 = Activity.weekly_popular_top10()\n    return render_template('activity/home.html', activity_list=pagination,\n                           category_url=category, current_page='activity', weekly_popular_top10=weekly_popular_top10)\n\n\n@content_bp.route('/activity/<category>')\ndef category(category):\n    try:\n        page = int(request.args.get('page'))\n    except Exception as e:\n        page = 1\n\n    pagination = Activity.query.filter_by(category=category).filter_by(publish=True).paginate(page, per_page=20)\n    category_dict = {\n        'policy': u'幼教政策',\n        'news': u'幼教新闻',\n        'events': u'幼教事件',\n        'research': u'理论研究',\n        'activity': u'实践活动'\n    }\n    category_name = category_dict[category]\n    weekly_popular_top10 = Activity.weekly_popular_top10()\n    return render_template('activity/home.html', activity_list=pagination, category_name=category_name,\n                           category_url=category, current_page='activity', weekly_popular_top10=weekly_popular_top10)\n\n\n@content_bp.route('/activity/<int:id>/')\ndef activity_view(id):\n    obj = Activity.query.get(id)\n    category_dict = {\n        'policy': u'幼教政策',\n        'news': u'幼教新闻',\n        'events': u'幼教事件',\n        'research': u'理论研究',\n        'activity': u'实践活动'\n    }\n    category_name = category_dict[obj.category]\n    return render_template('activity/activity.html', activity=obj, category_name=category_name,\n                           current_page='activity')\n\n\n@content_bp.route('/school/')\ndef school():\n    return render_template('school/home.html', current_page='school',\n                           Video=Video)\n\n@content_bp.route('/video/<int:video_id>')\ndef video_detail(video_id):\n    video = Video.query.get_or_404(video_id)\n    if not video.publish and not content_preview_permission.can():\n        abort(404)\n    return render_template('school/video_detail.html', video=video)\n\n\n@content_bp.route('/school/<category>/')\ndef school_sub(category):\n    if category == 'lecture':\n        video_list = Video.query.filter(Video.category==u'优秀讲座').limit(9)\n        return render_template('school/sub_node.html', current_page='school',\n                               video_list=video_list)\n    else:\n        abort(404)\n\n\n@content_bp.route('/school/teacher/')\ndef school_teacher():\n    return render_template('school/teacher_training.html', current_page='school')\n\n\n@content_bp.route('/school/product/')\ndef school_product():\n    return render_template('school/product_training.html', current_page='school')\n\n\n@content_bp.route('/school/product/detail/')\ndef school_teacher_detail():\n    return render_template('school/video_detail.html', current_page='school')\n\n\n\n\n\n@content_bp.route('/research/home/')\ndef research_home():\n    research_events = Activity.query.filter_by(category='research').filter_by(publish=True).limit(7).all()\n    research_result = 
Activity.query.filter_by(category='achievement').filter_by(publish=True).limit(7).all()\n    data = {\n        'research_events': research_events,\n        'research_result': research_result\n    }\n    return render_template('research/home.html', data=data, current_page='research')\n\n\n@content_bp.route('/research/<category>/<page>/')\ndef research_activity(category, page):\n    try:\n        page = int(page)\n    except ValueError:\n        page = 1\n\n    pagination = Activity.query.filter_by(category=category).filter_by(publish=True).paginate(page, per_page=20)\n    category_dict = {\n        'event': u'教研活动',\n        'achievement': u'教研成果'\n    }\n    category_name = category_dict[category]\n    return render_template('research/research_activity.html', category_name=category_name, activity_list=pagination,\n                           current_page='research')\n\n\n@content_bp.route('/research/post/<int:id>')\ndef research_post(id):\n    obj = Activity.query.get(id)\n    category_dict = {\n        'researchevents': u'教研活动',\n        'researchresult': u'教研成果'\n    }\n    category_name = category_dict[obj.category]\n    return render_template('research/research_activity_content.html', category_name=category_name, activity=obj,\n                           current_page='research')\n\n\n@content_bp.route('/research/teacher/')\ndef research_teacher():\n    return render_template('research/teacher.html', current_page='research')\n\n\n@content_bp.route('/page/about/')\ndef page_about():\n    return render_template('pages/about.html')\n\n\n\n","sub_path":"youjiao/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"259984474","text":"#!/usr/bin/env python3\nfrom charm.toolbox.pairinggroup import PairingGroup\nfrom charm.schemes.grpsig.groupsig_bgls04 import ShortSig\nfrom charm.core.engine.util import objectToBytes, bytesToObject\nfrom pwn import remote\nimport sys\nimport os\nimport time\nimport datetime\nimport random\nimport csv\nimport sqlite3\n\nBUILDINGS = {1: \"DerTian\", 2: \"MingDa\", 3: \"XiaoFu\"}\nGS_PROTOCOL = 'ShortSig'\nGROUP = PairingGroup('MNT224')\nRID_MAX = 10 ** 10\n\ndef gettime():\n    return time.strftime(\"%Y%m%d%H%M\", time.localtime(time.time()))\n\nclass Oracle:\n    def __init__(self):\n        self.group = PairingGroup('MNT224')\n        self.gs_protocol = eval(GS_PROTOCOL)(self.group)\n        self.path = f'parameters/{GS_PROTOCOL.lower()}'\n        gpk_path = os.path.join(self.path, 'public/gpk')\n        self.gpk = bytesToObject(open(gpk_path, 'rb').read(), self.group)\n\n    def is_valid(self, msg):\n        return True\n\n    def verify(self, msg, signature):\n        signature = bytesToObject(signature, self.group)\n        return self.is_valid(msg) and \\\n            self.gs_protocol.verify(self.gpk, msg, signature)\n\nclass School:\n    def __init__(self):\n        self.oracle = Oracle()\n\n    def record(self, msg, signature):\n        building, timestamp = msg.split('||')\n        rid = random.randrange(RID_MAX)\n        new_record = f'{rid}, {building}, {timestamp}, {signature}\\n'\n        #SQL\n        if not os.path.exists('database.db'):\n            conn = sqlite3.connect('database.db')\n            c = conn.cursor()\n            c.execute('''CREATE TABLE datas (Rid,Building,Timestamp,Signature)''')\n        else:\n            conn = sqlite3.connect('database.db')\n            c = conn.cursor()\n        c.execute(f\"INSERT INTO datas VALUES ({rid},'{building}','{timestamp}','{signature}')\")\n        conn.commit()\n        conn.close()\n        \n        '''\n        if not os.path.exists('database.csv'):\n            header = 'rid, building, timestamp, signature\\n'\n            open('database.csv', 'w').write(header)\n        open('database.csv', 'a').write(new_record)\n        '''\n\n    def verify(self, msg_sig):\n        msg, signature = msg_sig.split(',')\n        if 
self.oracle.verify(msg, signature):\n self.record(msg, signature)\n return True\n else:\n return False\n ''' \n def read_database(self):\n return csv.reader(open('database.csv','r', newline=''))\n '''\n def send_data_to_cdc(self):\n current_time = gettime()\n current_time = f'{current_time[:4]}-{current_time[4:6]}-{current_time[6:8]}'\n today = datetime.datetime.strptime(current_time,'%Y-%m-%d')\n #today = datetime.date(int(current_time[:4]), current_time[4:6], int(current_time[6:8]))\n \n #database = self.read_database()\n # skip header\n #next(database)\n buf_data = []\n cnt = 0\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n for data in c.execute(\"SELECT * FROM datas ORDER BY Timestamp\"):\n #data[2] = data[2].strip()\n tmp = f'{data[2][:4]}-{data[2][4:6]}-{data[2][6:8]}'\n dataday = datetime.datetime.strptime(tmp,'%Y-%m-%d')\n #dataday = datetime.date(int(data[2][:4]), int(data[2][4:6]), int(data[2][6:8]))\n if (today - dataday).days <= 14 and (today - dataday).days >= 0:\n cnt += 1\n buf_data.append(data)\n print(str(cnt))\n for data in buf_data:\n print(str(data))\n conn.close()\n \n\nif __name__ == '__main__':\n school = School()\n msg_sig = input()\n # msg from cdc\n if msg_sig == \"INFECTED\":\n school.send_data_to_cdc()\n else:\n verdict = school.verify(msg_sig)\n if verdict:\n print('OK')\n else:\n print('NO')\n","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"134459711","text":"\nimport yaml\nimport sys\nimport urllib.request #for generating words \nimport random \nimport unittest\nsys.path.append(\"../\")\nimport praw \n\n\nclass automatedTesting(): \n\n def readOnlylogin(self):\n login_doc='../../credentials.yaml'\n with open(login_doc,'r') as stream:\n credentials=yaml.load(stream)\n self.reddit=None\n self.username=credentials['username']\n self.appName=credentials['user_agent']\n self.subreddit=credentials['subreddit']\n self.password=credentials['password']\n self.personal_use_script=credentials['personal_use_script']\n self.client_secret=credentials['client_secret']\n #If login works \n r = praw.Reddit(client_id=self.personal_use_script,\n client_secret=self.client_secret,\n user_agent=self.appName)\n return r\n\n #Test the no read for praw \n def readOnly(self):\n try: \n r = self.readOnlylogin()\n title = 'Teehee'\n body = 'Teehee'\n subreddit = r.subreddit('comp587testing')\n subreddit.submit(title=title, selftext=body) \n return \"Success\"\n except Exception as e:\n return str(e)\n def testReadOnly(self):\n assert(self.readOnly()==\"USER_REQUIRED: 'Please log in to do that.'\")\n \nif __name__=='__main__' :\n auto=automatedTesting()\n auto.testReadOnly()","sub_path":"pull_request_3/test_submission_parts.py","file_name":"test_submission_parts.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"486204854","text":"import numpy as np\n# from scipy import spatial\nimport scipy.io as sio\nfrom scipy.interpolate import UnivariateSpline\nimport cv2\nimport time\nimport copy\nimport os.path\n\nfrom flyTrackerUtilities import *\nfrom flyTrackerBackground import *\nfrom flyTrackerClustering import *\nimport flyTrackerSettings\n\nimport argparse\n\n# TODO:\n# (1) rotate movie to align speakers, determine pixels->mm\n# (2) Update the background every T seconds (add it to a running median)\n\n# DUDI REQUESTS:\n# 1) Run one 
procedure that tracks only the LED and not the flies over all the movie. Hopefully\n# by choosing a small enough ROI around the center of the LED, we will be able to detect the LED ON-OFF.\n# 2) Run the full tracking starting at another frame - so maybe add a parameter that allows to\n# start tracking later than frame 1.\n# 3) The analyzed.mat is always exactly 28,712K, while the npz can run between 100K to 2M depending,\n# I think, on the movie length.\n# 4)\n\n# https://sites.google.com/site/backgroundsubtraction/overview\n# https://github.com/andrewssobral/bgslibrary\n\ndef fixFlyIdentity(allCenters, allEllipses, allReducedEllipses, allLines):\n    # for now, just see if the identity is well-enough matched by the reduced pixel location...\n    # we should do a better job later (eg, momentum, check for how close they are, etc)\n\n    newCenters, newLabels, oldLabels = matchFlies(allLines[:, 2:4])\n    if (newLabels != oldLabels):\n        bestCenters = np.zeros(allCenters.shape)\n        bestEllipses = np.zeros(allEllipses.shape)\n        bestReducedEllipses = np.zeros(allReducedEllipses.shape)\n        bestLines = np.zeros(allLines.shape)\n\n        for ii in range(len(newLabels)):\n            bestCenters[oldLabels[ii]] = allCenters[newLabels[ii]]\n            bestEllipses[oldLabels[ii]] = allEllipses[newLabels[ii]]\n            bestReducedEllipses[oldLabels[ii]] = allReducedEllipses[newLabels[ii]]\n            bestLines[oldLabels[ii]] = allLines[newLabels[ii]]\n\n        allCenters = copy.copy(bestCenters)\n        allEllipses = copy.copy(bestEllipses)\n        allReducedEllipses = copy.copy(bestReducedEllipses)\n        allLines = copy.copy(bestLines)\n\n    return allCenters, allEllipses, allReducedEllipses, allLines;\n\n\ndef checkJumps(allCenters, allEllipses, allReducedEllipses, allLines):\n    # return allCenters, allEllipses, allReducedEllipses, allLines\n\n    # should I be using reducedBodyEllipse or just normal Ellipses?\n    # if velocity for one fly is high enough\n    # use the momentum of each fly to guess identity\n    if (frameCount <= 5) or (frameCount - flyTrackerSettings.forceMinFrame) <= 5:\n        return allCenters, allEllipses, allReducedEllipses, allLines\n\n    fdist = findDist(allReducedEllipses, allReducedEllipses)\n    fdist[np.eye(nFlies) == 1] = 100000\n    momentum = np.mean(np.sqrt(np.sum(np.double(np.diff(saveReducedBodyEllipse[frameCount-5:frameCount,:,0:2],axis=0))**2,axis=-1)),axis=0)\n\n    if np.any(np.logical_and(momentum > 13, fdist < 80)):\n        saveIdentityFlip[frameCount] = 1\n    return allCenters, allEllipses, allReducedEllipses, allLines\n\n\ndef unwrapAngles(inAngles):\n    # when angles go from 0-360 or vice versa, unwrap them for interpolation!\n    outAngles = np.zeros(inAngles.shape)\n    dA = np.diff(inAngles)\n    offset = 0;\n    outAngles[0] = inAngles[0]\n    for ii in range(outAngles.shape[0]-1):\n        if (dA[ii] > 95):\n            offset -= 360\n        elif (dA[ii] < -95):\n            offset += 360\n\n        outAngles[ii+1] = inAngles[ii+1] + offset\n\n    return outAngles\n\n\ndef findAngles(lastAngles,allEllipses,allReducedEllipses,allLines,numPts,numReducedPts):\n    momentumLag = 10\n    degreeVec = [x*180 for x in range(-2,3)]\n    angles = np.zeros(nFlies)\n\n    for ii in range(nFlies):\n        if (frameCount > momentumLag):\n            momentum = np.mean(np.diff(saveAngle[int(frameCount-momentumLag):int(frameCount),ii]))\n        else:\n            momentum = 0\n\n        dirGuess = saveAngle[frameCount-1,ii] + momentum\n        # dirGuess = saveAngle[frameCount-1][ii];\n\n        # choose the best of our options\n\n        lineAngle = np.rad2deg(np.arctan(allLines[ii,0]/allLines[ii,1]))+90\n        newDir = degreeVec + lineAngle\n        dirDiff = abs(newDir - dirGuess) % 360\n        dirDiff[dirDiff > 180] = 360 - 
dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n dirDiff1 = dirDiff[choice[0][0]]\n angle1 = (lineAngle + degreeVec[choice[0][0]])\n\n newDir = degreeVec + allEllipses[ii,4]\n dirDiff = abs(newDir - dirGuess) % 360\n dirDiff[dirDiff > 180] = 360 - dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n dirDiff[choice[0][0]]\n (allEllipses[ii,4] + degreeVec[choice[0][0]])\n\n newDir = degreeVec + allReducedEllipses[ii,4]\n dirDiff = abs(newDir - dirGuess) % 360\n dirDiff[dirDiff > 180] = 360 - dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n dirDiff3 = dirDiff[choice[0][0]]\n angle3 = (allReducedEllipses[ii,4] + degreeVec[choice[0][0]])\n\n # dD = np.array([dirDiff1,dirDiff2,dirDiff3])\n # fangle = np.array([angle1,angle2,angle3])\n dD = np.array([dirDiff1,dirDiff3])\n fangle = np.array([angle1,angle3])\n dD = np.array([dirDiff1])\n fangle = np.array([angle1])\n choice = np.nonzero(dD == np.min(dD))\n\n newDir = degreeVec + fangle[choice[0][0]]\n dirDiff = abs(newDir - saveAngle[frameCount-1,ii]) % 360\n dirDiff[dirDiff > 180] = 360 - dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n angles[ii] = newDir[choice[0][0]]\n\n return angles % 360\n\n\ndef findAngleFlips():\n\n # smoothLag = 60\n # for ff in range(frameCount):\n # mw = identifyMoonwalkers(ff,smoothLag) # can probably do this more efficiently with a convolution or something\n\n numRotatingFrames = 10\n rotatedThreshold = 90\n # if a fly moves ~ 90 degrees in a couple frames, REALLY examine that point\n for ff in range(nFlies):\n scarypts = np.where(np.abs(movingsum(np.diff(saveAngle[0:frameCount+1,ff],axis=0),numRotatingFrames)) > rotatedThreshold)[0]\n scarypts = scarypts[scarypts != 0]\n scarypts = np.append(np.zeros(1),scarypts)\n scarypts = np.append(scarypts,frameCount)\n\n scarystart = np.where(np.diff(scarypts) > numRotatingFrames/2)[0]+1\n scaryend = np.where(np.diff(scarypts) > numRotatingFrames/2)[0]\n\n if (any(scarypts!=0)):\n for cc in range(len(scarystart)):\n if identifyMoonwalkers(round((scarypts[scaryend[cc]]+scarypts[scarystart[cc]])/2),scarypts[scarystart[cc]]-scarypts[scaryend[cc]])[ff]:\n flipFlies(ff,scarypts[scaryend[cc]],scarypts[scarystart[cc]])\n # fixRotation(ff,scarypts[scarystart[cc-1]],scarypts[scaryend[cc]])\n\n\ndef findCrazyRotations():\n numRotatingFrames = 10\n rotatedThreshold = 90\n # if a fly moves ~ 90 degrees in a couple frames, REALLY examine that point\n for ff in range(nFlies):\n scarypts = np.where(np.abs(movingsum(np.diff(saveAngle[0:frameCount+1,ff],axis=0),numRotatingFrames)) > rotatedThreshold)[0]\n scarypts = scarypts[scarypts != 0]\n scarypts = np.append(np.zeros(1),scarypts)\n scarypts = np.append(scarypts,frameCount)\n\n scarystart = np.where(np.diff(scarypts) > numRotatingFrames/2)[0]+1\n scaryend = np.where(np.diff(scarypts) > numRotatingFrames/2)[0]\n\n if (any(scarypts!=0)):\n for cc in range(len(scarystart)):\n if identifyMoonwalkers(round((scarypts[scaryend[cc]]+scarypts[scarystart[cc]])/2),scarypts[scarystart[cc]]-scarypts[scaryend[cc]])[ff]:\n flipFlies(ff,scarypts[scaryend[cc]],scarypts[scarystart[cc]])\n\n\ndef fixRotation(flyNum, rotationStart, rotationEnd):\n firstAngle = saveAngle[max(rotationStart-5,1):rotationStart,flyNum] % 360\n firstAngle[firstAngle > 180] = firstAngle[firstAngle > 180] - 360\n lastAngle = saveAngle[rotationEnd:min(rotationEnd+5,frameCount),flyNum]% 360\n lastAngle[lastAngle > 180] = lastAngle[lastAngle > 180] - 360\n\n firstAngle = 
np.mean(firstAngle)\n lastAngle = np.mean(lastAngle)\n\n dist1 = abs(lastAngle - firstAngle)\n dist2 = abs(lastAngle - (firstAngle + 360))\n dist3 = abs(lastAngle+360 - firstAngle)\n\n if (dist1 <= dist2 and dist1 <= dist3):\n print('rotating angle from ' + str(firstAngle) + ' to ' + str(lastAngle) + ' starting at frame ' + str(rotationStart))\n saveAngle[rotationStart:rotationEnd,flyNum] = np.linspace(firstAngle,lastAngle,rotationEnd-rotationStart)\n elif (dist2 <= dist1 and dist2 <= dist3):\n firstAngle += 360\n print('rotating angle from ' + str(firstAngle) + ' to ' + str(lastAngle) + ' starting at frame ' + str(rotationStart))\n saveAngle[rotationStart:rotationEnd,flyNum] = np.linspace(firstAngle,lastAngle,rotationEnd-rotationStart)\n else:\n lastAngle += 360\n print('rotating angle from ' + str(firstAngle) + ' to ' + str(lastAngle) + ' starting at frame ' + str(rotationStart))\n saveAngle[rotationStart:rotationEnd,flyNum] = np.linspace(firstAngle,lastAngle,rotationEnd-rotationStart)\n\n\ndef guessMalesAndFemales(useframe):\n if (frameCount > 10):\n corr = np.zeros( (nFlies, nFlies,int(np.floor(useframe)+1 - max(np.floor(useframe)-1000,1))) ) + 100\n ismale = np.zeros(nFlies)\n for ii in range(nFlies):\n for jj in range(nFlies):\n if ii == jj:\n continue\n\n useframe = np.floor(useframe).astype(np.intp)\n # corr[ii,jj] = np.corrcoef(saveAngle[max(useframe-1000,0):useframe+1,ii],saveAngle[max(useframe-1000+30,0):useframe+30+1,jj])[0,1]\n flyAngles = np.rad2deg(np.arctan2(((saveCenters[max(useframe-1000,1):useframe+1,ii,0])-(saveCenters[max(useframe-1000,1):useframe+1,jj,0])), \\\n ((saveCenters[max(useframe-1000,1):useframe+1,ii,1])-(saveCenters[max(useframe-1000,1):useframe+1,jj,1]))))\n corr[ii,jj,:] = (flyAngles - saveAngle[max(useframe-1000,1):useframe+1,ii] + 90) % 360\n corr[ii,jj,corr[ii,jj,:] > 180] = 360 - corr[ii,jj,corr[ii,jj,:] > 180]\n\n tmp = np.min(corr[ii],axis=0)\n ismale[ii] = np.mean(tmp)\n else:\n ismale = np.zeros(nFlies)\n\n return ismale\n\n\ndef computeMovement(currFrame=None):\n if currFrame is None:\n currFrame = frameCount\n\n currFrame = int(currFrame)\n FV = np.zeros(nFlies)\n LV = np.zeros(nFlies)\n RV = np.zeros(nFlies)\n\n np.zeros(nFlies)\n if (currFrame > 0):\n for ii in range(nFlies):\n # make sure I'm not off by one..\n DV = np.squeeze(np.diff(saveCenters[(currFrame-1):(currFrame+1),ii,:],axis=0))\n mvAngle = np.arctan2(DV[0],DV[1])\n TV = np.sqrt(np.sum(np.float32(DV[:])**2))\n\n FV[ii] = np.sum(TV * np.cos(np.deg2rad(saveAngle[currFrame,ii]+90 % 360)-mvAngle))\n LV[ii] = np.sum(TV * np.sin(np.deg2rad(saveAngle[currFrame,ii]+90 % 360)-mvAngle))\n\n # oldAngle = saveAngle[currFrame-1,ii]%360 if saveAngle[currFrame-1,ii]%360 < 180 else saveAngle[currFrame-1,ii]%360 - 360\n # newAngle = saveAngle[currFrame,ii]%360 if saveAngle[currFrame,ii]%360 < 180 else saveAngle[currFrame,ii]%360 - 360\n # RV = newAngle - oldAngle % 360\n\n RV[ii] = saveAngle[currFrame,ii] - saveAngle[currFrame-1,ii]\n if (RV[ii] > 180):\n RV[ii] -= 360\n elif (RV[ii] < -180):\n RV[ii] += 360\n\n return FV,LV,RV\n\n\ndef identifyMoonwalkers(targetFrame,numFrameSmoothing):\n moonwalk = np.mean(saveFV[int(round(targetFrame-numFrameSmoothing/2)):int(round(targetFrame+numFrameSmoothing/2+1)),:],axis=0) < 0;\n\n return moonwalk\n\ndef identifyMoonwalkersForAllTime(numFrameSmoothing):\n moonwalk = np.zeros((nFlies,frameCount-numFrameSmoothing))\n weights = np.repeat(1.0, numFrameSmoothing)/numFrameSmoothing\n\n for ii in range(nFlies):\n moonwalk[ii] = np.convolve(saveFV[0:frameCount+1], 
weights, 'valid') > 0\n\n return moonwalk\n\n\ndef flipFlies(flyNum,startOrientation,endOrientation):\n print('flipping fly ' + str(flyNum) + ' between ' + str(startOrientation) + ' and ' + str(endOrientation))\n saveFV[int(startOrientation):int(endOrientation)+1,int(flyNum)] *= -1;\n saveLV[int(startOrientation):int(endOrientation)+1,int(flyNum)] *= -1;\n saveAngle[int(startOrientation):int(endOrientation)+1,int(flyNum)] += 180;\n\n\ndef orientFlies():\n saveFV[0:startFrame+1, :] = 0\n saveLV[0:startFrame+1, :] = 0\n saveRV[0:startFrame+1, :] = 0\n for ii in range(nFlies):\n if np.mean(saveFV[startFrame:frameCount, ii]) < 0:\n print('oh dear, fly #' + str(ii) + ' needs some guidance!')\n saveFV[:, ii] *= -1\n saveLV[:, ii] *= -1\n saveAngle[0:frameCount + 1, ii] += 180\n else:\n print('fly #' + str(ii) + ' is heading in the right direction in life!')\n\n\ndef angleCost(currAngle,nextAngle):\n degreeVec = [x*180 for x in range(-2,3)];\n np.zeros(nFlies)\n\n newDir = degreeVec + nextAngle\n dirDiff = abs(newDir - currAngle) % 360\n dirDiff[dirDiff > 180] = 360 - dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n dirDiff1 = dirDiff[choice[0][0]]\n angle1 = (nextAngle + degreeVec[choice[0][0]])\n\n return dirDiff1, angle1\n\n\ndef closestAngle(currAngle,nextAngle):\n degreeVec = [x * 180 for x in range(-2, 3)]\n # angles = np.zeros(nFlies)\n\n newDir = degreeVec + nextAngle\n dirDiff = abs(newDir - currAngle) % 360\n dirDiff[dirDiff > 180] = 360 - dirDiff[dirDiff > 180]\n choice = np.nonzero(dirDiff == np.min(dirDiff))\n # dirDiff1 = dirDiff[choice[0][0]]\n angle1 = (nextAngle + degreeVec[choice[0][0]])\n\n return angle1\n\n\ndef searchFlyTree(currFrame,depth,lastAngle,flyNum):\n # minimize change in direction (maximize smoothness of angular velocity) as well as maximizing time spent moving forward (minimize LS)\n if depth == 0:\n lineAngle = np.rad2deg(np.arctan(saveFlyLines[currFrame,flyNum,0]/saveFlyLines[currFrame,flyNum,1]))+90\n # print('AT THE BOTTOM! DEPTH 0!! 
' + str(lastAngle) + ',' + str(lineAngle) + ', ' + str(saveBodyEllipse[currFrame,flyNum,4]) + ', ' + str(saveReducedBodyEllipse[currFrame,flyNum,4]))\n ellipseCost, ellipseAngle = angleCost(lastAngle,saveBodyEllipse[currFrame,flyNum,4])\n redCost, redAngle = angleCost(lastAngle,saveReducedBodyEllipse[currFrame,flyNum,4])\n lineCost, lineAngle = angleCost(lastAngle,lineAngle)\n # print('returning from the bottom')\n\n if lineCost <= redCost and lineCost <= ellipseCost:\n return lineAngle, lineCost\n elif redCost <= ellipseCost:\n return redAngle, redCost\n else:\n return ellipseAngle, ellipseCost\n\n if depth > 0:\n lineAngle = np.rad2deg(np.arctan(saveFlyLines[currFrame,flyNum,0]/saveFlyLines[currFrame,flyNum,1]))+90\n\n lineSearchAngles, lineSearchCost = searchFlyTree(currFrame+1,depth-1,lineAngle,flyNum)\n lineCost, lineAngle = angleCost(lastAngle,lineAngle)\n\n ellipseSearchAngles, ellipseSearchCost = searchFlyTree(currFrame+1,depth-1,saveBodyEllipse[currFrame,flyNum,4],flyNum)\n ellipseCost, ellipseAngle = angleCost(lastAngle,saveBodyEllipse[currFrame,flyNum,4])\n\n redSearchAngles, redSearchCost = searchFlyTree(currFrame+1,depth-1,saveReducedBodyEllipse[currFrame,flyNum,4],flyNum)\n redCost, redAngle = angleCost(lastAngle,saveReducedBodyEllipse[currFrame,flyNum,4])\n\n if lineCost+lineSearchCost <= redCost+redSearchCost and lineCost+lineSearchCost <= ellipseCost+ellipseSearchCost:\n return np.append(lineSearchAngles,lineAngle), lineCost+lineSearchCost\n elif redCost+redSearchCost <= ellipseCost+ellipseSearchCost:\n return np.append(redSearchAngles,redAngle), redCost+redSearchCost\n else:\n return np.append(ellipseSearchAngles,ellipseAngle), ellipseCost+ellipseSearchCost\n\n\ndef computeInterflyParameters():\n # angles between flies\n # velocities in the direction of other flies\n pass\n\n\ndef guessPersonalIdentityIssues():\n # find all frames where (1) centroids are very close and (2) following behavior is different before and after these time points\n minWorryingDist = 3;\n\n for ii in range(nFlies):\n badDist = np.where(np.logical_and(np.min(saveFlyDistances[:,ii,:],axis=1)/flyTrackerSettings.px2mm < minWorryingDist, saveFlyDistances[:,ii,(ii+1) % nFlies] > 0))[0]\n\n # subdivide into follower/chaser/neither...\n # jump from follower to chaser or vice versa is BAD\n for dd in range(badDist.shape[0]):\n if np.any(saveFlyDistances[badDist[dd],ii,:] == 0):\n g1 = guessMalesAndFemales(badDist[dd])\n g2 = guessMalesAndFemales(min(badDist[dd]+1000,frameCount))\n # print(abs(g1-g2))\n if not np.any(np.isnan(g1)) and not np.any(np.isnan(g2)):\n if np.any(abs(g1-g2) > 15):\n saveIdentityFlip[badDist[dd]] = 1;\n\n\ndef doubleCheckData():\n # make sure: is forward velocity moving forward? Are there sneaky bad tracking points? 
That sort of thing\n\n    print('double-checking flies')\n\n    angularThresh = 50\n    loopnum = 1\n    print(\"fixing major angle changes (+/- \" + str(angularThresh) + ' degrees)')\n    for loopInd in range(loopnum):\n        for ii in range(nFlies):\n            for ff in range(int(frameCount)-1):\n                dA = saveAngle[ff+1,ii] - saveAngle[ff,ii]\n                if (abs(dA%180) > angularThresh and abs(dA%180) < 180-angularThresh):\n                    # print('fixing fly ' + str(ii) + ' at point ' + str(ff+1))\n                    saveAngle[ff+1,ii] -= dA\n                if (abs(dA%360) > 180-angularThresh and abs(dA%360) < 180+angularThresh):\n                    if (saveAngle[ff+1,ii] < 0):\n                        saveAngle[ff+1,ii] += 180\n                    else:\n                        saveAngle[ff+1,ii] -= 180\n\n    for ii in range(int(flyTrackerSettings.forceMinFrame),int(frameCount)+1):\n        saveFV[ii],saveLV[ii],saveRV[ii] = computeMovement(ii)\n\n    orientFlies() # we should also be finding jumps and looking at orientations between each one\n    findAngleFlips()\n\n    for ii in range(nFlies):\n        x = np.linspace(0,frameCount,frameCount+1)\n        xs = np.linspace(0,frameCount,frameCount+1) - 0.5\n        unAngles = unwrapAngles(saveAngle[0:frameCount+1,ii])\n        spline = UnivariateSpline(x,unAngles)\n        saveAngle[0:frameCount+1,ii] = spline(xs) % 360\n\n    # recomputeMovement()\n    for ii in range(int(flyTrackerSettings.forceMinFrame),int(frameCount)+1):\n        saveFV[ii],saveLV[ii],saveRV[ii] = computeMovement(ii)\n\n    saveMFGuess[np.floor(frameCount/flyTrackerSettings.saveRate).astype(np.intp),0:nFlies] = guessMalesAndFemales(frameCount)\n    saveMFGuess[np.floor(frameCount/flyTrackerSettings.saveRate).astype(np.intp), nFlies] = frameCount\n\n    guessPersonalIdentityIssues()\n\n\ndef savedata(saveMode):\n    arenaCoords = flyTrackerSettings.arenaCoords\n    blinkyCoords = flyTrackerSettings.blinkyCoords\n    px2mm = flyTrackerSettings.px2mm\n    vNum = 1.4\n    if flyTrackerSettings.settings==\"playback\":\n        saveFileName = movieName[0:len(movieName)-4] + '_' + str(flyTrackerSettings.arenaNumber) + '_tracks.mat'\n    else:\n        saveFileName = movieName[0:len(movieName)-4] + '_tracks.mat'\n    printDB('saving data to ' + saveFileName + '...')\n\n    if saveMode==2: # update only for the range of frames\n        matDict = sio.loadmat(saveFileName) # TODO: first need to make sure it exists try/except...\n        idx = np.arange(flyTrackerSettings.forceMinFrame, frameCount+1, dtype=np.intp);\n        flyLength = np.mean(saveBodyEllipse[flyTrackerSettings.forceMinFrame:frameCount,:,3],axis=0)\n        # import ipdb; ipdb.set_trace()\n        # grow array if necessary\n        size_diff = max(idx) - matDict['flyLines'].shape[0] \n        if size_diff>=0:\n            keys = ('flyLines', 'flyEllipses', 'reducedFlyEllipses', 'flyLength', 'pxCenters', 'forwardVelocity','lateralVelocity', 'rotationalVelocity', 'saveFlyDistances', 'angles')\n            for key in keys:\n                append_shape = list(matDict[key].shape[1:])\n                append_shape.insert(0, size_diff+1)\n                matDict[key] = np.concatenate((matDict[key], np.zeros(append_shape)), axis=0)\n        # update relevant fields\n        matDict['flyLines'][idx] = saveFlyLines[idx]\n        matDict['blinkState'][0,idx] = blinkState[idx]\n        matDict['flyEllipses'][idx] = saveBodyEllipse[idx]\n        matDict['reducedFlyEllipses'][idx] = saveReducedBodyEllipse[idx]\n        matDict['flyLength'] = flyLength/px2mm\n        matDict['pxCenters'][idx] = saveCenters[idx]\n        matDict['forwardVelocity'][idx] = saveFV[idx]*fps/px2mm\n        matDict['lateralVelocity'][idx] = saveLV[idx]*fps/px2mm\n        matDict['rotationalVelocity'][idx] = saveRV[idx]*fps\n        matDict['saveFlyDistances'][idx] = saveFlyDistances[idx]/px2mm\n        matDict['angles'][idx] = saveAngle[idx]\n        print(' updating tracks in frames {0} to {1} 
'.format(flyTrackerSettings.forceMinFrame, frameCount))\n printDB(' updating tracks in frames {0} to {1} '.format(flyTrackerSettings.forceMinFrame, frameCount))\n doubleCheckData()\n elif saveMode==3: # update LED state only\n matDict = sio.loadmat(saveFileName) # TODO: first need to make sure it exists try/except...\n idx = np.arange(flyTrackerSettings.forceMinFrame, frameCount+1, dtype=np.intp);\n matDict['blinkState'][0,idx] = blinkState[idx]\n print(' updating LED state (blinkState) in frames {0} to {1} '.format(flyTrackerSettings.forceMinFrame, frameCount))\n printDB(' updating LED state (blinkState) in frames {0} to {1} '.format(flyTrackerSettings.forceMinFrame, frameCount))\n else: # overwrite mode\n flyLength = np.mean(saveBodyEllipse[startFrame:frameCount,:,3],axis=0)\n # create dict anew\n matDict = {'blinkState':blinkState,'flyEllipses':saveBodyEllipse[startFrame:frameCount+1], \\\n 'reducedFlyEllipses':saveReducedBodyEllipse[startFrame:frameCount+1],'flyLines':saveFlyLines[startFrame:frameCount+1],'arenaCoords':arenaCoords, \\\n 'blinkyCoords':blinkyCoords,'fps':fps,'version':vNum,'startFrame':startFrame,'finalFrame':frameCount, \\\n 'timestamps':timestamp,'pxCenters':saveCenters[startFrame:frameCount+1],'forwardVelocity':saveFV[startFrame:frameCount+1]*fps/px2mm,\\\n 'lateralVelocity':saveLV[startFrame:frameCount+1]*fps/px2mm,'rotationalVelocity':saveRV[startFrame:frameCount+1]*fps, \\\n 'flyLength':flyLength/px2mm,'saveFlyDistances':saveFlyDistances[startFrame:frameCount+1]/px2mm,'angles':saveAngle[startFrame:frameCount+1], \\\n 'mfGuess':saveMFGuess, 'px2mm':px2mm, 'possibleIdentityFlips':saveIdentityFlip}\n doubleCheckData()\n sio.savemat(saveFileName, matDict)\n\n\n##########################################################################\n######## MAIN FUNCTION ###################################################\n##########################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('path')\nparser.add_argument('--debugText', type=str, help='print debug text')\nparser.add_argument('--reuseBackground', action='store_true', help='reuse background - use with caution')\nparser.add_argument('--saveMode', type=int, default=1, help='0 - no save, 1 - save new (default), 2 - update old tracks if possible, 3 - process and update LED state only')\nparser.add_argument('-d', '--dump', nargs=1, type=str, default='', help='output debug image to DIR')\nparser.add_argument('-f', '--frames', nargs=2, type=int, default=(0, 1000000000000), help='only analyze frames between MIN and MAX... 
currently this is off by a few frames because .get() is wrong!')\nparser.add_argument('-n', '--nflies', nargs=1, type=int, default=None, help='set the number of flies to be NUM')\nparser.add_argument('-s', '--settings', type=str, default='init', help='name of function to be executed in flyTrackerSettings')\nparser.add_argument('-a', '--arenanumber', type=int, default=1, help='arena number - used to index into arenaCoords')\nparser.add_argument('-b', '--bgoffset', nargs=1, type=float, default=None, help='offset for background subtraction')\nargs = parser.parse_args()\n\nmovieName = findMovieFile(args.path)\n\n# find settingsFile parameter and import the corresponding settings file first\ntry:\n eval(\"flyTrackerSettings.\" + args.settings + \"()\")\n print(\"loaded settings from \" + args.settings)\nexcept:\n print(\"could not execute settings \"+ args.settings +\" - keeping defaults\")\n\nflyTrackerSettings.settings = args.settings;\n\n# override with command line settings\nflyTrackerSettings.debugText = args.debugText\nif len(args.dump)>0:\n flyTrackerSettings.debugImages = True\n flyTrackerSettings.dumpdir = args.dump[0]\n\nforceMaxFrame = args.frames[1]\nflyTrackerSettings.forceMinFrame = args.frames[0]\nflyTrackerSettings.arenaNumber = args.arenanumber\n\nif args.nflies is not None:\n flyTrackerSettings.nFlies = args.nflies\n\nif args.bgoffset is not None:\n flyTrackerSettings.bgOffset = args.bgoffset\n\nif args.reuseBackground:\n flyTrackerSettings.reuseBackground = True\n\nflyTrackerSettings.saveMode = args.saveMode\n\nprint(flyTrackerSettings.nFlies)\n\nif flyTrackerSettings.settings == \"playback\":\n getMovieDataPlayback(movieName)\nelse:\n getMovieData(movieName)\n\nnFlies = flyTrackerSettings.nFlies\nstartFrame = getStartTime(os.path.dirname(movieName))\n\n# !!! this is pure madness!!! \n# should follow the value in StartTrackingFrame.txt!!!\nreachOffset = 60 * 15 # for some reason, the light and/or camera are significantly brighter for awhile\nif (startFrame != 1):\n startFrame = startFrame + reachOffset\n\nflyTrackerSettings.startFrame = startFrame\nmaxFrames = 180 * 60\nbackGround = generateBackground(movieName)\nflyTrackerSettings.backGround = backGround\n\n# microphoneCenters = findMicrophones(movieName)\n\n# READ IN VIDEO\n\nvr = cv2.VideoCapture(movieName)\nfps = vr.get(cv2.CAP_PROP_FPS)\nNumberOfFrames = int(vr.get(cv2.CAP_PROP_FRAME_COUNT) + 1000)\nflyTrackerSettings.NumberOfFrames = NumberOfFrames\n\nif flyTrackerSettings.settings == \"playback\":\n flyTrackerSettings.px2mm = (flyTrackerSettings.arenaCoords[3] - flyTrackerSettings.arenaCoords[1]) / 50\nelse:\n flyTrackerSettings.px2mm = flyTrackerSettings.radius * 2. 
/ 35.6\n\nprintDB('px2mm: ' + str(flyTrackerSettings.px2mm))\n\n# SET UP DATA STRUCTURES\ncenters = np.zeros((nFlies, 2), np.float32)\nthreshState = np.zeros(NumberOfFrames)\noldCenters = centers\nallCenters = centers\n\n# ew...this should really all be reformated as a structure/class/etc\nsaveBodyEllipse = np.zeros((NumberOfFrames, nFlies, 5), np.float16)\nsaveReducedBodyEllipse = np.zeros((NumberOfFrames, nFlies, 5), np.float16)\nsaveCenters = np.zeros((NumberOfFrames, nFlies, 2), np.float16)\nsaveFlyLines = np.zeros((NumberOfFrames, nFlies, 4), np.float16)\nsaveAngle = np.zeros((NumberOfFrames, nFlies), np.float16)\nsaveFlyDistances = np.zeros((NumberOfFrames, nFlies, nFlies))\nsaveFV = np.zeros((NumberOfFrames, nFlies), np.float16)\nsaveLV = np.zeros((NumberOfFrames, nFlies), np.float16)\nsaveRV = np.zeros((NumberOfFrames, nFlies), np.float16)\nsaveWingExtension = np.zeros((NumberOfFrames, nFlies), np.float16)\nsaveIdentityFlip = np.zeros((NumberOfFrames), np.float16)\n\nreanalysisFlags = np.zeros((NumberOfFrames))\n\nsaveMFGuess = np.zeros((int(np.ceil(NumberOfFrames / flyTrackerSettings.saveRate)) + 1, nFlies + 1)) # added +1 to axis 0 so it won't fail for last chunk\n\nallEllipses = np.float32((nFlies, 5))\n\ncolors = np.zeros((1, nFlies, 3), np.uint8)\ncolors[0, :] = 255\ncolors[0, :, 0] = np.arange(0, 180, 180.0 / nFlies)\ncolors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0]\ncolors = [map(int, thisColor) for thisColor in colors]\n\nblinkState = np.zeros(NumberOfFrames, np.int32)\n\nif flyTrackerSettings.forceMinFrame > 0:\n vr.set(cv2.CAP_PROP_POS_FRAMES, flyTrackerSettings.forceMinFrame - 1)\n\nflyTrackerSettings.init()\n\nnewLabels = range(nFlies)\nt = time.time()\noldTS = 0\nret = True\nwhile(ret):\n # try:\n t = time.time()\n # process the fly movement\n ret, frame = vr.read()\n if not ret:\n continue\n\n frameCount = int(vr.get(cv2.CAP_PROP_POS_FRAMES) - 1)\n if frameCount % 100 == 0:\n print('tracked frame {0:d}/{1:d}'.format(int(frameCount), int(flyTrackerSettings.NumberOfFrames)))\n # process the blinky light - but only if it was selected during annotation\n if not flyTrackerSettings.blinkyCoords == (0, 0, 0, 0):\n blinkState[frameCount] = detectBlinkState(frame)\n\n if (frameCount >= startFrame) and not flyTrackerSettings.saveMode == 3:\n timestamp = vr.get(cv2.CAP_PROP_POS_MSEC)\n printDB(\"frame length: \" + str(timestamp - oldTS))\n oldTS = timestamp\n success = 0\n # updateBackground()\n foreGround = getForeGround(frame)\n flyTrackerSettings.oldCenters = allCenters\n currThresh = 0\n\n # TODO need to catch that error more intelligently\n # try:\n allCenters, allEllipses, allReducedEllipses, allLines, success, flyFlags = clusterFlies2(foreGround, frameCount, currThresh)\n # except:\n # print(\"dooh happend at {0}!\".format( int(frameCount)))\n\n printDB('{0} clustered!'.format(int(frameCount)))\n allCenters, allEllipses, allReducedEllipses, allLines = fixFlyIdentity(allCenters, allEllipses, allReducedEllipses, allLines)\n\n printDB('checking for jumps...')\n allCenters, allEllipses, allReducedEllipses, allLines = checkJumps(allCenters, allEllipses, allReducedEllipses, allLines)\n\n saveBodyEllipse[frameCount] = allEllipses\n saveReducedBodyEllipse[frameCount] = allReducedEllipses\n saveCenters[frameCount] = allCenters\n saveFlyLines[frameCount] = allLines\n threshState[frameCount] = success\n saveFlyDistances[frameCount] = findDist(allCenters, allCenters)\n\n numPts = 0\n numReducedPts = 0\n printDB('finding angles...')\n saveAngle[frameCount] = findAngles(saveAngle, 
allEllipses, allReducedEllipses, allLines, numPts, numReducedPts)\n\n printDB('computing movement...')\n saveFV[frameCount], saveLV[frameCount], saveRV[frameCount] = computeMovement()\n\n if frameCount > forceMaxFrame:\n ret = False\n\n if (frameCount >= startFrame) and (int(frameCount) % flyTrackerSettings.saveRate == 0) and (flyTrackerSettings.saveMode > 0):\n savedata(flyTrackerSettings.saveMode)\n\nprint(1000 * (time.time() - t))\n\nvr.release()\nif (frameCount >= startFrame) and (flyTrackerSettings.saveMode > 0):\n savedata(flyTrackerSettings.saveMode)\nelse:\n print(\"Quitting without saving.\")\n","sub_path":"trackFlies.py","file_name":"trackFlies.py","file_ext":"py","file_size_in_byte":31008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"355192612","text":"from twisted.internet.protocol import ClientFactory, Protocol\nfrom twisted.internet import reactor\nfrom OuchServer.ouch_messages import OuchClientMessages\nfrom random import randrange\nimport struct\nimport numpy as np\nimport random as rand\nimport math\n\nclass RandomTrader():\n\n def __init__(self, client, V = 100, lmbda=50, mean=0, std=0.2):\n self.client = client\n\n self.V = V\n self.lmbda = lmbda\n self.mean = mean\n self.std = std\n\n waitingTime, priceDelta, buyOrSell = self.generateNextOrder()\n reactor.callLater(waitingTime, self.sendOrder, priceDelta, buyOrSell)\n\n def set_underlying_value(self, V): \n self.V = V \n\n def generateNextOrder(self):\n waitingTime = -(1/self.lmbda)*math.log(rand.random()/self.lmbda)\n priceDelta = np.random.normal(self.mean, self.std)\n print(\"PRICE_DELTA: \", priceDelta)\n randomSeed = rand.random()\n if (randomSeed > .5):\n buyOrSell = b'B'\n else:\n buyOrSell = b'S'\n return waitingTime, priceDelta, buyOrSell\n\n def sendOrder(self, priceDelta, buyOrSell):\n price = self.V + priceDelta\n\n order = OuchClientMessages.EnterOrder(\n order_token='{:014d}'.format(0).encode('ascii'),\n buy_sell_indicator=buyOrSell,\n shares=1,\n stock=b'AMAZGOOG',\n price=int(price * 10000),\n time_in_force=4,\n firm=b'OUCH',\n display=b'N',\n capacity=b'O',\n intermarket_sweep_eligibility=b'N',\n minimum_quantity=1,\n cross_type=b'N',\n customer_type=b' ')\n self.client.transport.write(bytes(order))\n\n waitingTime, priceDelta, buyOrSell = self.generateNextOrder()\n reactor.callLater(waitingTime, self.sendOrder, priceDelta, buyOrSell)\n\n","sub_path":"exchange_server_cs/RandomTrader.py","file_name":"RandomTrader.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"596541198","text":"\"\"\"Unit tests for the Metric model class.\"\"\"\n\nimport unittest\n\nfrom model.metric import Metric\n\n\nclass MetricStatusTest(unittest.TestCase):\n \"\"\"Unit tests for determining the metric status, given a measurement value.\"\"\"\n\n def setUp(self):\n \"\"\"Override to set up the data model.\"\"\"\n self.data_model = dict(metrics=dict(metric_type=dict(direction=\"<\")))\n\n def test_green(self):\n \"\"\"Test a green measurement.\"\"\"\n metric = Metric(self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"15\"))\n self.assertEqual(\"target_met\", metric.status(\"10\"))\n\n def test_yellow(self):\n \"\"\"Test a yellow measurement.\"\"\"\n metric = Metric(self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"25\"))\n self.assertEqual(\"near_target_met\", metric.status(\"22\"))\n\n def test_red(self):\n \"\"\"Test a red 
measurement.\"\"\"\n metric = Metric(self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"25\"))\n self.assertEqual(\"target_not_met\", metric.status(\"30\"))\n\n def test_debt_met(self):\n \"\"\"Test a measurement better than the accepted debt.\"\"\"\n metric = Metric(\n self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"25\", debt_target=\"30\", accept_debt=True)\n )\n self.assertEqual(\"debt_target_met\", metric.status(\"30\"))\n\n def test_debt_not_met(self):\n \"\"\"Test a measurement worse than the accepted debt.\"\"\"\n metric = Metric(\n self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"25\", debt_target=\"30\", accept_debt=True)\n )\n self.assertEqual(\"target_not_met\", metric.status(\"35\"))\n\n def test_debt_past_end_date(self):\n \"\"\"Test a measurement with expired debt.\"\"\"\n metric = Metric(\n self.data_model,\n dict(\n type=\"metric_type\",\n target=\"20\",\n near_target=\"25\",\n debt_target=\"30\",\n accept_debt=True,\n debt_end_date=\"2019-06-10\",\n ),\n )\n self.assertEqual(\"target_not_met\", metric.status(\"29\"))\n\n def test_debt_end_date_removed(self):\n \"\"\"Test a measurement with the technical debt end date reset.\"\"\"\n metric = Metric(\n self.data_model,\n dict(\n type=\"metric_type\", target=\"20\", near_target=\"25\", debt_target=\"30\", accept_debt=True, debt_end_date=\"\"\n ),\n )\n self.assertEqual(\"debt_target_met\", metric.status(\"29\"))\n\n def test_green_with_debt(self):\n \"\"\"Test a measurement with debt, better than the target.\"\"\"\n metric = Metric(\n self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"25\", debt_target=\"30\", accept_debt=True)\n )\n self.assertEqual(\"target_met\", metric.status(\"15\"))\n\n def test_near_target_worse_than_target(self):\n \"\"\"Test that the measurement is green when the near target is worse than the target.\"\"\"\n metric = Metric(self.data_model, dict(type=\"metric_type\", target=\"20\", near_target=\"15\"))\n self.assertEqual(\"target_met\", metric.status(\"17\"))\n","sub_path":"components/server/tests/model/test_metric.py","file_name":"test_metric.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"606526732","text":"from leeyzer import solution, timeit, Solution, make_tree, TreeNode\n\n# @Date: 2019/6/11\n# @Author: *** \n# @description:\n\n\nclass Q48_Rotate_Image(Solution):\n @timeit\n @solution\n def Q48_Rotate_Image1(self, matrix):\n \"\"\"\n 先沿正对角线反转,再沿中轴反转\n \"\"\"\n n = len(matrix)\n for i in range(n):\n for j in range(i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = tmp\n\n # mirror each row: pair column j with column n-1-j so it also works for even n\n for i in range(n):\n for j in range(n // 2):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[i][n - 1 - j]\n matrix[i][n - 1 - j] = tmp\n # 实现了原地修改,这里是为了输出return\n return matrix\n \n @timeit\n @solution\n def Q48_Rotate_Image2(self, matrix):\n \n pass \n \n\ndef main():\n q = Q48_Rotate_Image()\n q.add_args([\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ])\n # q.add_args()\n q.test()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"top100_liked/48_Rotate_Image.py","file_name":"48_Rotate_Image.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"471862680","text":"# test\n# random_walk\n# AUTHOR: Maln\n# TIME: 07/03/2017\n\nfrom random import choice\n\n\ndef get_step():\n \"\"\" Decide which direction to go and how far
to go in that direction\"\"\"\n direction = choice([1,-1])\n distance = choice([0,1,2,3,4])\n step = direction*distance\n\n return step\n\n\nclass RandomWalk():\n \"\"\"A class to generate random walks\"\"\"\n\n\n def __init__(self, num_points=5000):\n \"\"\"Initialize attributes of a walk.\"\"\"\n self.num_points = num_points\n\n # All walks start at (0,0)\n self.x_values =[0]\n self.y_values =[0]\n\n def fill_walk(self):\n \"\"\"Calculate all the points in the walk.\"\"\"\n\n # Keep taking steps until wallk reaches desired lengthh\n while len(self.x_values) < self.num_points:\n\n # Decide which direction to go and how far to go in that direction\n x_step = get_step()\n y_step = get_step()\n\n # Reject moves that go nowhere.\n if x_step==0 and y_step==0:\n continue\n\n # Calculate next x and y values.\n next_x = self.x_values[-1]+x_step\n next_y = self.y_values[-1]+y_step\n\n self.x_values.append(next_x)\n self.y_values.append(next_y)\n","sub_path":"test/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367860349","text":"# -*- coding:utf-8 -*-\n'''\n * 语义距离\n'''\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom jpype import *\n\nstartJVM(getDefaultJVMPath(),\"-Djava.class.path=E:\\PythonWorkSpace\\hanlp-1.2.8-release\\hanlp-1.2.8.jar;E:\\PythonWorkSpace\\hanlp-1.2.8-release\", \"-Xms512m\", \"-Xmx512m\")#Xms1g,Xmx1g,指定内存\nHanLP = JClass('com.hankcs.hanlp.dictionary.CoreSynonymDictionary')\nf1_list=[]\nf2_list=[]\nf1 = open('training set/wuliusudu_Z.txt','r')#检验级文件名\nfor i in f1.readlines():\n f1_list.append(i.strip())\nf2 = open('n_exam','r')#待分类文件名\nfor i in f2.readlines():\n f2_list.append(i.strip())\n\n\n\na=\"\"\nb=\"\"\nresults_yanse=\"\"\nfor a in f1_list:\n for b in f2_list:\n if HanLP.similarity(a,b)>=0.99:#语义相似度大于0.99\n if HanLP.distance(a, b)<=100000:#语义距离小于100000\n results_yanse+=(\"\\n\")+str(b)#转行添加\n print(a + \"\\t\" + b + \"\\t的相似度是\\t\" +str(HanLP.similarity(a,b))+\"\\t的词义距离是\\t\"+str(HanLP.distance(a, b)))\n # HanLP.Dictionary.CoreSynonymDictionary.distance(a, b))\n # print(a + \"\\t\" + b + \"\\t的相似度是\\t\" +str(HanLP.similarity(a,b)))\n\nf3 = open('training set/wuliusudu_Z.txt','a')#在检验级文本后追加符合相似度大于0.99,距离相遇100000的词语\nf3.write(results_yanse)\n\nf1.close()\nf2.close()\nf3.close()\n","sub_path":"SocialListening/KNN/n/knn/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491759397","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom pocketapp.models import Log, User, User2, Saveitem, Chat\nfrom xml.etree import ElementTree as ET\nimport urllib, urllib2, httplib, cookielib, socket, hashlib, time, re, json\n\nWECHAT_TOKEN = 'rulethebattlefield'\nWECHAT_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token'\nWECHAT_APPID = 'wxff2b22e63886a792'\nWECHAT_APPSECRET = '6ffc44556e96897d79efb66494e283da'\nPOCKET = {\n\t'consumer_key': '13263-bd933b638f056bc0ec5de525',\n\t'consumer_key_new': '17299-70dfe8151741c2d67b41ee3b',\n\t'redirect_uri': 'http://pocket.sinaapp.com/authorizationFinished',\n\t'get_token_uri': 'https://getpocket.com/v3/oauth/request',\n\t'request_token': '',\n\t'authorization_uri': 
'https://getpocket.com/auth/authorize?request_token=%s&redirect_uri=%s',\n\t'access_token_uri': 'https://getpocket.com/v3/oauth/authorize',\n\t'signup_uri': 'http://getpocket.com/signup',\n\t'pic_authorization': 'http://pocket.sinaapp.com/statics/640.jpg', # 640x320\n\t'pic_signup': 'http://pocket.sinaapp.com/statics/80.png', # 80x80\n\t'add_uri': 'https://getpocket.com/v3/add',\n\t'get_uri': 'https://getpocket.com/v3/get'\n}\n\n\ndef wechat(request):\n\tglobal WECHAT_TOKEN, POCKET\n\tparams = request.GET\n\tmixedstr = ''.join(sorted((WECHAT_TOKEN, params.get('timestamp', ''), params.get('nonce', ''))))\n\tmixedstr = hashlib.sha1(mixedstr).hexdigest()\n\tif mixedstr == params.get('signature', ''):\n\t\tif params.has_key('echostr'):\n\t\t\tmsg = params.get('echostr', '')\n\t\telse:\n\t\t\treply = {\n\t\t\t\t'text': '''\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t%s\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t0\n\t\t\t\t\t''',\n\t\t\t\t'music': '''\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t%s\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t<![CDATA[TITLE]]>\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t0\n\t\t\t\t\t''',\n\t\t\t\t'news': '''\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t%s\n\t\t\t\t\t\t\n\t\t\t\t\t\t%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\t1\n\t\t\t\t\t''',\n\t\t\t\t'item': '''\n\t\t\t\t\t\t<![CDATA[%s]]> \n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t'''\n\t\t\t}\n\t\t\tif request.raw_post_data:\n\t\t\t\txml = ET.fromstring(request.raw_post_data)\n\t\t\t\txml_dict = {}\n\t\t\t\tfor child in xml: \n\t\t\t\t\txml_dict[child.tag] = child.text\n\t\t\t\tmsgid = xml_dict.get('MsgId', '')\n\t\t\t\tevent = xml_dict.get('Event', '')\n\t\t\t\tmsgtype = xml_dict.get('MsgType', '')\n\t\t\t\tcontent = xml_dict.get('Content', '')\n\t\t\t\tfromUserName = xml_dict.get('ToUserName', '')\n\t\t\t\ttoUserName = xml_dict.get('FromUserName', '')\n\t\t\t\ttitle = xml_dict.get('Title', '')\n\t\t\t\tdescription = xml_dict.get('Description', '')\n\t\t\t\turl = xml_dict.get('Url', '')\n\t\t\t\tpostTime = str(int(time.time()))\n\t\t\t\tuid = hashlib.sha1(toUserName).hexdigest()\n\t\t\t\teventkey = xml_dict.get('EventKey', '')\n\n\t\t\t\tdef reply_text(text):\n\t\t\t\t\treturn reply['text'] % (toUserName, fromUserName, postTime, text)\n\n\t\t\t\tif event == 'subscribe' or content == 'Hello2BizUser':\n\t\t\t\t\tmsg = reply_text('欢迎来到“我的Pocket”, 点击下方菜单或回复“a”绑定Pocket账号。由于国内访问Pocket速度较慢,如无回应请重新发送。\\r\\n回复“h”或“?”获取帮助。')\n\t\t\t\telif content == 'a' or content == 'A' or content == 'auth' or eventkey == 'V1001_ACCOUNT_AUTH':\n\t\t\t\t\t# return HttpResponse(reply_text(get_request_token().replace('code=','')))\n\t\t\t\t\tauthorization_uri = POCKET['authorization_uri'] % (get_request_token(uid, request), POCKET['redirect_uri'] + '?uid=' + uid)\n\t\t\t\t\titem = reply['item'] % ('点击绑定Pocket账号,如果你已有Pocket帐号,请进入后点击右上角的Login', '', POCKET['pic_authorization'], authorization_uri)\n\t\t\t\t\t# item += reply['item'] % ('如果你还没有Pocket账号,点击这里注册', '', POCKET['pic_signup'], POCKET['signup_uri'])\n\t\t\t\t\tmsg = reply['news'] % (toUserName, fromUserName, postTime, '1', item)\n\t\t\t\telif content == 'h' or content == 'H' or content == 'help' or content == '?' or content == '?' 
or eventkey == 'V1001_HELP':\n\t\t\t\t\tuser_item = User.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tuser_item2 = User2.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tif user_item.count() == 0 or user_item2.count() == 0:\n\t\t\t\t\t\tmsg = reply_text('使用帮助:\\r\\n点击下方菜单或回复“a”绑定Pocket账号。由于国内访问Pocket速度较慢,如无回应请重新发送。\\r\\n\\r\\n有问题或者建议欢迎留言或者发送邮件到devange@live.com,感谢你的支持!')\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = reply_text('你授权的Pocket账号为:%s\\r\\n\\r\\n使用步骤:\\r\\n1、打开你需要保存的内容,可以是微信公众账号推送或者微信好友发来的文章或链接\\r\\n2、点击右上角的“...”或者“转发”按钮\\r\\n3、在弹出的窗口中点击“复制链接”\\r\\n4、返回消息列表,进入“我的Pocket”公众账号\\r\\n5、长按消息输入框粘贴链接发送给本账号\\r\\n6、保存成功!你可以稍后打开Pocket阅读了!\\r\\n\\r\\n使用帮助:\\r\\n- 如果发送后无响应,可能是国内访问Pocket速度比较慢,请重新发送。\\r\\n- 如果提示“保存失败”,请确认链接的有效性并重试。\\r\\n- 如果多次重试不行,有可能是账号授权过期,请回复“a”重新绑定Pocket账号。\\r\\n\\r\\n有问题或者建议欢迎留言或者发送邮件到devange@live.com,感谢你的支持!' % user_item2[user_item2.count() - 1].pocket_user.encode('utf-8'))\n\t\t\t\telif eventkey == 'V1001_SAVED_RECENTLY_5' or eventkey == 'V1001_SAVED_RECENTLY_10':\n\t\t\t\t\tuser_item = User.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tuser_item2 = User2.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tif user_item.count() == 0 or user_item2.count() == 0:\n\t\t\t\t\t\tmsg = reply_text('你还没有绑定Pocket账号。\\r\\n点击下方菜单或回复“a”绑定Pocket账号。由于国内访问Pocket速度较慢,如无回应请重新操作。')\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount = 5\n\t\t\t\t\t\tif eventkey == 'V1001_SAVED_RECENTLY_5':\n\t\t\t\t\t\t\tcount = 5\n\t\t\t\t\t\telif eventkey == 'V1001_SAVED_RECENTLY_10':\n\t\t\t\t\t\t\tcount = 10\n\t\t\t\t\t\taccess_token = user_item2[user_item2.count() - 1].access_token\n\t\t\t\t\t\tget_data = {\n\t\t\t\t\t\t\t'count': count,\n\t\t\t\t\t\t\t'sort': 'newest',\n\t\t\t\t\t\t\t'detailType': 'complete',\n\t\t\t\t\t\t\t'consumer_key': POCKET['consumer_key_new'],\n\t\t\t\t\t\t\t'access_token': access_token\n\t\t\t\t\t\t}\n\t\t\t\t\t\t# for i in range(0, 5):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tget_data = urllib.urlencode(get_data)\n\t\t\t\t\t\t\tget_req = urllib2.Request(POCKET['get_uri'], get_data)\n\t\t\t\t\t\t\tget_response = urllib2.urlopen(get_req, timeout=5)\n\t\t\t\t\t\t\tresponse = str(get_response.read())\n\t\t\t\t\t\t\tresponse_json = json.loads(response)\n\t\t\t\t\t\t\tif response_json['status'] == 1:\n\t\t\t\t\t\t\t\tif response_json['list']:\n\t\t\t\t\t\t\t\t\tdicts = response_json['list']\n\t\t\t\t\t\t\t\t\tdicts_sorted = sorted(dicts.iteritems(), key=lambda d:d[1]['time_updated'], reverse=True);\n\t\t\t\t\t\t\t\t\titem = ''\n\t\t\t\t\t\t\t\t\tfor v in dicts_sorted:\n\t\t\t\t\t\t\t\t\t\t# msg = reply_text(v['images']['1']['src'])\n\t\t\t\t\t\t\t\t\t\timage = ''\n\t\t\t\t\t\t\t\t\t\tv = v[1]\n\t\t\t\t\t\t\t\t\t\tif v['has_image'] == '1':\n\t\t\t\t\t\t\t\t\t\t\timage = v['image']['src']\n\t\t\t\t\t\t\t\t\t\titem += reply['item'] % (v['resolved_title'], v['excerpt'], image, v['resolved_url'])\n\t\t\t\t\t\t\t\t\tmsg = reply['news'] % (toUserName, fromUserName, postTime, str(len(dicts)), item)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tmsg = reply_text('你似乎没有待读的内容。请善加使用:)')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmsg = reply_text('获取失败,请重试。')\n\t\t\t\t\t\t\t# break\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tmsg = reply_text('获取失败,请重试。如需获取帮助请点下方菜单。')\n\t\t\t\t\t\t\tresponse = 'timeout'\n\t\t\t\t\t\t# else:\n\t\t\t\t\t\t# \tmsg = reply_text('获取失败,请重试。如需获取帮助请点下方菜单。%s', i)\n\t\t\t\t\t\t# \tresponse = 'timeout'\n\t\t\t\telse:\n\t\t\t\t\tuser_item = User.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tuser_item2 = User2.objects.order_by('id').filter(wechat_user=uid)\n\t\t\t\t\tif 
user_item.count() == 0 or user_item2.count() == 0:\n\t\t\t\t\t\tmsg = reply_text('点击下方菜单或回复“a”绑定Pocket账号。由于国内访问Pocket速度较慢,如无回应请重新发送。\\r\\n如需获取帮助请点下方菜单。')\n\t\t\t\t\t\tChat(wechat_user = uid, pocket_user = '-', chat = content).save()\n\t\t\t\t\telse:\n\t\t\t\t\t\tif msgtype == 'link' or content[0:7] == 'http://' or content[0:8] == 'https://':\n\t\t\t\t\t\t\tif msgtype == 'link':\n\t\t\t\t\t\t\t\turl = url\n\t\t\t\t\t\t\t\ttitle = title\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\turl = content\n\t\t\t\t\t\t\t\t#title = fetchTitle(url)\n\t\t\t\t\t\t\t\ttitle = ''\n\t\t\t\t\t\t\taccess_token = user_item2[user_item2.count() - 1].access_token\n\t\t\t\t\t\t\tpocket_user = user_item2[user_item2.count() - 1].pocket_user\n\t\t\t\t\t\t\tadd_data = {\n\t\t\t\t\t\t\t\t'url': url,\n\t\t\t\t\t\t\t\t'title': title,\n\t\t\t\t\t\t\t\t'tags': 'wechat',\n\t\t\t\t\t\t\t\t'consumer_key': POCKET['consumer_key_new'],\n\t\t\t\t\t\t\t\t'access_token': access_token\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tadd_req = urllib2.Request(POCKET['add_uri'], urllib.urlencode(add_data))\n\t\t\t\t\t\t\t\tadd_response = urllib2.urlopen(add_req, timeout=5)\n\t\t\t\t\t\t\t\tresponse = str(add_response.read())\n\t\t\t\t\t\t\t\tresponse_json = json.loads(response)\n\t\t\t\t\t\t\t\tif response_json['item']['title'] is None:\n\t\t\t\t\t\t\t\t\ttitle = 'null'\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\ttitle = response_json['item']['title']\n\t\t\t\t\t\t\t\tif (response_json['status'] == 1):\n\t\t\t\t\t\t\t\t\tif title == 'null':\n\t\t\t\t\t\t\t\t\t\tmsg = reply_text('保存成功')\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tmsg = reply_text('「%s」 保存成功' % (title.encode('utf-8')))\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tmsg = reply_text('保存失败,请重试。\\r\\n如需获取帮���请点下方菜单。')\n\t\t\t\t\t\t\t\tSaveitem(wechat_user = uid, pocket_user = pocket_user, title = title, url = url, status = response_json['status']).save()\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\tadd_data = {\n\t\t\t\t\t\t\t\t\t\t'url': url,\n\t\t\t\t\t\t\t\t\t\t'title': title,\n\t\t\t\t\t\t\t\t\t\t'tags': 'wechat',\n\t\t\t\t\t\t\t\t\t\t'consumer_key': POCKET['consumer_key'],\n\t\t\t\t\t\t\t\t\t\t'access_token': access_token\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tadd_req = urllib2.Request(POCKET['add_uri'], urllib.urlencode(add_data))\n\t\t\t\t\t\t\t\t\tadd_response = urllib2.urlopen(add_req, timeout=5)\n\t\t\t\t\t\t\t\t\tresponse = str(add_response.read())\n\t\t\t\t\t\t\t\t\tresponse_json = json.loads(response)\n\t\t\t\t\t\t\t\t\tif response_json['item']['title'] is None:\n\t\t\t\t\t\t\t\t\t\ttitle = 'null'\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttitle = response_json['item']['title']\n\t\t\t\t\t\t\t\t\tif (response_json['status'] == 1):\n\t\t\t\t\t\t\t\t\t\tif title == 'null':\n\t\t\t\t\t\t\t\t\t\t\tmsg = reply_text('保存成功')\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tmsg = reply_text('「%s」 保存成功' % (title.encode('utf-8')))\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tmsg = reply_text('保存失败,请重试。\\r\\n如需获取帮助请点下方菜单。')\n\t\t\t\t\t\t\t\t\tSaveitem(wechat_user = uid, pocket_user = pocket_user, title = title, url = url, status = response_json['status']).save()\n\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t# msg = reply_text('保存失败或超时,请重试,请确认链接有效。如需获取帮助请点下方菜单。')\n\t\t\t\t\t\t\t\t\tresponse = 'timeout'\n\t\t\t\t\t\t\t\t\tSaveitem(wechat_user = uid, title = title, log = response, url = url, status = '-1').save()\n\t\t\t\t\t\telif content[0:5] == 'menu:':\n\t\t\t\t\t\t\twechat_access_token = getCredential()\n\t\t\t\t\t\t\tadd_data = 
{\n\t\t\t\t\t\t\t\t\"button\": [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"name\": \"最近待读\",\n\t\t\t\t\t\t\t\t\t\t\"sub_button\": [\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"click\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"最近10篇\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"key\": \"V1001_SAVED_RECENTLY_10\"\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"click\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"最近5篇\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"key\": \"V1001_SAVED_RECENTLY_5\"\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"type\": \"click\",\n\t\t\t\t\t\t\t\t\t\t\"name\": \"使用帮助\",\n\t\t\t\t\t\t\t\t\t\t\"key\": \"V1001_HELP\"\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"type\": \"click\",\n\t\t\t\t\t\t\t\t\t\t\"name\": \"账号绑定\",\n\t\t\t\t\t\t\t\t\t\t\"key\": \"V1001_ACCOUNT_AUTH\"\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\turl = 'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % wechat_access_token\n\t\t\t\t\t\t\thttp_header = {\n\t\t\t\t\t\t\t\t\"User-Agent\" : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11\",\n\t\t\t\t\t\t\t\t\"Accept\" : \"text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,text/png,*/*;q=0.5\",\n\t\t\t\t\t\t\t\t\"Accept-Language\" : \"en-us,en;q=0.5\",\n\t\t\t\t\t\t\t\t\"Accept-Charset\" : \"ISO-8859-1\",\n\t\t\t\t\t\t\t\t\"Content-type\": \"application/x-www-form-urlencoded\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ttimeout = 15\n\t\t\t\t\t\t\tsocket.setdefaulttimeout(timeout)\n\t\t\t\t\t\t\tcookie_jar = cookielib.LWPCookieJar()\n\t\t\t\t\t\t\tcookie = urllib2.HTTPCookieProcessor(cookie_jar)\n\t\t\t\t\t\t\tproxy = {}\n\t\t\t\t\t\t\topener = urllib2.build_opener(cookie)\n\t\t\t\t\t\t\tadd_req = urllib2.Request(url, json.dumps(add_data, ensure_ascii=False), http_header)\n\t\t\t\t\t\t\tresponse = str(opener.open(add_req).read())\n\t\t\t\t\t\t\tmsg = reply_text(response)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmsg = reply_text('回复链接即可保存文章到Pocket,只支持链接哦。\\r\\n如需获取帮助请点下方菜单。')\n\t\t\t\t\t\t\tpocket_user = user_item2[user_item2.count() - 1].pocket_user\n\t\t\t\t\t\t\tChat(wechat_user = uid, pocket_user = pocket_user, chat = content).save()\n\n\t\t\telse:\n\t\t\t\tmsg = 'Invalid'\n\n\t\t\treturn HttpResponse(msg)\n\telse:\n\t\treturn render(request, 'index.html', {})\n\ndef test(request):\n\ttxt = ''\n\treturn str(txt)\n\ndef fetchTitle(url):\n\ttry:\n\t\tfetch_req = urllib2.Request(url)\n\t\tfetch_response = urllib2.urlopen(fetch_req, timeout=5)\n\t\tre_title = re.compile(r\"(.*?)\",re.I)\n\t\thtml = re.sub(r\"\\n+\", \"\\n\", str(fetch_response.read()))\n\t\ttitle = re_title.search(html).group(1)\n\texcept:\n\t\ttitle = '~'\n\treturn title\n\ndef get_request_token(uid, request):\n\tauth_data1 = {\n\t\t'consumer_key': POCKET['consumer_key_new'],\n\t\t'redirect_uri': POCKET['redirect_uri'] + '?uid=' + uid\n\t}\n\tauth_data1 = urllib.urlencode(auth_data1)\n\tauth_req1 = urllib2.Request(POCKET['get_token_uri'], auth_data1)\n\tauth_response1 = urllib2.urlopen(auth_req1)\n\tcode = auth_response1.read().replace('code=', '')\n\trequest.session[\"code\"] = code\n\tUser(wechat_user = uid, request_token = code).save()\n\treturn code\n\ndef authorize(uid, code):\n\tauth_data2 = {\n\t\t'consumer_key': POCKET['consumer_key_new'],\n\t\t'code': code\n\t}\n\tauth_data2 = urllib.urlencode(auth_data2)\n\tauth_req2 = urllib2.Request(POCKET['access_token_uri'], 
auth_data2)\n\ttry:\n\t\tauth_response2 = urllib2.urlopen(auth_req2)\n\t\tresponse = auth_response2.read()\n\t# except urllib2.HTTPError, e:\n\t# \tresponse = '!!! HTTPError = ' + str(e.code)\n\t# except urllib2.URLError, e:\n\t# \tresponse = '!!! URLError = ' + str(e.reason)\n\t# except httplib.HTTPException, e:\n\t# \tresponse = '!!! HTTPException'\n\t# except Exception:\n\t# \timport traceback\n\t# \tresponse = '!!! generic exception: ' + traceback.format_exc()\n\texcept:\n\t\tresponse = 'error'\n\treturn response\n\ndef authorizationFinished(request):\n\tuid = request.GET.get('uid', '~~~')\n\tuser_item = User.objects.order_by('id').filter(wechat_user=uid)\n\tuser_item2 = User2.objects.order_by('id').filter(wechat_user=uid)\n\tpocket_user = ''\n\t# if user_item2.count() == 0:\n\tif user_item.count() == 0:\n\t\tauth_result = 0\n\telse:\n\t\tcode = user_item[user_item.count() - 1].request_token\n\t\tauth_response = authorize(uid, code)\n\t\tif auth_response == 'error':\n\t\t\tif user_item2.count() == 0:\n\t\t\t\tauth_result = 0\n\t\t\telse:\n\t\t\t\tauth_result = 2\n\t\telif auth_response[0:3] == '!!!':\n\t\t\tauth_result = 3\n\t\telse:\n\t\t\taccess_token = auth_response.split('&')[0].replace('access_token=', '')\n\t\t\tpocket_user = auth_response.split('&')[1].replace('username=', '')\n\t\t\tUser2(wechat_user = uid, pocket_user = pocket_user, access_token = access_token).save()\n\t\t\tpocket_user = '%s,' % (pocket_user)\n\t\t\tauth_result = 1\n\n\tif auth_result == 0:\n\t\ttitle = '绑定失败'\n\t\tmsg = '账号绑定失败,请返回微信点击下方菜单“帐号绑定”或回复“a”重试。'\n\telif auth_result == 1:\n\t\ttitle = '绑定成功'\n\t\tmsg = pocket_user + '账号绑定成功。
返回微信给我发送链接即可保存到Pocket以便稍后阅读。'\n\telif auth_result == 2:\n\t\ttitle = '账号已绑定'\n\t\tmsg = '你已经绑定过Pocket账号
返回微信给我发送链接即可保存到Pocket以便稍后阅读。如有问题请尝试重新绑定帐号。'\n\telif auth_result == 3:\n\t\ttitle = '出现错误'\n\t\tmsg = auth_response\n\n\treturn render(request, 'authorize.html', {'title': title, 'result': msg})\n\ndef getCredential():\n\tauth_data2 = {\n\t\t'grant_type': 'client_credential',\n\t\t'appid': WECHAT_APPID,\n\t\t'secret': WECHAT_APPSECRET\n\t}\n\tauth_data2 = urllib.urlencode(auth_data2)\n\tauth_req2 = urllib2.Request(WECHAT_TOKEN_URL, auth_data2)\n\ttry:\n\t\tauth_response2 = urllib2.urlopen(auth_req2)\n\t\tresponse = json.loads(auth_response2.read())\n\texcept:\n\t\tresponse = { 'access_token': 'error' }\n\treturn response['access_token']\n\n\n","sub_path":"pocketapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135715036","text":"# useful ref:\n# https://stackoverflow.com/a/43379469\n# https://developers.google.com/gmail/api/guides/sending\n\n# token.json: user auth file\n# auth.json: api auth file\nimport logging\n\nfrom apiclient import errors, discovery\nfrom googleapiclient.discovery import build\n\nfrom httplib2 import Http\nfrom oauth2client.file import Storage as userAuth\nfrom oauth2client.tools import run_flow as getNewToken\nfrom oauth2client.client import flow_from_clientsecrets as newToken\n\n\nfrom base64 import urlsafe_b64encode as urlEncode\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\nclass Email:\n def __init__(self, clientId, to, subject, plain, html):\n self.clientId = clientId\n self.to = to\n self.subject = subject\n self.plain = plain\n self.html = html\n\n def auth(self):\n # If modifying these scopes, delete the file token.json.\n SCOPES = 'https://www.googleapis.com/auth/gmail.send'\n tokenFile = userAuth('token.json')\n creds = tokenFile.get()\n\n # authenticate the user\n if not creds or creds.invalid:\n token = newToken('auth.json', SCOPES)\n creds = getNewToken(token, tokenFile)\n return build('gmail', 'v1', http=creds.authorize(Http()))\n\n def formatEmail(self):\n msg = MIMEMultipart('alternative') \n msg['To'] = self.to\n msg['Subject'] = self.subject\n msg.attach(MIMEText(self.plain, 'plain'))\n msg.attach( MIMEText(self.html, 'html') )\n return {'raw': urlEncode(msg.as_bytes()).decode()}\n\n def send(self):\n try:\n email = self.formatEmail()\n return self.auth().users().messages().send(userId=self.clientId, body=email).execute()\n except errors.HttpError as error:\n logging.error(error)\n\n\n","sub_path":"gmailIt/gmailIt.py","file_name":"gmailIt.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1854448","text":"#\r\n# @lc app=leetcode.cn id=236 lang=python3\r\n#\r\n# [236] 二叉树的最近公共祖先\r\n#\r\n# https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree/description/\r\n#\r\n# algorithms\r\n# Medium (60.53%)\r\n# Likes: 416\r\n# Dislikes: 0\r\n# Total Accepted: 52.4K\r\n# Total Submissions: 86.4K\r\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n1'\r\n#\r\n# 给定一个二叉树, 找到该树中两个指定节点的最近公共祖先。\r\n#\r\n# 百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,满足 x 是 p、q 的祖先且 x\r\n# 的深度尽可能大(一个节点也可以是它自己的祖先)。”\r\n#\r\n# 例如,给定如下二叉树:  root = [3,5,1,6,2,0,8,null,null,7,4]\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# 示例 1:\r\n#\r\n# 输入: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\r\n# 输出: 3\r\n# 解释: 节点 5 和节点 1 的最近公共祖先是节点 3。\r\n#\r\n#\r\n# 示例 2:\r\n#\r\n# 输入: root = 
[3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\r\n# 输出: 5\r\n# 解释: 节点 5 和节点 4 的最近公共祖先是节点 5。因为根据定义最近公共祖先节点可以为节点本身。\r\n#\r\n#\r\n#\r\n#\r\n# 说明:\r\n#\r\n#\r\n# 所有节点的值都是唯一的。\r\n# p、q 为不同节点且均存在于给定的二叉树中。\r\n#\r\n#\r\n#\r\n\r\n# @lc code=start\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\n\r\nclass Solution:\r\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode',\r\n q: 'TreeNode') -> 'TreeNode':\r\n self.res = None\r\n\r\n def find(node):\r\n if not node:\r\n return (False, False)\r\n lp, lq = find(node.left)\r\n rp, rq = find(node.right)\r\n findp = node == p or lp or rp\r\n findq = node == q or lq or rq\r\n if findp and findq and not self.res:\r\n self.res = node\r\n return (findp, findq)\r\n\r\n find(root)\r\n return self.res\r\n\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/236.二叉树的最近公共祖先.py","file_name":"236.二叉树的最近公共祖先.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"59244127","text":"import networkx as nx\r\nimport sys\r\nsys.path.insert(1, './test_graphs/')\r\nimport graph6\r\nimport graph7\r\nimport graph8\r\nimport graph9\r\nimport graph10\r\n\r\ndef bfs(G,a,b):\r\n G.add_nodes_from(G.nodes(), label = -1) # initialization of all labels\r\n G.nodes[a]['label'] = 0\r\n\r\n tagged = []\r\n tagged.append(a)\r\n\r\n i = 0\r\n while (b not in tagged):\r\n for node in G.nodes():\r\n if (G.nodes[node]['label'] == i):\r\n for connection in G.neighbors(node):\r\n if (connection not in tagged):\r\n G.nodes[connection]['label'] = i + 1\r\n tagged.append(connection)\r\n i += 1\r\n\r\n return(G.nodes[b]['label'])\r\n\r\n\r\n\r\nG6=graph6.Graph()\r\na=12\r\nb=40\r\nprint('Graph G6:')\r\nprint('The distance between vertices', a, 'and', b, 'is:', bfs(G6,a,b))\r\nprint()\r\n\r\nG7=graph7.Graph()\r\na=5\r\nb=36\r\nprint('Graph G7:')\r\nprint('The distance between vertices', a, 'and', b, 'is:', bfs(G7,a,b))\r\nprint()\r\n\r\nG8=graph8.Graph()\r\na=15\r\nb=35\r\nprint('Graph G8:')\r\nprint('The distance between vertices', a, 'and', b, 'is:', bfs(G8,a,b))\r\nprint()\r\n\r\nG9=graph9.Graph()\r\na=1\r\nb=19\r\nprint('Graph G9:')\r\nprint('The distance between vertices', a, 'and', b, 'is:', bfs(G9,a,b))\r\nprint()\r\n\r\nG10=graph10.Graph()\r\na=6\r\nb=30\r\nprint('Graph G10:')\r\nprint('The distance between vertices', a, 'and', b, 'is:', bfs(G10,a,b))\r\nprint()\r\n","sub_path":"breadth_first.py","file_name":"breadth_first.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"150741523","text":"'''Solving Ax=B with LUfactorization and backsubstitution'''\n#shorcut method: https://www.youtube.com/watch?v=UlWcofkUDDU\n#step by step : https://www.youtube.com/watch?v=rhNKncraJMk\n\nimport numpy as np\n\n#B=np.asarray( A ) : copy=False --> pass by reference --> modifying A will affect B \n#B=np.array( A ) : copy=True --> pass by value --> A and B are different instances\n# A : array like obj\n# np.matmul() != np.dot() : different broadcasting rule for tensors \n\n\n#L is lower triangular matrix with diagonal 1\n#U is upper triangular matrix with variable diagonal \ndef LUdecompose(Amat):\n A=np.array(Amat) #list --> np.array\n len_mat=len(Amat[0])\n L=np.identity(len_mat) #init L and U\n U=A\n\n for col in range(len_mat):\n for row in range(col+1, len_mat):\n c=U[row][col]/U[col][col]\n 
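# c is the multiplier that zeroes U[row][col] against the pivot row; storing it in L keeps L*U equal to A\n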
L[row][col]=c\n U[row]=U[row]-c*U[col]\n return L, U\n\ndef Back_sub(L,U,B_vec):\n #Ax=LUx=B\n #Ux=y --> x = U^-1 y\n #Ly=B --> y = L^-1 B\n B=np.array(B_vec)\n invL=np.linalg.inv(L)\n invU=np.linalg.inv(U)\n y=np.matmul(invL,B)\n x=np.matmul(invU,y)\n return x\n\n\nif __name__==\"__main__\":\n A_matrix=[ #compatible with any square-matrices\n [ 1, 3, 2, 5, 2],\n [ 2, 3, 4,-3, 2],\n [ 3, 4,-3, 2, 0],\n [-1,-2,-3, 4, 2],\n [ 6, 9, 8, 2, 1]\n ]\n\n B_vec=[6,2,16,2,3]\n\n L,U=LUdecompose(A_matrix) # unpack first: Back_sub expects L, U and B separately\n x=Back_sub(L,U,B_vec)\n\n print(\"solving Ax=B\")\n print(\"A = \\n\", np.array(A_matrix))\n print(\"B = \\n\", np.array(B_vec))\n print(\"A = LU\")\n print(\"L = \\n\", L)\n print(\"U = \\n\", U)\n print(\"\\nThus x is\\n\", x)\n\n\n\n#thinking raw (implementation of LUdecompose function)\n\n#trial 1: erase first column of u0 with u[0][0]-->u1 obtained \n#trial 2: erase second column with u1[1][1]-->u2 obtained\n#trial 3: erase third column with u2[2][2]-->u3 obtained\n#trial 4: erase fourth column with u3[3][3]-->u4 obtained\n#(trial ends in len(p[0])-1 th turn)","sub_path":"pywork2017/LUdecomposition.py","file_name":"LUdecomposition.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"651048414","text":"\"\"\"\nBased on rllab's logger.\n\nhttps://github.com/rll/rllab\n\"\"\"\nfrom enum import Enum\nfrom contextlib import contextmanager\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport datetime\nimport dateutil.tz\nimport csv\nimport json\nimport pickle\nimport errno\nimport torch\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nfrom lifelong_rl.core.logging.tabulate import tabulate\n\n\nclass TerminalTablePrinter(object):\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {\n '$enum': o.__module__ + \".\" + o.__class__.__name__ + '.'
+ o.name\n }\n elif callable(o):\n return {\n '$function': o.__module__ + \".\" + o.__name__\n }\n return json.JSONEncoder.default(self, o)\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\nclass Logger(object):\n def __init__(self):\n self.log_dir = ''\n\n self._log_to_tensorboard = False\n self._writer = None\n\n self._prefixes = []\n self._prefix_str = ''\n\n self._tabular_prefixes = []\n self._tabular_prefix_str = ''\n\n self._tabular = []\n\n self._text_outputs = []\n self._tabular_outputs = []\n\n self._text_fds = {}\n self._tabular_fds = {}\n self._tabular_header_written = set()\n\n self._snapshot_dir = None\n self._snapshot_mode = 'all'\n self._snapshot_gap = 1\n\n self._log_tabular_only = False\n self._header_printed = False\n self.table_printer = TerminalTablePrinter()\n\n self._plt_figs = []\n\n def reset(self):\n self.__init__()\n\n def _add_output(self, file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n fds[file_name] = open(file_name, mode)\n\n def _remove_output(self, file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n def push_prefix(self, prefix):\n self._prefixes.append(prefix)\n self._prefix_str = ''.join(self._prefixes)\n\n def add_text_output(self, file_name):\n self._add_output(file_name, self._text_outputs, self._text_fds,\n mode='w')\n\n def set_text_output(self, file_name):\n old_log_files = [old_file for old_file in self._text_fds]\n for old_file in old_log_files:\n self.remove_text_output(old_file)\n self.add_text_output(file_name)\n\n def remove_text_output(self, file_name):\n self._remove_output(file_name, self._text_outputs, self._text_fds)\n\n def add_tabular_output(self, file_name, relative_to_snapshot_dir=False):\n if relative_to_snapshot_dir:\n file_name = osp.join(self._snapshot_dir, file_name)\n self._add_output(file_name, self._tabular_outputs, self._tabular_fds,\n mode='w')\n\n def set_tabular_output(self, file_name, relative_to_snapshot_dir=False):\n if relative_to_snapshot_dir:\n file_name = osp.join(self._snapshot_dir, file_name)\n old_log_files = [old_file for old_file in self._tabular_fds]\n for old_file in old_log_files:\n self.remove_tabular_output(old_file)\n self.add_tabular_output(file_name, relative_to_snapshot_dir=relative_to_snapshot_dir)\n\n def get_tabular_output(self, ind=0):\n return self._tabular_outputs[ind]\n\n def remove_tabular_output(self, file_name, relative_to_snapshot_dir=False):\n if relative_to_snapshot_dir:\n file_name = osp.join(self._snapshot_dir, file_name)\n if self._tabular_fds[file_name] in self._tabular_header_written:\n self._tabular_header_written.remove(self._tabular_fds[file_name])\n self._remove_output(file_name, self._tabular_outputs, self._tabular_fds)\n\n def set_snapshot_dir(self, dir_name):\n self._snapshot_dir = dir_name\n\n def get_snapshot_dir(self, ):\n return self._snapshot_dir\n\n def get_snapshot_mode(self, ):\n return self._snapshot_mode\n\n def set_snapshot_mode(self, mode):\n self._snapshot_mode = mode\n\n def get_snapshot_gap(self, ):\n return self._snapshot_gap\n\n def set_snapshot_gap(self, gap):\n self._snapshot_gap = gap\n\n def set_log_tabular_only(self, log_tabular_only):\n self._log_tabular_only = log_tabular_only\n\n def get_log_tabular_only(self, ):\n return self._log_tabular_only\n\n def set_log_to_tensorboard(self, 
log_to_tensorboard):\n self._log_to_tensorboard = log_to_tensorboard\n self._writer = SummaryWriter(self.log_dir)\n\n def log(self, s, with_prefix=False, with_timestamp=True):\n out = s\n if with_prefix:\n out = self._prefix_str + out\n if with_timestamp:\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if not self._log_tabular_only:\n # Also log to stdout\n print(out)\n for fd in list(self._text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n def record_tabular(self, key, val):\n self._tabular.append((self._tabular_prefix_str + str(key), str(val)))\n\n def record_dict(self, d, prefix=None):\n if prefix is not None:\n self.push_tabular_prefix(prefix)\n for k, v in d.items():\n self.record_tabular(k, v)\n if prefix is not None:\n self.pop_tabular_prefix()\n\n def push_tabular_prefix(self, key):\n self._tabular_prefixes.append(key)\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n def pop_tabular_prefix(self, ):\n del self._tabular_prefixes[-1]\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n def output_dir(self):\n return self._snapshot_dir\n\n def savefig(self, save_name, fig=None):\n orig_save_name = save_name\n save_name = self._snapshot_dir + '/' + save_name\n os.makedirs(os.path.dirname(save_name), exist_ok=True)\n plt.savefig(save_name)\n\n def save_extra_data(self, data, file_name='extra_data.pkl', mode='joblib'):\n \"\"\"\n Data saved here will always override the last entry\n\n :param data: Something pickle'able.\n \"\"\"\n file_name = osp.join(self._snapshot_dir, file_name)\n if mode == 'joblib':\n import joblib\n joblib.dump(data, file_name, compress=3)\n elif mode == 'pickle':\n pickle.dump(data, open(file_name, \"wb\"))\n else:\n raise ValueError(\"Invalid mode: {}\".format(mode))\n return file_name\n\n def get_table_dict(self, ):\n return dict(self._tabular)\n\n def get_table_key_set(self, ):\n return set(key for key, value in self._tabular)\n\n @contextmanager\n def prefix(self, key):\n self.push_prefix(key)\n try:\n yield\n finally:\n self.pop_prefix()\n\n @contextmanager\n def tabular_prefix(self, key):\n self.push_tabular_prefix(key)\n yield\n self.pop_tabular_prefix()\n\n def log_variant(self, log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(variant_data, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n def record_tabular_misc_stat(self, key, values, placement='back'):\n if placement == 'front':\n prefix = \"\"\n suffix = key\n else:\n prefix = key\n suffix = \"\"\n if len(values) > 0:\n self.record_tabular(prefix + \"Average\" + suffix, np.average(values))\n self.record_tabular(prefix + \"Std\" + suffix, np.std(values))\n self.record_tabular(prefix + \"Median\" + suffix, np.median(values))\n self.record_tabular(prefix + \"Min\" + suffix, np.min(values))\n self.record_tabular(prefix + \"Max\" + suffix, np.max(values))\n else:\n self.record_tabular(prefix + \"Average\" + suffix, np.nan)\n self.record_tabular(prefix + \"Std\" + suffix, np.nan)\n self.record_tabular(prefix + \"Median\" + suffix, np.nan)\n self.record_tabular(prefix + \"Min\" + suffix, np.nan)\n self.record_tabular(prefix + \"Max\" + suffix, np.nan)\n\n def dump_tabular(self, *args, **kwargs):\n wh = kwargs.pop(\"write_header\", None)\n if len(self._tabular) > 0:\n if self._log_tabular_only:\n self.table_printer.print_tabular(self._tabular)\n else:\n for line in 
tabulate(self._tabular).split('\\n'):\n self.log(line, *args, **kwargs)\n\n tabular_dict = dict(self._tabular)\n\n if self._log_to_tensorboard:\n for key in tabular_dict:\n proc_key = key\n proc_key = proc_key.replace(' (s)', '')\n proc_key = proc_key.replace(' ', '_')\n proc_key = proc_key.lower()\n if '/' not in key or 'replay_buffer' in key:\n proc_key = 'misc/' + proc_key\n self._writer.add_scalar(proc_key, float(tabular_dict[key]), int(tabular_dict['Epoch']))\n\n # Also write to the csv files\n # This assumes that the keys in each iteration won't change!\n for tabular_fd in list(self._tabular_fds.values()):\n writer = csv.DictWriter(tabular_fd,\n fieldnames=list(tabular_dict.keys()))\n if wh or (\n wh is None and tabular_fd not in self._tabular_header_written):\n writer.writeheader()\n self._tabular_header_written.add(tabular_fd)\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del self._tabular[:]\n\n def pop_prefix(self, ):\n del self._prefixes[-1]\n self._prefix_str = ''.join(self._prefixes)\n\n def save_itr_params(self, itr, params, prefix='itr'):\n if self._snapshot_dir:\n if self._snapshot_mode == 'all':\n file_name = osp.join(self._snapshot_dir, '%s_%d.pt' % (prefix, itr))\n torch.save(params, file_name)\n elif self._snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(self._snapshot_dir, 'params.pkl')\n torch.save(params, file_name)\n elif self._snapshot_mode == \"gap\":\n if itr % self._snapshot_gap == 0:\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n torch.save(params, file_name)\n elif self._snapshot_mode == \"gap_and_last\":\n if itr % self._snapshot_gap == 0:\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n torch.save(params, file_name)\n file_name = osp.join(self._snapshot_dir, 'params.pkl')\n torch.save(params, file_name)\n elif self._snapshot_mode == 'none':\n pass\n else:\n raise NotImplementedError\n\n\nlogger = Logger()\n","sub_path":"lifelong_rl/core/logging/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":12280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"244733298","text":"# coding: utf-8\nimport os, sys, time, random\nimport settings\nimport pya3rt\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom slackbot.bot import respond_to # @botname: で反応するデコーダ\nfrom slackbot.bot import listen_to # チャネル内発言で反応するデコーダ\nfrom slackbot.bot import default_reply # 該当する応答がない場合に反応するデコーダ\n\n\"\"\"\n# @respond_to('string') bot宛のメッセージ\n# stringは正規表現が可能 「r'string'」\n# @listen_to('string') チャンネル内のbot宛以外の投稿\n# @botname: では反応しないことに注意\n# 他の人へのメンションでは反応する\n# 正規表現可能\n# @default_reply() DEFAULT_REPLY と同じ働き\n# 正規表現を指定すると、他のデコーダにヒットせず、\n# 正規表現にマッチするときに反応\n# ・・・なのだが、正規表現を指定するとエラーになる?\n\n# message.reply('string') @発言者名: string でメッセージを送信\n# message.send('string') string を送信\n# message.react('icon_emoji') 発言者のメッセージにリアクション(スタンプ)する\n# 文字列中に':'はいらない\n\"\"\"\n\n\"\"\"\n# Collection of Well known quotes for akinobu\n\"\"\"\nwell_known_quotes_df = pd.read_csv('assets/well-known-quotes.csv',\n encoding=\"shift-jis\",\n header=None)\n\"\"\"\n\"\"\"\napikey = os.environ['TALK_API_KEY']\nclient = pya3rt.TalkClient(apikey)\n\n@default_reply()\ndef send_message(message):\n \"\"\"\n メンションでのデフォルトの動作\n https://qiita.com/takahirono7/items/197375db24a03cbcd591#%E3%81%93%E3%81%AE%E8%A8%98%E4%BA%8B%E3%81%A7%E3%82%84%E3%82%8B%E3%81%93%E3%81%A8\n \"\"\"\n reply_message = client.talk(message.body['text'])\n # 以下の形式でjsonが返ってくるので、replyの部分をとりだす\n # {'status': 0, 
'message': 'ok', 'results': [{'perplexity': 1.2802554542585969, 'reply': '私にはよくわからないです'}]}\n message.reply(reply_message['results'][0]['reply'])\n\n\n@listen_to('疲れた')\n@listen_to('つかれた')\ndef listen_func(message):\n message.react('muscle')\n message.reply('がんばれーーーーー!!') # メンション\n\n\n@listen_to('辛い')\n@listen_to('つらい')\n@listen_to('turai')\ndef listen_func(message):\n message.react('+1')\n message.reply('がんばれーーーーー!!') # メンション\n\n\n@listen_to('あきのぶ')\n@listen_to('清水')\n@listen_to('名言')\n@listen_to('語録')\n@listen_to('先生')\n@listen_to('教授')\n@listen_to('感動')\n@listen_to('愉悦')\n@listen_to('歓喜')\n@listen_to('満悦')\n@listen_to('論文')\n@listen_to('研究')\n@listen_to('研究室')\n@listen_to('検定')\n@listen_to('分布')\n@listen_to('手法')\n@listen_to('提案')\ndef listen_func(message):\n idx = np.random.randint(0, len(well_known_quotes_df))\n message.reply(well_known_quotes_df.iloc[idx, 0])\n","sub_path":"plugins/my_mention.py","file_name":"my_mention.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"260206461","text":"\"\"\"\nWrite a program that checks to see if a number N is prime. A simple approach checks\nall numbers from 2 up to N, but after some point numbers are checked that need\nnot be checked. For example, numbers greater than\n√N need not be checked. Write a\nprogram that checks for primality and avoids those unnecessary checks. Remember to\nimport the math module.\n\"\"\"\nimport math\n\nnumber = input('Enter Integer: ')\nwhile not number.isdigit():\n print('Error: Please enter integer: ', end=' ')\n number = input(' ')\nelse:\n print('The integer is: ', number)\n\nn_number = int(number)\n# check divisors up to and including floor(sqrt(N)); range must extend one past it\nfor i in range(2, int(math.sqrt(n_number)) + 1):\n if n_number % i == 0:\n print(n_number, 'is not Prime number.')\n break\nelse:\n print(n_number, 'is a Prime number.')","sub_path":"chapter 2/prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"318407353","text":"import os\r\nimport pygame \r\nimport sys\r\nimport random\r\nimport numpy as np\r\nfrom collections import namedtuple\r\n\r\nPoint = namedtuple('Point', 'x, y')\r\n\r\nclass Snake(object):\r\n \r\n def __init__(self):\r\n self.length = 1\r\n self.positions = [((SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))]\r\n self.direction = UP\r\n self.color = (17, 24, 47)\r\n self.score = 0\r\n self.generation = 1\r\n self.frameIteration = 0\r\n self.reward = 0\r\n self.gameOver = False\r\n self.clock = pygame.time.Clock() # initialises clock\r\n self.Point = namedtuple('Point', 'x, y')\r\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)#gives parameters for screensize\r\n self.myfont = pygame.font.SysFont(\"monospace\", 16)\r\n \r\n self.clockSpeed = 100\r\n\r\n self.foodPosition = (random.randint(1, GRID_WIDTH-2) * GRIDSIZE, random.randint(1, GRID_HEIGHT-2) * GRIDSIZE)\r\n\r\n surface = pygame.Surface(self.screen.get_size())\r\n surface = surface.convert()\r\n drawGrid(surface)#creates a grid using the parameters from surface\r\n\r\n def getHeadPos(self):\r\n return self.positions[0]\r\n\r\n def turn(self, point):\r\n self.direction = point\r\n\r\n def move(self):\r\n reward = 0\r\n gameOver = False\r\n\r\n cur = self.getHeadPos()\r\n x, y = self.direction\r\n new = (((cur[0] + (x*GRIDSIZE)) % SCREEN_WIDTH), (cur[1] + (y*GRIDSIZE)) % SCREEN_HEIGHT)\r\n\r\n if new[0] == 0 or
new[0] == (SCREEN_WIDTH - 20) or new[1] == 0 or new[1] == (SCREEN_HEIGHT - 20):#hits wall\r\n reward = -10\r\n reward += (self.score * 10)\r\n gameOver = True\r\n return reward, gameOver, self.score\r\n #self.reset()\r\n elif len(self.positions) > 2 and new in self.positions[2:]:#hits self\r\n reward = -10\r\n reward += (self.score * 10)\r\n gameOver = True\r\n return reward, gameOver, self.score##<------------------------\r\n #self.reset()\r\n elif self.frameIteration > 100*(self.length):#lives too long\r\n reward = -10\r\n reward += (self.score * 10)\r\n gameOver = True\r\n return reward, gameOver, self.score\r\n else:#it is fine\r\n self.positions.insert(0, new)\r\n if len(self.positions) > self.length:\r\n self.positions.pop()#remove the last part of the tail, add a new part\r\n return reward, gameOver, self.score\r\n\r\n def collisionCheck(self, pt=None):\r\n if pt is None:\r\n cur = self.getHeadPos()\r\n x, y = self.direction\r\n new = (((cur[0] + (x*GRIDSIZE)) % SCREEN_WIDTH), (cur[1] + (y*GRIDSIZE)) % SCREEN_HEIGHT)\r\n pt = new\r\n\r\n if pt[0] == 0 or pt[0] == (SCREEN_WIDTH - 20) or pt[1] == 0 or pt[1] == (SCREEN_HEIGHT - 20):\r\n return True\r\n elif pt in self.positions[2:]:\r\n return True\r\n else:\r\n return False\r\n\r\n def gameOverCheck(self):\r\n if self.gameOver == True:\r\n self.reset()\r\n\r\n def reset(self):\r\n self.score = 0\r\n self.length = 1\r\n self.generation += 1\r\n self.frameIteration = 0\r\n self.positions = [((SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))]\r\n self.direction = RIGHT\r\n print(str(self.generation))\r\n self.foodPosition = (random.randint(1, GRID_WIDTH-2) * GRIDSIZE, random.randint(1, GRID_HEIGHT-2) * GRIDSIZE)\r\n\r\n def draw(self, surface):\r\n #temp = self.getHeadPos##need to convert getHeadPos into actual coordinates to compare\r\n cur = self.getHeadPos()\r\n x, y = self.direction\r\n new = (((cur[0] + (x*GRIDSIZE)) % SCREEN_WIDTH), (cur[1] + (y*GRIDSIZE)) % SCREEN_HEIGHT)\r\n for p in self.positions:\r\n if p == cur:\r\n r = pygame.Rect((p[0], p[1]), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, self.color, r)\r\n pygame.draw.rect(surface, (255, 255, 255), r, 1)\r\n else:\r\n r = pygame.Rect((p[0], p[1]), (GRIDSIZE, GRIDSIZE))#############this doesn't work/ head and body aren't separate colours ##why???##\r\n pygame.draw.rect(surface, self.color, r)\r\n pygame.draw.rect(surface, (0, 0, 0), r, 1)\r\n\r\n def handleInput(self, action):\r\n for event in pygame.event.get():#will make sure the entire game closes upon hitting the close button\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n clockWise = [1, 2, 3, 4]\r\n tempDirection = self.direction\r\n\r\n if tempDirection == UP:\r\n idx = 1\r\n elif tempDirection == RIGHT:\r\n idx = 2\r\n elif tempDirection == DOWN:\r\n idx = 3\r\n elif tempDirection == LEFT:\r\n idx = 4\r\n\r\n if np.array_equal(action, [1, 0, 0]):\r\n if idx == 5:\r\n idx = 1\r\n\r\n if idx == 1:\r\n tempDirection = UP\r\n elif idx == 2:\r\n tempDirection = RIGHT\r\n elif idx == 3:\r\n tempDirection = DOWN\r\n elif idx == 4:\r\n tempDirection = LEFT\r\n\r\n self.turn(tempDirection)\r\n\r\n elif np.array_equal(action, [0, 1, 0]):\r\n idx += 1\r\n\r\n if idx == 5:\r\n idx = 1\r\n\r\n if idx == 1:\r\n tempDirection = UP\r\n elif idx == 2:\r\n tempDirection = RIGHT\r\n elif idx == 3:\r\n tempDirection = DOWN\r\n elif idx == 4:\r\n tempDirection = LEFT\r\n\r\n
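# idx was stepped one place clockwise (UP->RIGHT->DOWN->LEFT), so tempDirection now holds the right turn; apply it\r\n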
self.turn(tempDirection)\r\n\r\n elif np.array_equal(action, [0, 0, 1]):\r\n idx -= 1\r\n\r\n if idx == 5:\r\n idx = 1\r\n\r\n if idx == 1:\r\n self.direction = UP\r\n elif idx == 2:\r\n self.direction = RIGHT\r\n elif idx == 3:\r\n self.direction = DOWN\r\n elif idx == 4:\r\n self.direction = LEFT\r\n\r\n self.turn(self.direction)\r\n #[up, down, left, right]\r\n\r\n ##\r\n\r\n\r\n def playStep(self, action):\r\n clock = pygame.time.Clock() # initialises close\r\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)#gives peramiters for screensize\r\n self.rewardAdd = 0\r\n surface = pygame.Surface(screen.get_size())\r\n surface = surface.convert()\r\n clock.tick(self.clockSpeed)\r\n self.frameIteration += 1\r\n self.handleInput(action)\r\n drawGrid(surface)\r\n rewardAdd0, gameOver0, score0 = self.move()\r\n headPos = self.getHeadPos()\r\n foodPos = self.foodPosition\r\n if headPos == foodPos:\r\n self.length += 1\r\n self.score += 1\r\n self.create()\r\n rewardAdd0 += 10\r\n self.draw(surface)\r\n self.drawFood(surface)\r\n self.drawFin(surface)\r\n\r\n\r\n\r\n return rewardAdd0, gameOver0, score0, \r\n \r\n\r\n def drawFin(self, surface):\r\n self.screen.blit(surface, (0, 0))\r\n text = self.myfont.render(\"Score {0}\".format(self.score), 1, (0, 0, 0))\r\n self.screen.blit(text, (5, 10))\r\n pygame.display.update()\r\n\r\n ##unused\r\n def locateDanger(self):\r\n cur = self.getHeadPos()\r\n x, y = self.direction\r\n new = (((cur[0] + (x*GRIDSIZE)) % SCREEN_WIDTH), ((cur[1] + (y*GRIDSIZE)) % SCREEN_HEIGHT))\r\n\r\n\r\n if new[0] == 0 or new[0] == (SCREEN_WIDTH - 20) or new[1] == 0 or new[1] == (SCREEN_HEIGHT - 20):\r\n reward = -10\r\n reward += (self.score * 10)\r\n gameOver = True\r\n return reward, gameOver, self.score\r\n #self.reset()\r\n elif len(self.positions) > 2 and new in self.positions[2:]:\r\n reward = -10\r\n reward += (self.score * 10)\r\n gameOver = True\r\n return reward, gameOver, self.score##<------------------------\r\n #self.reset().\r\n\r\n\r\n\r\n \r\n \r\n ####\r\n #fruit\r\n ####\r\n\r\n def create(self):#will create a fruit at a random position within the grid\r\n self.foodPosition = (random.randint(1, GRID_WIDTH-2) * GRIDSIZE, random.randint(1, GRID_HEIGHT-2) * GRIDSIZE)\r\n\r\n def drawFood(self, surface):#draws the fruit onscreen\r\n r = pygame.Rect((self.foodPosition[0], self.foodPosition[1]), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, self.color, r)\r\n pygame.draw.rect(surface, (93, 216, 228), r, 1)\r\n\r\n def getFoodX(self):\r\n return self.foodPosition[0]\r\n\r\n def getFoodY(self):\r\n return self.foodPosition[1]\r\n \r\n \r\n\r\n\r\n\r\nclass Food(object):\r\n\r\n def __init__(self):#initialising values for the Food object\r\n self.position = (0, 0)\r\n self.color = (223, 163, 49)\r\n self.create()\r\n\r\n def create(self):#will create a fruit at a random position within the grid\r\n self.position = (random.randint(1, GRID_WIDTH-2) * GRIDSIZE, random.randint(1, GRID_HEIGHT-2) * GRIDSIZE)\r\n\r\n def getX(self):\r\n return self.position[0]\r\n\r\n def getY(self):\r\n return self.position[1]\r\n\r\n def drawFood(self, surface):#draws the fruit onscreen\r\n r = pygame.Rect((self.position[0], self.position[1]), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, self.color, r)\r\n pygame.draw.rect(surface, (93, 216, 228), r, 1)\r\n\r\n\r\ndef drawGrid(surface):\r\n for y in range (0, int(GRID_HEIGHT)):\r\n for x in range (0, int(GRID_WIDTH)):\r\n if (x == 0 or x == GRID_WIDTH-1 or y == 0 or y == GRID_HEIGHT-1) :#if our current 
coordinate is anywhere along the outer edge of the grid\r\n r = pygame.Rect((x*GRIDSIZE, y*GRIDSIZE), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, (255, 0, 0), r)#draw red rectangles\r\n else:#everything within the grid\r\n if (x+y) % 2 == 0:#if we have an even number place the first colour\r\n r = pygame.Rect((x*GRIDSIZE, y*GRIDSIZE), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, (93, 216, 228), r)\r\n else:#if we have an odd number place the second colour\r\n r = pygame.Rect((x*GRIDSIZE, y*GRIDSIZE), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surface, (84, 194, 205), r)\r\n\r\n\r\n#global vars\r\n\r\n\r\n#set grid size, number within SQUARESIZE is the number of grid spaces within an axis (SQUARESIZE = 12 will make a 12*12 grid)\r\nSQUARESIZE = 12\r\n###########################\r\nSQUARESIZE = SQUARESIZE * 20\r\n\r\nSCREEN_WIDTH = SQUARESIZE\r\nSCREEN_HEIGHT = SQUARESIZE\r\n\r\n\r\nGRIDSIZE = 20\r\nGRID_WIDTH = SCREEN_HEIGHT / GRIDSIZE\r\nGRID_HEIGHT = SCREEN_WIDTH / GRIDSIZE#will divide our screen into squares each taking up 20 pixels\r\n\r\nUP = (0, -1)\r\nDOWN = (0, 1)\r\nLEFT = (-1, 0)\r\nRIGHT = (1, 0)\r\n\r\n\r\ndef main():\r\n #pygame.init()#pygame setup\r\n\r\n clock = pygame.time.Clock() # initialises close\r\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)#gives peramiters for screensize\r\n\r\n surface = pygame.Surface(screen.get_size())\r\n surface = surface.convert()\r\n drawGrid(surface)#creates a grid using the peramiters from surface\r\n\r\n #defines our objects to classes\r\n snake = Snake()\r\n food = Food()\r\n\r\n #sets the font for scoring\r\n myfont = pygame.font.SysFont(\"monospace\", 16)\r\n\r\n\r\n score = 0#unused global score\r\n \"\"\" while (True):\r\n clock.tick(10)\r\n snake.playStep()\r\n if snake.getHeadPos() == food.position:\r\n snake.length += 1\r\n snake.score += 1\r\n food.create()\r\n drawGrid(surface)\r\n snake.draw(surface)\r\n food.draw(surface)\r\n screen.blit(surface, (0, 0))\r\n text = self.myfont.render(\"Score {0}\".format(snake.score), 1, (0, 0, 0))\r\n screen.blit(text, (5, 10))\r\n pygame.display.update() \"\"\"\r\n\r\n #while (True):\r\n #clock.tick(10)\r\n #snake.frameIteration += 1\r\n #snake.handleInput()\r\n #drawGrid(surface)\r\n #snake.move()\r\n #if snake.getHeadPos() == food.position:\r\n #snake.length += 1\r\n #snake.score += 1\r\n #food.create()\r\n #snake.draw(surface)\r\n #food.draw(surface)\r\n # screen.blit(surface, (0, 0))\r\n # text = myfont.render(\"Score {0}\".format(snake.score), 1, (0, 0, 0))\r\n # screen.blit(text, (5, 10))\r\n # pygame.display.update()\r\n#main()","sub_path":"1. 
Snake/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":12775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"382726759","text":"from app import create_app, db\nfrom flask_script import Manager, Server\nfrom flask_migrate import Migrate, MigrateCommand\nfrom app.models import User, Mininfo,Comments\n\napp = create_app('development')\n\nmanager = Manager(app)\nmanager.add_command('server', Server)\n\n@manager.shell\n\ndef shell():\n return dict(app = app, User=User, db= db, Mininfo=Mininfo, Comments=Comments)\n\nmigrate = Migrate(app, db)\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n manager.run()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509306650","text":"import sys\n# -*- coding: -*-\nfrom com.myfunctions import print_contents, test_keywords_parameters, test_named_keywords_parameters\n\nif __name__ == '__main__':\n\n # print(my_abs(-100))\n # print(my_power(2))\n # print(my_power(2, 3))\n # print(my_add_end([1, 2, 3, 4, 5]))\n # print(my_add_end())\n # print(my_add_end())\n # print(my_calc(1, 2, 3, 4, flag=3))\n # print(my_calc(*[10, 20, 30, 40], flag=10))\n #\n # a = {'name': 1, 'age': 2, 'home': 3}\n # for key, value in a.items():\n # print(key, value)\n #\n # for key in a.keys():\n # print(key)\n #\n # for value in a.values():\n # print(value)\n #\n # a = time.time()\n # print(\"current time is\", a)\n\n # fw = open(\"data.log\", 'w')\n # for x in range(100):\n # fw.write(str(x) + '\\n')\n #\n # fw.close()\n #\n # fr = open(\"data.log\", 'r')\n # for line in fr:\n # line = line.strip()\n # print(line)\n #\n # fr.close()\n #\n # try:\n # print(1 / 0)\n # except Exception as e:\n # print(e)\n # else:\n # print('No exception')\n # finally:\n # print(\"I'm sage\")\n\n # read contents from file\n # fr = open(\"xyj.txt\", 'r', encoding='utf-8')\n # characters = []\n # stat = {}\n #\n # for line in fr:\n # line = line.strip()\n # if len(line) == 0:\n # continue\n #\n # for x in range(0, len(line)):\n # if not line[x] in characters:\n # characters.append(line[x])\n #\n # if not line[x] in stat:\n # stat[line[x]] = 0\n # stat[line[x]] += 1\n #\n # print(len(characters))\n # for key, value in stat.items():\n # print(key, value)\n #\n # # write contents to file\n # fw = open(\"result.txt\", 'w')\n # # this is the correct way to read the contents from dictionary\n # for content in stat.items():\n # fw.write(content[0] + ',' + str(content[1]) + '\\n')\n #\n # fw.close()\n # fr.close()\n # index = [1, 2, 3, 4, 5, 6, 7]\n # label = ('s', 'a', 'g', 'e', 'name')\n # print_contents(*index)\n # print_contents(*label)\n\n name = 'sage'\n age = 23\n others = {'job': 'programmer', 'address': 'longhua'}\n test_keywords_parameters(age, name, **others)\n test_keywords_parameters(age, name, city='beijing', job='ee')\n test_named_keywords_parameters(age, name, *[1, 2, 3, 4, 5, 6], city='china')\n\n\n","sub_path":"com/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606670907","text":"import numpy as np\nfrom sklearn import svm\nimport argparse\nimport datasets.uci_adult\n\n\nclass SupportVectorClassifier:\n\n clf_model = None\n\n def __init__(self):\n self.clf_model = svm.LinearSVC()\n print(\"Created SupportVectorClassifier\")\n\n 
def train(self, t_data):\n data = t_data[0]\n labels = t_data[1]\n print(\"Fitting model to training data.\")\n self.clf_model.fit(data, labels)\n print(\"Training Acc: \", self.clf_model.score(data, labels))\n\n def test(self, data):\n print(\"Beginning Test\")\n predict_vec = self.clf_model.predict(data[0])\n predict_vec = predict_vec == data[1]\n print(\"Test accuracy: \", np.sum(predict_vec)/len(predict_vec))\n\n\nif __name__ == '__main__':\n\n parse = argparse.ArgumentParser()\n parse.add_argument(\"--train_data\", default=\"\")\n parse.add_argument(\"--test_data\", default=\"\")\n args = parse.parse_args()\n\n train_data = args.train_data\n test_data = args.test_data\n if train_data == \"\":\n x_data, y_data = datasets.uci_adult.data()\n train_data = [x_data[0:10000], y_data[0:10000]]\n test_data = [x_data[10000:20000], y_data[10000:20000]]\n\n model = SupportVectorClassifier()\n model.train(train_data)\n model.test(test_data)\n","sub_path":"supportvectormachine.py","file_name":"supportvectormachine.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476018274","text":"import os\nimport some_script\nimport config\n\ndef run():\n\tprint(\"hello world\")\n\tsome_script.execute()\n\ndef parse_tab_delimited_file(file1):\n\twith open(file1) as F:\n\t\tfor line in F:\n\t\t\tline = line.strip('\\n').split('\\t')\n\t\t\t#line will be a list of the columns of your file\n\t\t\tchrom,start,stop = line\n\n#Takes as input a list of bed files (full path)\ndef bedtools_intersect(BEDS,outdir):\n\tcommand = \"bedtools intersect -a \" + BEDS[0] + \" -b \" + ' '.join(BEDS[1:])\n\tprint(command)\n\toutfile = outdir+'rep_intersect.bed'\n\t#os.system(\"bedtools intersect -a \" + BEDS[0] + \" -b \" + ' '.join(BEDS[1:] + \" > \" + outfile)\n\nif __name__ == \"__main__\":\n\tprint(\"hello different world\")\n\trun()\n\tos.system(\"ls\")\n\tprint(config.BEDS)\n\ta = \"hello\"\n\tprint(a+\" world2\")\n\toutdir = '../temp_files/'\n\tbedtools_intersect(config.BEDS,outdir)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"245460432","text":"import requests\nimport math\nfrom datetime import datetime\nimport settings\nimport shutil\nimport os\nimport sys\nimport webbrowser\nimport locale\nimport codecs\nfrom collections import namedtuple\nfrom jinja2 import Template\n\n\"\"\"\n Преобразование координат из метрической системы (которую выдает сервис Росреестра) в WGS-84 \n Позаимствовано отсюда: https://github.com/rendrom/rosreestr2coord/blob/master/scripts/utils.py \n\"\"\"\n\n\ndef y2lat(y):\n return (2 * math.atan(math.exp(y / 6378137)) - math.pi / 2) / (math.pi / 180)\n\n\ndef x2lon(x):\n return x / (math.pi / 180.0) / 6378137.0\n\n\ndef xy2lonlat(x, y):\n return [x2lon(x), y2lat(y)]\n\n\ndef degrees2dms(coord):\n \"\"\"\n Конвертация координаты, заданной в формате Градусы.ДробнаяЧасть, в формат градусы, минуты, секунды.\n coord - координата в формате float\n Возвращает ответ в строковом формате\n Тест: degrees2dms(61.234567) Ответ 61°14ʹ4ʺ\n\n \"\"\"\n degrees = int(coord)\n minutes = (coord - float(degrees)) * 60.0\n seconds = (minutes - int(minutes)) * 60.0\n return '{deg:02d}°{min:02d}ʹ{sec:02d}ʺ'.format(deg=degrees, min=int(minutes), sec=int(seconds))\n\n\ndef get_nomenclature(lat, lon):\n \"\"\"\n Определение номенклатуры листа карты M1:200000 по координатам.\n 
Параметры: lat, lon - широта и долгота в десятичном виде\n Возвращаемое значение: Строка вида O-41-25\n \"\"\"\n letter_n = int(int(lat) / 4) + 1 # Номер ряда\n lat_north = letter_n * 4\n letter = chr(64 + letter_n) # Преобразуем номер ряда в букву\n\n colonna = int(int(lon) / 6) + 31\n lon_west = (colonna - 31) * 6\n\n # В масштабе 1:200000 лист делится на 6x6 = 36 частей: через 40 сек по широте и через 1 градус по долготе\n col = int((lon - float(lon_west))) + 1\n\n row = int(((float(lat_north) - lat) * 3) / 2) + 1\n if row > 6:\n row = 6\n\n cell = 6 * (row - 1) + col\n return '{0}-{1}-{2}'.format(letter, colonna, cell)\n\n\ndef parse_cadaster(input_str):\n \"\"\"\n Обрабатывает входной параметр - кадастровый номер, который может быть в традиционной форме (aa:bb:ccccccc:ee..)\n Кроме того, он может быть с разделителями - пробелами либо вообще без разделителей.\n Возвращает кадастровый номер в традиционной форме.\n :type input_str: str\n \"\"\"\n if input_str.find(':') > -1: # Кадастровый номер задан в традиционном формате\n return input_str\n cn = ''.join(input_str.split()) # Удаляем пробелы, если они есть\n cadaster = f'{cn[0:2]}:{cn[2:4]}:{cn[4:11]}:{cn[11:]}'\n return cadaster\n\n\ndef parse_coords(input_str):\n \"\"\"\n Обрабатывает входной параметр - координаты центра участка, которые могут быть заданы через пробел (lat lon)\n Либо в таком формате (через запятую): lat, lon\n В качестве десятичного разделителя может быть точка или запятая\n Возвращает кортеж (lat, lon) в виде строк с десятичной точкой\n :type input_str: str\n \"\"\"\n (lat, lon) = input_str.replace(', ', ' ').replace(',', '.').split()\n\n return lat, lon\n\n\ndef get_obj_id(cadaster):\n \"\"\"\n Получить id участка по его кадастровому номеру.\n obj_id - это кадастровый номер с убранными ведущими нулями.\n Типичный кадастровый номер 66:06:0301012:102\n Для этого кадастрового номера id = 66:6:301012:102\n При успешном завершении возвращает id, при ошибке - пустую строку (False)\n \"\"\"\n try:\n codes = cadaster.split(':')\n if len(codes) != 4:\n return False\n\n lst = [str(int(code)) for code in codes] # При преобразовании строки в целое исчезают лидирующие нули\n obj_id = ':'.join(lst)\n\n return obj_id\n\n except ValueError as e:\n print(e)\n\n return False\n\n\ndef get_info(cadaster):\n \"\"\"\n Выполним GET запрос к API Росеестра и получим данные об участке.\n Функция возвращает именованный кортеж с полями:\n\n errmsg В случае ошибки сюда записывается сообщение об ошибке\n address\n coords\n lat\n lon\n nomenclature\n info\n ozi_info\n cadaster\n yandex_url\n yandex_url_static\n\n \"\"\"\n Result = namedtuple('Result', 'errmsg address coords lat lon nomenclature info brief ozi_info cadaster yandex_url '\n 'yandex_url_static')\n Result.info = ''\n Result.errmsg = ''\n Result.cadaster = cadaster\n\n obj_id = get_obj_id(cadaster)\n if not obj_id:\n Result.errmsg = 'Ошибка(опечатка) в кадастровом номере!'\n return Result\n\n # Эти URL иногда меняются, из-за чего все перестает работать!\n # url = f'http://pkk5.rosreestr.ru/api/features/1/{obj_id}'\n # url = f'http://pkk.rosreestr.ru/api/features/1/{obj_id}'\n url = f'http://pkk.rosreestr.ru/api/features/1/{obj_id}'\n\n # Метод GET не всегда работает нормально с API Росеестра, поскольку там идет редирект на https, а сервис,\n # отвечающий по https, иногда падает\n # С помощью Postman выявлено, что лучше рабо��ать с POST запросом с такими заголовками, при этом редирект\n # Росеестра игнорируется:\n headers = {\n 'User-Agent': \"PostmanRuntime/7.15.0\",\n 'Accept': 
\"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"cc512150-e155-495d-9099-41f401b57e55,c40e8aca-f791-4afa-b8a0-f0f23163ca8d\",\n 'accept-encoding': \"gzip, deflate\",\n 'referer': url,\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n try:\n # r = requests.get(url, timeout=5) # Метод GET Не всегда работает!\n # установить verify=False, если вылезает ошибка ... certificate verify failed: unable to get local issuer certificate ...\n r = requests.request('POST', url, headers=headers, timeout=15, verify=False)\n r.raise_for_status() # Проверим на ошибки HTTP\n data = r.json()\n except requests.exceptions.Timeout as e:\n Result.errmsg = 'Ошибка по тайм-ауту: `{}`'.format(e)\n return Result\n except requests.exceptions.HTTPError as e:\n Result.errmsg = 'Ошибка HTTP: `{}`'.format(e)\n return Result\n except requests.exceptions.RequestException as e:\n Result.errmsg = 'Ошибка запроса: `{}`'.format(e)\n return Result\n\n if data:\n try:\n feature = data.get('feature')\n if feature:\n if feature.get('attrs'):\n attrs = feature['attrs']\n Result.address = attrs['address']\n if Result.address is None:\n Result.address = 'Адрес не задан'\n else:\n Result.address = ''\n\n center_x = feature['center']['x']\n center_y = feature['center']['y']\n lat = y2lat(center_y)\n lon = x2lon(center_x)\n lat_s = str(round(lat, 6))\n lon_s = str(round(lon, 6))\n Result.lat = lat_s\n Result.lon = lon_s\n\n Result.yandex_url = (\n 'https://yandex.ru/maps/?mode=search&text={latitude}%2C{longitude}'\n ).format(latitude=lat_s, longitude=lon_s)\n Result.yandex_url_static = (\n 'https://static-maps.yandex.ru/1.x/?pt={longitude},{latitude},comma&z=13&size=600,450&l=map'\n ).format(latitude=lat_s, longitude=lon_s)\n\n Result.nomenclature = get_nomenclature(lat, lon)\n Result.coords = '{0}\\t{1}'.format(lat_s.replace('.', ','), lon_s.replace('.', ','))\n\n Result.info = (\n 'Кадастровый номер: {cad}\\n'\n 'Адрес: {addr}\\n'\n 'Координаты:\\n{lat:9}\\t{lon:9}\\n{latdms} с.ш. 
{londms} в.д.\\n'\n 'Лист М 1:200000 : {nomenclature}\\n\\n'\n '{url}\\n\\n'\n 'file://d:/OziExplorer/data/CADASTER.txt\\n'\n 'Datum,WGS 84\\n'\n 'WP,D,{cad},{latp:9},{lonp:9},{day},{time},,D,N,-9999\\n'\n ).format(addr=Result.address, lat=lat_s.replace('.', ','), latdms=degrees2dms(lat),\n lon=lon_s.replace('.', ','), londms=degrees2dms(lon), nomenclature=get_nomenclature(lat, lon),\n url=Result.yandex_url, day=datetime.now().strftime('%m/%d/%y'),\n cad=cadaster, latp=lat_s, lonp=lon_s,\n time=datetime.now().strftime('%H/%M/%S'))\n\n Result.brief = (\n 'Кадастровый номер: {cad}\\n'\n 'Адрес: {addr}\\n'\n 'Координаты: {lat:9} {lon:9}\\n\\n'\n 'Лист М 1:200000 : {nomenclature}\\n'\n ).format(cad=cadaster, addr=Result.address, lat=lat_s.replace('.', ','),\n lon=lon_s.replace('.', ','), nomenclature=get_nomenclature(lat, lon)\n )\n\n Result.ozi_info = (\n 'Datum,WGS 84\\n'\n 'WP,D,{cad},{latp:9},{lonp:9},{day},{time},,D,N,-9999\\n'\n ).format(day=datetime.now().strftime('%m/%d/%y'),\n cad=cadaster, latp=lat_s, lonp=lon_s,\n time=datetime.now().strftime('%H/%M/%S'))\n\n else:\n Result.errmsg = 'Данных об участке нет'\n\n except Exception as err:\n Result.errmsg = 'Ошибка get_info(): {0}'.format(repr(err))\n\n return Result\n\n\ndef get_info_by_coords(lat, lon, address):\n \"\"\"\n Получим данные об участке по координатам его центра.\n Функция возвращает именованный кортеж с полями:\n\n errmsg В случае ошибки сюда записывается сообщение об ошибке\n address\n coords\n ym_coords\n lat\n lon\n nomenclature\n info\n ozi_info\n cadaster\n yandex_url\n yandex_url_static\n\n \"\"\"\n Result = namedtuple('Result', 'errmsg address coords ym_coords lat lon nomenclature info brief ozi_info '\n 'cadaster yandex_url yandex_url_static')\n Result.info = ''\n Result.errmsg = ''\n Result.cadaster = 'Участок'\n Result.address = address\n Result.lat = lat\n Result.lon = lon\n lat_f = float(lat)\n lon_f = float(lon)\n\n Result.yandex_url = (\n 'https://yandex.ru/maps/?mode=search&text={latitude}%2C{longitude}'\n ).format(latitude=lat, longitude=lon)\n Result.yandex_url_static = (\n 'https://static-maps.yandex.ru/1.x/?pt={longitude},{latitude},comma&z=13&size=600,450&l=map'\n ).format(latitude=lat, longitude=lon)\n\n Result.nomenclature = get_nomenclature(lat_f, lon_f)\n Result.coords = '{0}\\t{1}'.format(lat.replace('.', ','), lon.replace('.', ','))\n Result.ym_coords = '{}, {}'.format(lat, lon)\n\n Result.info = (\n 'Кадастровый номер: {cad}\\n'\n 'Адрес: {addr}\\n'\n 'Координаты:\\n{lat:9}\\t{lon:9}\\n{latdms} с.ш. 
{londms} в.д.\\n'\n 'Лист М 1:200000 : {nomenclature}\\n\\n'\n '{url}\\n\\n'\n 'file://d:/OziExplorer/data/CADASTER.txt\\n'\n 'Datum,WGS 84\\n'\n 'WP,D,{cad},{latp:9},{lonp:9},{day},{time},,D,N,-9999\\n'\n ).format(addr=Result.address, lat=lat.replace('.', ','), latdms=degrees2dms(lat_f),\n lon=lon.replace('.', ','), londms=degrees2dms(lon_f), nomenclature=get_nomenclature(lat_f, lon_f),\n url=Result.yandex_url, day=datetime.now().strftime('%m/%d/%y'),\n cad=Result.cadaster, latp=lat, lonp=lon,\n time=datetime.now().strftime('%H/%M/%S'))\n\n Result.brief = (\n 'Кадастровый номер: {cad}\\n'\n 'Адрес: {addr}\\n'\n 'Координаты: {lat:9} {lon:9}\\n\\n'\n 'Лист М 1:200000 : {nomenclature}\\n'\n ).format(cad=Result.cadaster, addr=Result.address, lat=lat.replace('.', ','),\n lon=lon.replace('.', ','), nomenclature=Result.nomenclature)\n\n Result.ozi_info = (\n 'Datum,WGS 84\\n'\n 'WP,D,{cad},{latp:9},{lonp:9},{day},{time},,D,N,-9999\\n'\n ).format(day=datetime.now().strftime('%m/%d/%y'),\n cad=Result.cadaster, latp=lat, lonp=lon,\n time=datetime.now().strftime('%H/%M/%S'))\n\n return Result\n\n\ndef modify_tex_file(filename, address, cadaster, nomenclature, coords, phone):\n \"\"\"\n Заменим поля адреса участка, кадастрового номера и номенклатуры в шаблонном tex-файле на\n реальные.\n Используется шаблонизатор Jinja2\n \"\"\"\n\n # Словарь, содержащий ключи - имена переменных в tex шаблоне, которые заменяются на значения,\n # переданные в функцию\n # Например, строка \\newcommand{\\txtAddress}{{ ADDRESS }} заменяется на\n # \\newcommand{\\txtAddress}{Свердловская обл., р-н Каменский, СТ Россия}\n data = {\n 'ADDRESS': '{' + address + '}',\n 'CADASTER': '{' + cadaster + '}',\n 'NOMENCLATURE': '{' + nomenclature + '}',\n 'PHONE': '{' + phone + '}',\n 'COORDINATES': '{' + coords + '}',\n }\n\n # Прочитаем файл целиком\n try:\n with codecs.open(filename, 'r', 'utf-8') as input_file:\n template = Template(input_file.read())\n tex = template.render(**data)\n except IOError as e:\n print('*** Ошибка чтения файла', e, file=sys.stderr)\n return\n\n # А теперь запишем все в тот же файл :\n try:\n with codecs.open(filename, 'w', 'utf-8') as out_file:\n out_file.write(tex)\n except IOError as e:\n print('*** Ошибка записи файла', e, file=sys.stderr)\n\n return\n\n\ndef make_ozi_file(filename, content):\n \"\"\"\n Запишем координаты в файл Ozi Explorer Waypoints.\n filename - полный путь к файлу\n \"\"\"\n try:\n with open(filename, 'w') as out_file:\n out_file.write(content)\n print('>>> Создаем файл Ozi Waypoints ', filename)\n except IOError as e:\n print('*** Ошибка записи в файл {0}: {1} '.format(filename, e), file=sys.stderr)\n\ndef make_reestr_file(filename, content):\n \"\"\"\n Запишем информацию в файл реестра изысканий.\n filename - полный путь к файлу\n \"\"\"\n try:\n with open(filename, 'a') as out_file:\n out_file.write(content)\n print('>>> Пишем инфу в файл реестра ', filename)\n except IOError as e:\n print('*** Ошибка записи в файл {0}: {1} '.format(filename, e), file=sys.stderr)\n\n\ndef gen_report_folder(addr):\n \"\"\"\n # Сгенерировать имя папки по шаблону: <Адрес> <Месяц> <Год>\n addr - адрес\n Возвращает сгенерированную строку\n \"\"\"\n locale.setlocale(locale.LC_ALL, \"\") # Чтобы дата и время выдавались в текущей локали\n return '{0} {1}'.format(addr.replace('\\\"', ''), datetime.now().strftime('%B %Y'))\n\n\ndef gen_bhpassport_folder(addr):\n \"\"\"\n # Сгенерировать имя папки по шаблону: <Паспорт> <Адрес>\n addr - адрес\n Возвращает сгенерированную строку\n \"\"\"\n return 'Паспорт 
{0}'.format(addr.replace('\\\"', ''))\n\n\ndef copy_template_folder(src, dst):\n    \"\"\"\n    Копируем папку с шаблоном отчета или паспорта в папку с изысканиями (паспортом).\n    src - полный путь к папке с шаблонами отчета\n    dst - полный путь к папке c отчетами по изысканиям\n    \"\"\"\n    retval = ''\n    try:\n        print('>>> Копируем шаблон отчета в папку: {0}'.format(dst))\n        shutil.copytree(src, dst)\n        print('>>> Шаблон скопирован.')\n    except IOError as e:\n        retval = '*** Ошибка копирования: {0}'.format(e)\n    return retval\n\n\n#######################################################################################################################\n#\n# M A I N\n#\n#######################################################################################################################\nif __name__ == '__main__':\n    # input_txt = '66:06:4501021:005728'\n    # input_txt = '66 06 4501021 005728'\n    # input_txt = '6666010102358'\n    input_txt = input('Введите кадастровый номер : ')\n    cadaster = parse_cadaster(input_txt)\n    print('===========================================================================================================')\n    print('Кадастровый номер ---> {}'.format(cadaster))\n\n    area = get_info(cadaster) # получим именованный кортеж area\n    if area.errmsg: # Была ошибка\n        print('*** ОШИБКА *** ', area.errmsg, file=sys.stderr)\n        exit(1)\n\n    print(area.info)\n#    print(area.brief)\n    print('===========================================================================================================')\n\n    # Запишем координаты в файл Ozi Explorer Waypoints\n    make_ozi_file(settings.OZI_WAYPOINTS_FILE, area.ozi_info)\n\n    locale.setlocale(locale.LC_ALL, \"\") # Чтобы дата и время выдавались в текущей локали\n\n    address = area.address\n    nomenclature = area.nomenclature\n    coords = area.coords\n\n    dst_folder = gen_report_folder(area.address)\n\n    # Копируем папку с шаблоном отчета в папку с изысканиями\n    dst_path = os.path.join(settings.REPORTS_PATH, dst_folder)\n    # print('>>> Копируем шаблон отчета в папку', dst_path)\n    err = copy_template_folder(settings.TEX_TEMPLATE_PATH, dst_path)\n    if err:\n        print(err, file=sys.stderr)\n        exit(-1)\n    else:\n        print('>>> Шаблон скопирован')\n\n    # Заменим в файле шаблона water.tex адрес, кад. номер и номенклатуру на реальные\n    filename = os.path.join(dst_path, settings.TEX_TEMPLATE_FILE)\n    # phone is a required argument of modify_tex_file; this script collects no phone, so pass an empty string\n    modify_tex_file(filename, address, cadaster, nomenclature, coords, '')\n\n    # Откроем проводник в папке назначения\n    webbrowser.open(dst_path)\n","sub_path":"cadastron.py","file_name":"cadastron.py","file_ext":"py","file_size_in_byte":20541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52516821","text":"# \"r\" - Read - Default value. Opens a file for reading, error if the file does not exist\n# \"a\" - Append - Opens a file for appending, creates the file if it does not exist\n# \"w\" - Write - Opens a file for writing, creates the file if it does not exist\n# \"x\" - Create - Creates the specified file, returns an error if the file exists\n# \"t\" - Text - Default value. Text mode\n# \"b\" - Binary - Binary mode (e.g. 
images)\n\n\n# Sample Read Some File\nf = open(\"./Materi/Helper/demofile.txt\", \"r\")\nprint(f.read())\n# Cannot be used again: the read cursor is already at the end of the file\nprint(f.read(5))\n\n\nf = open(\"./Materi/Helper/demofile.txt\", \"r\")\nprint(f.readline())\nprint(f.readline())\nf.close()\n\n\n# Sample Write / Create File\nf = open(\"./Materi/Helper/demofile.txt\", \"a\")\nf.write(\"Now the file has more content!\")\nf.close()\n\nf = open(\"./Materi/Helper/demofile.txt\", \"w\")\nf.write(\"Woops! I have deleted the content!\")\nf.close()\n\n\n# Sample Delete File\n\n# import os\n# if os.path.exists(\"demofile.txt\"):\n#     os.remove(\"demofile.txt\")\n# else:\n#     print(\"The file does not exist\")\n\n# os.rmdir(\"myfolder\")\n","sub_path":"Materi/Basic/37.file_handling.py","file_name":"37.file_handling.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314495060","text":"\"\"\"\nP156. Selection sort\n    Basic idea: first, find the smallest element in the array and swap it with the first element of the array.\n    Next, find the smallest element among the remaining ones and swap it with the second element. Repeat until the whole array is sorted.\n    8 5 2 6 9 3 1 4 0 7\n    0 5 2 6 9 3 1 4 8 7\n    0 1 2 6 9 3 5 4 8 7\n    0 1 2 3 9 6 5 4 8 7\n    0 1 2 3 4 6 5 9 8 7\n    0 1 2 3 4 5 6 9 8 7\n    0 1 2 3 4 5 6 7 8 9\n    Time complexity: O(n^2)\n    Characteristics:\n    1. The running time is independent of the input order\n    2. Data movement is minimal. Working in place is selection sort's only advantage; when space is at a premium, selection sort is worth considering.\n\"\"\"\ndef Selection(array):\n    l = len(array)\n    for i in range(l):\n        min = i\n        #range(start, stop[, step])\n        #start: counting starts from start, 0 by default. E.g. range(5) is equivalent to range(0, 5);\n        #stop: counting stops at stop, but stop itself is excluded. E.g. range(0, 5) is [0, 1, 2, 3, 4], without 5\n        #step: step size, 1 by default. E.g. range(0, 5) is equivalent to range(0, 5, 1)\n        for j in range (i+1, l):\n            if (array[min]>array[j]):\n                min = j\n        if min != i:\n            array[i],array[min] = array[min],array[i]\n    return array\nif __name__ == '__main__':\n    print(Selection([17,23,20,14,12,25,1,20,81,14,11,12]))","sub_path":"算法第四版(python)/第二章 排序/2.1初级排序算法/01Selection.py","file_name":"01Selection.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135597591","text":"from sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC, LinearSVR, SVC, SVR, NuSVC, OneClassSVM\nfrom sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB, ComplementNB\nfrom sklearn.neural_network import MLPClassifier\nimport settings\nfrom FeatureExtraction import FeatureExtraction\nfrom LoadFile import FileStore, FileReader\nfrom sklearn import metrics\n\nclass Classifier(object):\n    def __init__(self, features_train = None, labels_train = None, features_test = None, labels_test = None, estimator = SVC( C=2)):\n        self.features_train = features_train\n        self.features_test = features_test\n        self.labels_train = labels_train\n        self.labels_test = labels_test\n        self.estimator = estimator\n\n    def training(self):\n        self.estimator.fit(self.features_train, self.labels_train)\n        print(self.estimator.coef_)\n        self.__training_result()\n\n    def save_model(self, filePath): \n        FileStore(filePath=filePath).save_pickle(obj=est)\n    # returns the classification result\n    def __training_result(self):\n        y_true, y_pred = self.labels_test, self.estimator.predict(self.features_test)\n        # self.estimator.predict(self.features_test) returns the predicted labels for features_test\n        print(\"Test accuracy: \", metrics.accuracy_score(y_true, y_pred))\n        print(\"Test Precision-Recall : \")\n        print(classification_report(y_true, y_pred))\n\nif __name__ == '__main__':\n    train_loader = 
FileReader(filePath=settings.DATA_TRAIN_JSON)\n    test_loader = FileReader(filePath=settings.DATA_TEST_JSON)\n    data_train = train_loader.read_json()\n    data_test = test_loader.read_json()\n    features_train, labels_train = FeatureExtraction(data=data_train).get_data_and_label()\n    # X_train,y_train,X_val,y_val= train_test_split(features_train,labels_train,test_size=0.1, random_state=42)\n    features_test, labels_test = FeatureExtraction(data=data_test).get_data_and_label()\n\n    # est = Classifier(features_train=features_train, features_test=features_test, labels_train=labels_train,\n    #                  labels_test=labels_test)\n    est = Classifier(features_train=features_train, features_test=features_test, labels_train=labels_train,\n                     labels_test=labels_test)\n    est.training()\n\n    # est.save_model(filePath='trained_model/linear_svc_3c_9k.pk')","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"126594103","text":"# Here we subscribe to the orders for the dehumidifier, to turn the dehumidifier ON or OFF\r\n\r\nimport datetime\r\nimport paho.mqtt.client as paho\r\nimport requests\r\nimport json\r\nimport time\r\nfrom relayControl import RelayControl\r\n\r\nclass SubscribeAcOrder(object):\r\n    def __init__(self, url, roomId, client):\r\n        self.url =url\r\n        self.room_Id = roomId\r\n        self.source = \"DEHUM\"\r\n        # create an object from RelayControl class\r\n        self.controlling_Relay = RelayControl(url, roomId,self.source)\r\n        self.client = client\r\n        self.client.on_subscribe = self.on_subscribe\r\n        self.client.on_message = self.on_message\r\n    def load_topics(self):\r\n        # sending request to get the topic by sending the room_id to the resource catalog\r\n        try:\r\n            self.respond = requests.get(self.url + self.room_Id)\r\n            json_format = json.loads(self.respond.text)\r\n            self.AC_status = json_format[\"topic\"][\"dehumOrder\"]\r\n        except:\r\n            print(\"* dehumSubscriber: ERROR IN CONNECTING TO THE SERVER FOR READING BROKER TOPICS *\")\r\n    def on_subscribe(self, client, userdata, mid, granted_qos):\r\n        get_time = datetime.datetime.now()\r\n        current_time = get_time.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n        print(\"Subscribed at time: \" + str(current_time))\r\n    def on_message(self,client, userdata, msg):\r\n        get_time = datetime.datetime.now()\r\n        current_time = get_time.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n        print(\"message received \", str(msg.payload.decode(\"utf-8\")))\r\n        print(\"at time: \" + str(current_time))\r\n        print(\"--------------------------------------------------------------------\")\r\n        message_body = str(msg.payload.decode(\"utf-8\"))\r\n        self.payload = json.loads(message_body)\r\n        print(self.payload[\"order\"])\r\n        self.orders = self.payload[\"order\"]\r\n        self.order() # was sens.order(), which relied on the module-level global instance\r\n    def order(self):\r\n        # Order function will check the data of payload. if it is turn on it will try to call setup and Relay_on function\r\n        # Relay Control file. if it is turn off vice versa. 
in Relay Control there are some lines that will try to publish\r\n # the status of AC by using AC Status Publisher\r\n if self.orders == \"turnOn\":\r\n # and self.flag == 0\r\n print(\"DEHUM_Subscriber : Sending Turn on order Dehum Relay\")\r\n try:\r\n self.controlling_Relay.setup()\r\n self.controlling_Relay.Relay_ON()\r\n except:\r\n print(\"* dehumSubscriber : ERROR IN SENDING TURN ON ORDER TO RELAY *\")\r\n elif self.orders == \"turnOff\":\r\n print(\"dehumSubscriber : Sending Turn off order To Dehum Relay\")\r\n try:\r\n self.controlling_Relay.setup()\r\n self.controlling_Relay.Relay_OFF()\r\n except:\r\n print(\"* dehumSubscriber: ERROR IN SENDING TURN OFF ORDER TO RELAY *\")\r\n def conn(self,bI,bP):\r\n try:\r\n self.client.connect(Broker_IP, int(Broker_Port))\r\n self.client.subscribe(str(sens.AC_status), qos=1)\r\n self.client.loop_start()\r\n except:\r\n print(\"* dehumSubscriber: PROBLEM IN CONNECTING TO THE BROKER *\")\r\n\r\nif __name__ == '__main__':\r\n # RUN THE SUBSCRIBE FOR GETTING THE TEMPERATURE AND HUMIDITY DATA\r\n try:\r\n # read the config file to set hte resource catalog url and the room_id\r\n file = open(\"configFile.json\", \"r\")\r\n json_string = file.read()\r\n file.close()\r\n except:\r\n raise KeyError(\"* DEHUM_Subscriber: ERROR IN READING CONFIG FILE *\")\r\n config_json = json.loads(json_string)\r\n res = config_json[\"url\"]\r\n roomId = config_json[\"roomId\"]\r\n client = paho.Client()\r\n sens = SubscribeAcOrder(res,roomId, client)\r\n try:\r\n # sending request to resource catalog to get the broker info\r\n sens.load_topics()\r\n respond = requests.get(res + \"broker\")\r\n json_format = json.loads(respond.text)\r\n Broker_IP = json_format[\"ip\"]\r\n Broker_Port = json_format[\"port\"]\r\n except:\r\n print(\"* dehumSubscriber: ERROR IN CONNECTING TO THE SERVER FOR READING BROKER TOPICS *\")\r\n sens.conn(Broker_IP, Broker_Port)\r\n while(True):\r\n time.sleep(1)","sub_path":"Embedded/Rasp2/dehumSubscriber.py","file_name":"dehumSubscriber.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"280784330","text":"'''Test wolff.sources.image_source'''\n\nimport unittest\nfrom wolff.sources.image_source import ImageSource\nfrom tests.regression_data import cfg\nimport shutil, tempfile, os\nimport numpy.testing as npt\nimport numpy as np\nimport vtkbone\nimport vtk\n\n\nclass TestImageSource(unittest.TestCase):\n '''Test wolff.sources.image_source'''\n filenames = [\n 'test25a.aim',\n 'test25a.nii'\n ]\n\n def setUp(self):\n # Create temporary directory to work in\n self.test_dir = tempfile.mkdtemp()\n\n # Download testing data\n for filename in self.filenames:\n # Fetch the data\n download_location = cfg['DOWNLOAD_TESTING_DATA'](filename)\n self.assertNotEqual(download_location, '', 'Unable to download file ' + filename)\n\n # Copy to temporary directory\n shutil.copy(download_location, self.test_dir)\n self.assertTrue(os.path.isfile(os.path.join(self.test_dir, filename)))\n\n def tearDown(self):\n # Remove temporary directory and all files\n shutil.rmtree(self.test_dir)\n\n def test_default_filename(self):\n '''Default filename is none'''\n S = ImageSource()\n self.assertEqual(S.filename, None)\n\n def test_set_filename(self):\n '''Can set filename'''\n S = ImageSource()\n S.filename = 'temp.aim'\n self.assertEqual(S.filename, 'temp.aim')\n\n def test_set_filename_only_takes_string(self):\n '''Can only set filename to string'''\n def instantiate():\n S = 
ImageSource()\n S.filename = 1\n self.assertRaises(TypeError, instantiate)\n\n def test_filename_must_be_set(self):\n '''Filename must be set to run'''\n def instantiate():\n S = ImageSource()\n S()\n self.assertRaises(RuntimeError, instantiate)\n\n def test_filename_must_exist(self):\n '''File must exist'''\n def instantiate():\n S = ImageSource('fake_file_name.anything')\n S()\n self.assertRaises(RuntimeError, instantiate)\n\n def test_read_aim(self):\n '''Reads AIM image correctly'''\n S = ImageSource()\n S.filename = os.path.join(self.test_dir, 'test25a.aim')\n g = S()\n\n npt.assert_array_almost_equal(g.origin, [6.647, 7.225, 1.717])\n npt.assert_array_almost_equal(g.spacing, [0.0340, 0.0340, 0.0340])\n\n r = vtkbone.vtkboneAIMReader()\n r.SetFileName(os.path.join(self.test_dir, 'test25a.aim'))\n r.DataOnCellsOff()\n r.Update()\n\n i = r.GetOutput()\n for index, x in np.ndenumerate(g.data):\n self.assertAlmostEqual(\n g.data[index],\n i.GetScalarComponentAsDouble(*index, 0)\n )\n\n def test_read_nii(self):\n '''Reads nifti image correctly'''\n S = ImageSource()\n S.filename = os.path.join(self.test_dir, 'test25a.nii')\n g = S()\n\n npt.assert_array_almost_equal(g.origin, [0.0, 0.0, 0.0])\n npt.assert_array_almost_equal(g.spacing, [0.0340, 0.0340, 0.0340])\n\n r = vtk.vtkNIFTIImageReader()\n r.SetFileName(os.path.join(self.test_dir, 'test25a.nii'))\n r.Update()\n\n i = r.GetOutput()\n for index, x in np.ndenumerate(g.data):\n self.assertAlmostEqual(\n g.data[index],\n i.GetScalarComponentAsDouble(*index, 0)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/sources/test_image_source.py","file_name":"test_image_source.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"94842217","text":"import requests\nfrom bs4 import BeautifulSoup as soup\n\nfrom database_holder import session,Stock\n\nsymbol = \"ZEEL\"\nurl = \"https://www.nseindia.com/live_market/dynaContent/live_watch/get_quote/getHistoricalData.jsp?symbol={}&series=EQ&fromDate=undefined&toDate=undefined&datePeriod=week\".format(symbol)\nresponse = requests.get(url)\n\nresponse_str = response.content.decode(\"utf-8\").strip()\nhtml = soup(response_str,\"html.parser\")\ntable = html.find(\"table\")\nif table is not None:\n data_row = table.findAll(\"tr\")[-1]\n headers = [\"Date\",\"Symbol\",\"Series\",\"Open Price\",\"High Price\",\"Low Price\",\"Last Traded Price\",\"Close Price\",\"Total Traded Quantity\",\"Turnover (in Lakhs)\"]\n actual_data = []\n for cell in data_row.findAll(\"td\"):\n actual_data.append(cell.string.strip())\n date,symbol,series,open_price,high_price,low_price,last_traded_price,close_price,total_traded_quantity,turnover = actual_data\n a_stock = Stock(symbol=symbol, date=date, today_open=open_price, today_close=close_price,\n traded_quantity=total_traded_quantity)\n session.add(a_stock)\n session.commit()\n\n stocks = session.query(Stock).all()\n print(stocks)\nelse:\n print(\"Couldn't find the content\")\n print(\"Maybe yesterday is Sunday or Market holiday\")\n\n","sub_path":"day_fetcher.py","file_name":"day_fetcher.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92715609","text":"from datetime import datetime\nfrom database import *\nimport os\nimport sys\n\npython = sys.executable\n\n# for greetings\nnow = datetime.now()\nif 6 <= now.hour < 12:\n day_part = 'morning'\nelif 12 <= 
now.hour < 18:\n day_part = 'afternoon'\nelse:\n day_part = 'evening'\n\nif 18 <= now.hour < 21:\n add_new('goodbye', 'have a nice {}'.format(day_part))\nelif 21 <= now.hour or now.hour < 6:\n add_new('goodbye', 'good night')\nelse:\n add_new('goodbye', 'have a nice day')\n add_new('goodbye', 'goodbye')\n\nadd_new('hi', 'good {}'.format(day_part))\n\n\ndef capitalise(string):\n try:\n return string[0].capitalize() + string[1:]\n except IndexError: # if input is empty\n answer(answers_dict['goodbye'])\n quit()\n\n\ndef add_to_database(key, phrase):\n with open(\"database.py\", \"a\") as myfile:\n myfile.write('\\nadd_new(\"{}\", \"{}\")'.format(key, phrase))\n\n\ndef answer(string):\n print(capitalise(choice(string)))\n\n\ndef main_event():\n while True:\n try:\n me = input().lower()\n except KeyboardInterrupt:\n me = 'goodbye'\n my_answer = ''\n if me == 'goodbye':\n answer(answers_dict[me])\n break\n else:\n for key in answers_dict:\n if me.find(key) == -1:\n pass\n else:\n if my_answer == '':\n my_answer = my_answer + choice(answers_dict[key])\n else:\n my_answer = '{}, {}'.format(my_answer, choice(answers_dict[key]))\n if my_answer == '' or 'create new answer for ' in me:\n if 'create new answer for ' in me:\n me = me[22:]\n add_to_database(me, input('Answer?: '))\n print('Completed')\n else:\n print('{}? Do you want to create new answer for this? '.format(capitalise(me)))\n while True:\n answer_add = input().lower()\n if answer_add == 'yes':\n add_to_database(me, input('Answer?: '))\n print('Completed')\n os.execl(python, python, *sys.argv)\n elif answer_add == 'no' or answer_add == '':\n print('Cancelled')\n break\n else:\n print(capitalise(my_answer))\n if my_answer == 'Heads' or my_answer == 'Tails' or my_answer in str(list(range(1, 7))):\n os.execl(python, python, *sys.argv) # because random function can only be called once\n\n\nif __name__ == '__main__':\n main_event()\n","sub_path":"Project_JARVIS.py","file_name":"Project_JARVIS.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519609405","text":"from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nimport sys\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport utils\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\nimport threading\n\n\nclass PART_CIFAR10(Dataset):\n \"\"\"`HALF_CIFAR10 `_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``cifar-10-batches-py`` exists or will be saved to if download is set to True.\n train (bool, optional): If True, creates dataset from training set, otherwise\n creates from test set.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n            downloaded again.\n\n    \"\"\"\n    train_list = ['data_batch_1','data_batch_2','data_batch_3',\n                  'data_batch_4','data_batch_5']\n    test_list = ['test_batch']\n    all_classes = [0,1,2,3,4,5,6,7,8,9]\n    def __init__(self, root, train=True, transform=None, target_transform=None,\n                 download=False, prune_classes=all_classes,\n                 fine_tune_classes=all_classes,\n                 prune_rate=1, fine_tune=False):\n\n        self.transform = transform\n        self.target_transform = target_transform\n        self.download = download\n        self.root = root\n        self.train = train # training set or test set\n        self.fine_tune = fine_tune\n        if self.download:\n            transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n            datasets.CIFAR10(os.path.join(self.root,'origin_data'),\\\n                train=True, download=True, transform=transform)\n        if not os.path.exists(os.path.join(self.root,'parsed_data','fine_tune_data')):\n            print('in pasering')\n            print(\"prune_classes:\", prune_classes)\n            print(\"fine_tune_classes:\", fine_tune_classes)\n            base_path = os.path.join(self.root, 'origin_data/cifar-10-batches-py')\n            total_list = self.train_list + self.test_list\n            threads = []\n            for item in total_list:\n                file_path = os.path.join(base_path,item)\n                threads.append(threading.Thread(target = self.Paser_data,\\\n                    args = (prune_classes, fine_tune_classes, prune_rate,file_path)))\n            # the worker threads must be started before they can be joined\n            for i in range(len(threads)):\n                threads[i].start()\n            for i in range(len(threads)):\n                threads[i].join()\n\n\n        if self.train:\n            downloaded_list = self.train_list\n        else:\n            downloaded_list = self.test_list\n\n        self.data = []\n        self.targets = []\n        if not self.fine_tune:\n            datapath = os.path.join(self.root, 'parsed_data', 'prune_data')\n        else:\n            datapath = os.path.join(self.root, 'parsed_data', 'fine_tune_data')\n        # now load the picked numpy arrays\n        for file_name in downloaded_list:\n            file_path = os.path.join(datapath, file_name)\n            with open(file_path, 'rb') as f:\n                if sys.version_info[0] == 2:\n                    entry = pickle.load(f)\n                else:\n                    entry = pickle.load(f, encoding='latin1')\n                self.data.append(entry['data'])\n                if 'labels' in entry:\n                    self.targets.extend(entry['labels'] % 5)\n                else:\n                    self.targets.extend(entry['fine_labels'])\n\n        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)\n        self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC\n\n    def Paser_data(self,prune_classes, fine_tune_classes, prune_rate,file_path):\n        prune_label = np.array([])\n        prune_data = np.array([])\n        prune_file_name = np.array([])\n        fine_tune_label = np.array([])\n        fine_tune_data = np.array([])\n        fine_tuen_file_name = np.array([])\n        prune_iter = int(1 / prune_rate)\n        with open(file_path,'rb') as f:\n            data = pickle.load(f, encoding='latin1')\n        size = len(data['labels'])\n        iter_ = 0\n        for i in range(size):\n            if data['labels'][i] in prune_classes and iter_ % prune_iter == 0:\n                iter_ += 1\n                prune_label = np.append(prune_label,data['labels'][i])\n                prune_data = np.append(prune_data,data['data'][i])\n                prune_file_name = np.append(prune_file_name,data['filenames'][i])\n            if data['labels'][i] in fine_tune_classes and data['labels'][i] in prune_classes\\\n                and i % prune_iter != 0:\n                iter_ += 1\n                fine_tune_label = np.append(fine_tune_label,data['labels'][i])\n                fine_tune_data = np.append(fine_tune_data,data['data'][i])\n                fine_tuen_file_name = np.append(fine_tuen_file_name,data['filenames'][i])\n            if data['labels'][i] in fine_tune_classes and data['labels'][i] not in prune_classes:\n                fine_tune_label = np.append(fine_tune_label,data['labels'][i])\n                fine_tune_data = np.append(fine_tune_data,data['data'][i])\n                fine_tuen_file_name = np.append(fine_tuen_file_name,data['filenames'][i])\n        new_dataset = {}\n        new_dataset['labels'] = prune_label\n        new_dataset['data'] = prune_data.reshape((-1,data['data'][0].shape[0],))\n        new_dataset['filenames'] = prune_file_name\n        utils.checkdir(os.path.join(self.root,'parsed_data','prune_data'))\n        with open(os.path.join(self.root,'parsed_data','prune_data'),'wb') as f:\n            pickle.dump(new_dataset, f, 0)\n        new_dataset = {}\n        new_dataset['labels'] = fine_tune_label\n        new_dataset['data'] = fine_tune_data.reshape((-1,data['data'][0].shape[0],))\n        new_dataset['filenames'] = fine_tuen_file_name\n        utils.checkdir(os.path.join(self.root,'parsed_data','fine_tune_data'))\n        # was 'prune_data', which overwrote the pruned split; the fine-tune split gets its own file\n        with open(os.path.join(self.root,'parsed_data','fine_tune_data'),'wb') as f:\n            pickle.dump(new_dataset, f, 0)\n        # data = {}\n        # prune_label = np.array([])\n        # prune_data = np.array([])\n        # prune_file_name = np.array([])\n        # fine_tune_label = np.array([])\n        # fine_tune_data = np.array([])\n        # fine_tuen_file_name = np.array([])\n    def __getitem__(self, index):\n        \"\"\"\n        Args:\n            index (int): Index\n\n        Returns:\n            tuple: (image, target) where target is index of the target class.\n        \"\"\"\n        img, target = self.data[index], self.targets[index]\n        img = img.astype(np.uint8)\n        target = target.astype(np.long)\n        # print()\n        # print('target type:',type(target))\n        # print('target',target)\n        # print('train:',self.train)\n        # doing this so that it is consistent with all other datasets\n        # to return a PIL Image\n        img = Image.fromarray(img)\n\n        if self.transform is not None:\n            img = self.transform(img)\n\n        if self.target_transform is not None:\n            target = self.target_transform(target)\n\n        return img, target\n\n\n    def __len__(self):\n        return len(self.data)\n\n\n\n\nclass CIFAR100(PART_CIFAR10):\n    \"\"\"`CIFAR100 `_ Dataset.\n\n    This is a subclass of the `CIFAR10` Dataset.\n    \"\"\"\n    base_folder = 'cifar-100-python'\n    url = \"https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\n    filename = \"cifar-100-python.tar.gz\"\n    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'\n    train_list = [\n        ['train', '16019d7e3df5f24257cddd939b257f8d'],\n    ]\n\n    test_list = [\n        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],\n    ]\n    meta = {\n        'filename': 'meta',\n        'key': 'fine_label_names',\n        'md5': '7973b15100ade9c7d40fb424638fde48',\n    }\n\n\n","sub_path":"part_cifar_loader.py","file_name":"part_cifar_loader.py","file_ext":"py","file_size_in_byte":8275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259177888","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom subprocess import Popen\nimport sys\nimport tempfile\nfrom ngless.wrap import ngl_prepare_options, ngl_prepare_payload\n\ntry:\n    import argparse\nexcept ImportError:\n    print(\"argparse not found. 
Please install argparse with 'pip install argparse'\")\n sys.exit(1)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", required=True,\n help=\"SAM/BAM/CRAM file filter\")\n parser.add_argument(\"-o\", \"--output\", required=True,\n help=\"Output file/path for results\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"Prints the payload before submitting to ngless\")\n\n return parser.parse_args()\n\n\ndef prepare(args):\n # NOTE this needs to match the arguments in parse_args and the targets in payload\n # commas at the beginning of each option are used when that section has\n # parameters from other functions.\n options = {\n \"input_opts\": {\n \"input\": \"'{input}'\",\n },\n \"write_opts\": {\n \"output\": \", ofile='{output}'\",\n },\n }\n\n ngl_options = ngl_prepare_options(args, options)\n\n payload_tpl = \"\"\"\\\nngless \"0.0\"\nmapped = samfile({input_opts})\nstats = mapstats(mapped)\nwrite(stats{write_opts})\n\"\"\".format(**ngl_options)\n\n return ngl_prepare_payload(args, payload_tpl)\n\n\ndef ngless(args):\n payload = prepare(args)\n\n with tempfile.NamedTemporaryFile() as script:\n script.write(payload.encode(\"utf8\"))\n script.flush()\n\n p = Popen([\"ngless\", script.name])\n p.communicate()\n\n if p.returncode:\n sys.stderr.write(\"ERROR: ngless failed with exit code {0}\\n\".format(p.returncode))\n sys.exit(p.returncode)\n\n\ndef main():\n args = parse_args()\n ngless(args)\n\n\nif __name__ == \"__main__\":\n main()\n\n# vim: ai sts=4 et sw=4\n","sub_path":"scripts/ngless-cwl/bin/ngless-mapstats.py","file_name":"ngless-mapstats.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632474556","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/spider_common/persistent/scrapy/pipelines.py\n# Compiled at: 2019-04-15 07:02:32\n# Size of source mod 2**32: 1607 bytes\nfrom .dw_logger import DwLogger\nfrom parser_engine.itemclassloader import ItemClassLoader\nfrom parser_engine.utils import load_scrapy_settings\nfrom spider_common.common_utils.exceptions import InitArgsException\n\nclass DwPipeline(object):\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n obj = cls(settings=(crawler.settings))\n return obj\n\n def __init__(self, action=None, item_cls=None, settings=None):\n self.item_loader = None\n self.logger = None\n self.item_configs = None\n self.setup_from_settings(settings=(settings if settings else load_scrapy_settings()))\n if action:\n if item_cls:\n cls = self.item_loader.load(item_cls)\n self.item_configs.update({action: cls})\n\n def setup_from_settings(self, settings):\n self.item_loader = ItemClassLoader(settings=settings)\n self.logger = DwLogger(settings=settings)\n conf = settings.get('DW_ITEMS_CONFIG')\n if conf:\n item_configs = {}\n for action, item_cls in conf.items():\n cls = self.item_loader.load(item_cls)\n if not cls:\n raise InitArgsException('item class %s not found' % item_cls)\n item_configs[action] = cls\n\n self.item_configs = item_configs\n\n def process_item(self, item, spider):\n if self.item_configs:\n for action, item_cls in self.item_configs.items():\n if isinstance(item, item_cls):\n (self.logger.log_to_dw)(action, **item)\n\n return 
item","sub_path":"pycfiles/spider_common-0.0.1-py3.6/pipelines.cpython-36.py","file_name":"pipelines.cpython-36.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533922324","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: Shengqiang Zhang\n# Time : 2019/12/4 20:05\n\nfrom typing import List\n\n\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n jump_count = 0\n i = 0\n while i < len(nums) - 1:\n if i + nums[i] >= len(nums) - 1:\n break\n cur_long_list = [j + nums[j] for j in range(i + 1, i + nums[i] + 1)]\n print(i , cur_long_list)\n cur_longest_index = i + 1 + cur_long_list.index(max(cur_long_list))\n i = cur_longest_index\n print(i)\n jump_count += 1\n return jump_count + 1 if i < len(nums) - 1 else jump_count\n\n\nif __name__ == '__main__':\n s = Solution()\n input = [2, 3, 1, 1, 4]\n input1 = [2, 3, 0, 1, 4]\n input2 = [2, 1]\n print(s.jump(input2))\n","sub_path":"LeetCode-45/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"561894297","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 23 02:33:44 2021\n\n@author: Egemen G\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport csv\nimport matplotlib.pyplot as plt\n\n\nclass shakingTable():\n\n\n def __init__(self, conc, mid, gang, conc_size, mid_size, data_size):\n self.conc = conc\n self.mid = mid\n self.gang = gang\n self.conc_size = conc_size\n self.mid_size = mid_size\n self.data_size = data_size\n \n def info(self):\n print(\"Concentration color: \"+str(self.conc))\n print(\"Middling color: \"+str(self.mid))\n print(\"Gangue color: \"+str(self.gang))\n print(\"Concentration Size: \"+str(self.conc_size))\n print(\"Middling Size: \"+str(self.mid_size))\n \n\n\n def createData(self):\n self.data = []\n for c in range( 1, self.conc_size+1 ) : \n for m in range( 0, self.mid_size+1 ):\n frames = [self.gang]*100\n frames = np.array(frames) \n frames[:c,] = self.conc\n frames[c:c+m,] = self.mid \n self.data.append(frames)\n return self.data\n \n def sliderPoints(self):\n points = []\n for c in range (1, self.conc_size+1):\n for m in range (0, self.mid_size+1):\n new_point = [c,(c+m)]\n if ( c == (c+m)):\n new_point = [c,0]\n \n points.append(new_point)\n return points\n \n def ratios(self):\n c_perc =(self.conc_size/self.data_size)*100\n m_perc = (self.mid_size/self.data_size)*100\n g_perc = ((self.data_size-(c_perc+m_perc))/self.data_size)*100\n \n print(\"Concentration percentage : % \"+str(c_perc))\n print(\"Middling percentage : % \"+str(m_perc))\n print(\"Gangue percentage : % \"+str(g_perc))\n return c_perc, m_perc, g_perc\n\n\n#################### PLOT DATA ####################\n\ndef showData(data):\n\n for i in range(0,len(data)):\n plt.imshow(data[i])\n plt.show()\n\n#################### SAVE DATA #################### \n \ndef saveData(path,data):\n with open( path+\".csv\", 'w', newline='' ) as csvfile:\n writer = csv.writer(csvfile) \n writer.writerows(data)\n return True\n \n#################### LOAD DATA ####################\n\ndef loadData(path):\n delete = [\"\", \" \",\"[\",\"]\",\"[ \",\" ]\",\"''\"]\n loadedData = pd.read_csv(path+\".csv\",header=None)\n # Prepare single cell in data\n rows,cols = loadedData.shape[0],loadedData.shape[1]\n\n for row in range(0,rows):\n \n for col in range(0,cols):\n cell = 
loadedData[col][row] # take cell\n cell = cell.split(\" \") # split it\n \n newCell = []\n \n for i in cell:\n if not (i in delete):\n newCell.append(i)\n else:\n continue\n cell = newCell\n for j in range(0,3): #take the cell\n cell[j] = cell[j].replace(\"[\",\"\") # delete ] from cell\n cell[j] = cell[j].replace(\"]\",\"\") # delete [ from cell\n cell[j] = cell[j].replace(\" \",\"\")\n cell[j] = int(cell[j]) # render it integer\n \n loadedData[col][row] = cell \n#convert every df to np.array (100,3), all data in the frames list \n frames = []\n l = []\n for j in range(0,rows):\n for i in range(0,cols):\n cell = loadedData.iloc[j,i]\n l.append(cell)\n if i == (cols-1):\n l = np.array(l)\n frames.append(l)\n l = []\n return frames \n\n\"\"\" \npath = \"C:/Users/Egemen G/Desktop/dataPrep\"\nconc = [10,9,9]\nmid = [74,49,49]\ngang = [136,106,105]\n\ndata_size = 100\nconc_size = 10\nmid_size = 5\n\nnew = shakingTable(conc,mid,gang,conc_size,mid_size,data_size)\nnew.info()\ncccc = new.createData()\npoints = new.sliderPoints()\n\nsaveData(path,cccc)\na = loadData(path)\nshowData(a)\n\nnew.ratios()\n\n\"\"\"\n","sub_path":"DataCreator.py","file_name":"DataCreator.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"599153845","text":"#!/usr/bin/python\n\nfrom functools import partial\nfrom os import system\nfrom sys import argv\nfrom time import sleep\nimport atexit\nfrom threading import Thread\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.node import Host\nfrom mininet.link import TCLink\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.cli import CLI\n\n\ndef indexGen(prefix):\n i = 1\n while True:\n yield '%s%s' % (prefix, i)\n i += 1\n\n\nclass FatTree(Topo):\n\n def __init__(self):\n self.coreSwitchList = []\n self.aggSwitchList = []\n self.edgeSwitchList = []\n self.hostList = []\n self.sgen = indexGen(\"s\")\n self.hgen = indexGen(\"h\")\n\n Topo.__init__(self)\n\n def build(self, *args, **params):\n c, a, e = 3, 3, 5\n # c, a, e = 1, 1, 2\n for i in range(c):\n self.coreSwitchList.append(self.addSwitch(next(self.sgen)))\n\n for i in range(a):\n self.aggSwitchList.append(self.addSwitch(next(self.sgen)))\n\n for i in range(e):\n self.edgeSwitchList.append(self.addSwitch(next(self.sgen)))\n\n for core in self.coreSwitchList:\n for agg in self.aggSwitchList:\n self.addLink(core, agg)\n\n for agg in self.aggSwitchList:\n for edge in self.edgeSwitchList:\n self.addLink(agg, edge)\n\n for edge in self.edgeSwitchList:\n for i in range(2):\n host = self.addHost(next(self.hgen))\n self.hostList.append(host)\n self.addLink(edge, host)\n\ntopos = { 'fattree' : ( lambda : FatTree()) }\n\n# if __name__ == '__main__':\n# pass\n# setLogLevel('info')\n# topo = FatTree()\n# net = Mininet(topo=topo)\n# net.start()\n","sub_path":"topo_fat.py","file_name":"topo_fat.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"356059041","text":"import usb.core\nimport sys\nimport threading\nfrom multiprocessing import Process, Queue\nimport time\nimport math\nimport numpy as np\nimport usb.util\nfrom setting import *\n\ndef init():\n device.ctrl_transfer(bmRequestType = 0x40, #Write\n bRequest = 0x01,\n wValue = 0x0000,\n wIndex = 0x0D, #PIX_GRAB register value\n data_or_wLength = None\n )\n\nclass SlideArray:\n def __init__(self, size):\n 
self.window = None\n self.size = size\n\n def push(self, ele): # chunk for coordinate, ele for binary bits\n if self.is_full():\n self.window = self.window[1:]\n if self.window is None:\n self.window = ele\n return\n assert ele[0][0] > self.window[-1][0]\n self.window = np.vstack((self.window, ele))\n\n def is_full(self):\n if self.window is None:\n return False\n return self.window.size == self.size * 2\n\n def reset(self):\n self.window = None\n\n\ndef update():\n global raw_frames_m\n global q\n raw_file_count = 0\n while True:\n\n response, timestamp = q.get()\n if not response:\n return\n\n val = int.from_bytes(response, 'big')\n val_fixed = val\n # print(val)\n if val_fixed < 128:\n val_fixed += 128\n if val_fixed > 240:\n continue\n\n # print(timestamp, val_fixed)\n raw_frames_m.push(np.array([[timestamp, val_fixed], ]))\n if raw_frames_m.is_full():\n fn = './data/' + str(raw_file_count) + '_raw_v3.bin'\n raw_frames_m.window.tofile(fn)\n print('[ Expection: 45 ] Write done: ' + fn)\n raw_frames_m.reset()\n raw_file_count += 1\n\ndur = input('Duration: ')\ndur = 10 if dur == '' else int(dur)\n\ndevice = usb.core.find(idVendor=0x046d, idProduct=0xc077)\n\nif device.is_kernel_driver_active(0):\n device.detach_kernel_driver(0)\n\ndevice.set_configuration()\n\nraw_frames_m = SlideArray(MOUSE_FRAME_RATE * 60)\n\nq = Queue()\np = Process(target=update) # for display\np.start()\n\nstart = time.time()\nwhile time.time() - start < dur: # 11000\n response = device.ctrl_transfer(bmRequestType = 0xC0, #Read\n bRequest = 0x01,\n wValue = 0x0000,\n wIndex = 0x0D, #PIX_GRAB register value\n data_or_wLength = 1\n )\n init()\n q.put((response, time.time()))\nprint('All done.')\n\n","sub_path":"legency/chromatic_test.py","file_name":"chromatic_test.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"150751668","text":"#Constants\nVERSION \t\t\t\t = 0.1\n\nLOG_FILE \t\t\t\t = 'seek_log.txt'\n\nWINDOW_WIDTH \t\t\t = 800\nWINDOW_HEIGHT \t\t\t = 600\nWINDOW_TITLE \t\t\t = 'Seek!'\n\n#--------------------------------------------------------------------\n# Tweakable Params\n\n# Time\nMaxDelta \t\t\t\t = 0.1 # Dt can be a max of 0.1 seconds\nSlowMotionRatio \t\t = 1\n\n# Rendering\nScale = 18\n\n# Objects\nNumAgents \t\t\t\t = 20\n\n# This is used to multiply the steering force AND all the multipliers\n# found in SteeringBehavior\nSteeringForceTweaker = 200.0\n\nMaxForce = 2050.0\nMaxSpeed = 2050.0\nMaxTurnRate = 50.0\nVehicleMass = 1.0\nVehicleRadius = 1.0\n\n# Use these values to tweak the amount that each steering force\n# contributes to the total steering force\nSeparationWeight = 1.0\nAlignmentWeight = 1.0\nCohesionWeight = 2.0\nObstacleAvoidanceWeight = 10.0\nWallAvoidanceWeight = 10.0\nWanderWeight = 1.0\nSeekWeight = 1.0\nFleeWeight = 1.0\nArriveWeight = 1.0\nPursuitWeight = 1.0\nOffsetPursuitWeight = 1.0\nInterposeWeight = 1.0\nHideWeight = 1.0\nEvadeWeight = 0.01\nFollowPathWeight = 0.05\n","sub_path":"seek/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"147031517","text":"from PIL import Image\nfrom pathlib import Path\nimport cv2\nimport argparse\nimport logging\nimport sys\nimport random\nimport shutil\n\n# input image dimensions\nimg_rows, img_cols = 32, 32\n\n\ndef trim_faces_opencv(file_path: Path, casc_path: str) -> [Image]:\n logging.debug(\"start process on %s\", 
file_path)\n faceCascade = cv2.CascadeClassifier(casc_path)\n\n image = cv2.imread(str(file_path))\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Detect faces in the image\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30)\n # flags = cv2.CV_HAAR_SCALE_IMAGE\n )\n\n output = []\n for (x, y, w, h) in faces:\n # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n im = Image.open(file_path)\n im = im.convert(\"L\")\n # im.thumbnail((500, 500))\n\n margin = w / 2 * 0.42\n\n if (\n x - margin < 0\n or y - margin < 0\n or x + w + margin > im.width\n or y + h + margin > im.height\n ):\n # print(\"size skip\")\n continue\n\n im = im.crop((x - margin, y - margin, x + w + margin, y + h + margin))\n\n output.append(im)\n return output\n\n\ndef trim_faces_ssd_keras(picture_file: Path):\n raise NotImplementedError()\n\n\ndef main():\n logging.basicConfig(filename=\"generate_dataset.log\", level=logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n handler.setFormatter(formatter)\n logging.getLogger().addHandler(handler)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input\",\n \"-i\",\n type=str,\n help=\"directory to load the raw data including photos\",\n required=True,\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n help=\"directory to output the generated datasets\",\n required=True,\n )\n parser.add_argument(\n \"--method\", \"-m\", choices=[\"opencv\", \"ssd_keras\"], default=\"opencv\"\n )\n parser.add_argument(\n \"--cascade_path\", default=\"../haarcascade_frontalface_default.xml\"\n )\n parser.add_argument(\"--debug\", \"-d\", action=\"store_true\", help=\"log debug info\")\n parser.add_argument(\n \"--test-split\",\n \"-t\",\n action=\"store_true\",\n help=\"if you want to split data into train and test you can use this flag\",\n )\n parser.add_argument(\n \"--test-split-ratio\",\n \"-r\",\n type=float,\n help=\"the ratio of train test split\",\n default=0.1,\n )\n\n opts = parser.parse_args()\n\n if opts.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n if opts.method == \"ssd_keras\":\n raise NotImplementedError\n\n input_path = Path(opts.input)\n logging.debug(\"input path = %s\", input_path)\n output_path = Path(opts.output)\n logging.debug(\"output path = %s\", output_path)\n assert input_path.is_dir()\n\n if output_path.exists() and not output_path.is_dir():\n logging.error(\"output path not dir\")\n raise RuntimeError(\"output path not dir\")\n if not output_path.exists():\n logging.info(\"creating directory at %s\", output_path)\n output_path.mkdir()\n\n if (output_path / \"train\").exists() and not (output_path / \"train\").is_dir():\n logging.error(\"output/train not dir\")\n raise RuntimeError(\"output/train not dir\")\n if not (output_path / \"train\").exists():\n logging.info(\"creating directory at %s\", (output_path / \"train\"))\n (output_path / \"train\").mkdir()\n\n try:\n logging.debug(\"reading index from output directory\")\n with (output_path / \"index.txt\").open(\"r\") as f:\n index = int(f.read())\n except IOError:\n logging.debug(\"using fallback index\")\n index = 0\n logging.debug(\"index = %s\", index)\n\n logging.debug(\"using method %s\", opts.method)\n\n for picture_file in input_path.iterdir():\n if not picture_file.is_file():\n continue\n if opts.method == \"opencv\":\n faces = trim_faces_opencv(picture_file, casc_path=opts.cascade_path)\n 
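
trim_faces_opencv() above silently drops any detection whose padded box would cross the image border. A minimal sketch that clamps the crop to the image bounds instead of skipping the face; crop_with_margin is a hypothetical helper, and the 0.21 ratio matches the record's margin = w / 2 * 0.42.

from PIL import Image

def crop_with_margin(im: Image.Image, x: int, y: int, w: int, h: int,
                     ratio: float = 0.21) -> Image.Image:
    # Pad the detected box by `ratio` of its width on each side, clamped
    # to the image bounds so border faces are kept rather than discarded.
    margin = int(w * ratio)
    left = max(x - margin, 0)
    top = max(y - margin, 0)
    right = min(x + w + margin, im.width)
    bottom = min(y + h + margin, im.height)
    return im.crop((left, top, right, bottom))
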
elif opts.method == \"ssd_keras\":\n faces = trim_faces_ssd_keras(picture_file)\n\n try:\n faces\n except NameError:\n faces = []\n\n for face in faces:\n face.save(output_path / \"train\" / f\"image{index}.jpg\")\n index += 1\n if opts.test_split:\n if (output_path / \"test\").exists() and not (output_path / \"test\").is_dir():\n logging.error(\"output/test not dir\")\n raise RuntimeError(\"output/test not dir\")\n if not (output_path / \"test\").exists():\n logging.info(\"creating directory at %s\", (output_path / \"test\"))\n (output_path / \"test\").mkdir()\n\n train_list = [p for p in (output_path / \"train\").iterdir()]\n random.shuffle(train_list)\n test_list = train_list[0 : int(len(train_list) * opts.test_split_ratio)]\n for file in test_list:\n shutil.move(str(file), str(output_path / \"test\"))\n with (output_path / \"index.txt\").open(\"w\") as f:\n f.write(f\"{index}\")\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"face_rotation/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271301331","text":"# 设置redis相关的配置信息\nREDIS_CONFIG = {\n\t\"host\": \"www.booksell.cn\",\n\t\"port\": 6380\n}\n\n# 设置neo4j图数据库的配置信息\nNEO4J_CONFIG = {\n\t\"uri\": \"bolt://www.booksell.cn:7688\",\n\t\"auth\": (\"neo4j\", \"yunda618\"),\n\t\"encrypted\": False\n}\n\n# 设置句子相关服务的请求地址\nmodel_serve_url = \"http://0.0.0.0:5002/v1/recognition/\"\n\n# 设置服务的超时时间\nTIMEOUT = 2\n\n# 设置规则对话的模板加载路径\nreply_path = \"./reply.json\"\n\n# 用户对话信息保存的过期时间\nex_time = 36000\n\n","sub_path":"online/doctor_online/main_server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"299680075","text":"from psycopg2.extensions import connection as _connection\r\nfrom models import Person, FilmWorkPerson, FilmWork, Genre, FilmWorkGenre\r\n\r\n\r\nclass PostgresSaver:\r\n def __init__(self, pg_conn: _connection):\r\n self.cursor = pg_conn.cursor()\r\n self._counter = 0\r\n\r\n def reset(self):\r\n \"\"\"Truncates content tables\"\"\"\r\n truncate_sql = \"\"\"truncate film_work, \r\n film_work_genre, \r\n film_work_person, \r\n genre,\r\n person;\"\"\"\r\n self.cursor.execute(truncate_sql)\r\n\r\n def save_all_data(self, movies_iterator) -> int:\r\n \"\"\"\r\n Iterate over movies saving them in postgres content tables\r\n @param movies_iterator: the movies\r\n @return: number of movies saved\r\n \"\"\"\r\n cursor = self.cursor\r\n\r\n for movie in movies_iterator:\r\n film_work = FilmWork(title=movie['title'], description=movie['description'], rating=movie['imdb_rating'])\r\n film_work.save(cursor)\r\n\r\n for role in ['actor', 'director', 'writer']:\r\n for person_name in movie[role]:\r\n person = Person(full_name=person_name)\r\n person.save(cursor)\r\n FilmWorkPerson(film_work_id=film_work.id, person_id=person.id, role=role).save(cursor)\r\n\r\n for genre_name in movie['genre']:\r\n genre = Genre(name=genre_name)\r\n genre.save(cursor)\r\n FilmWorkGenre(film_work_id=film_work.id, genre_id=genre.id).save(cursor)\r\n\r\n self._counter += 1\r\n print('.', end='', flush=True)\r\n\r\n return self._counter\r\n","sub_path":"sqlite_to_postgres/postgres_saver.py","file_name":"postgres_saver.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"444636783","text":"from tkinter import *\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nfrom mysql.connector import errorcode\r\nimport ctypes # An included library with Python install.\r\n\r\nimport config # import global variables for form\r\nfrom config import database\r\n\r\ndef editrecord(passedidnumber):\r\n global id\r\n global fn\r\n global ln\r\n global ad\r\n global si\r\n\r\n try:\r\n mydb = database.connect_db()\r\n mycursor = mydb.cursor()\r\n passedid = (passedidnumber, )\r\n database.connect_selectstudent(mycursor,passedid)\r\n record = mycursor.fetchone()\r\n id = record[0]\r\n fn = record[1]\r\n ln = record[2]\r\n ad = record[3]\r\n si = record[4]\r\n\r\n mydb.commit()\r\n #print(mycursor.rowcount, \"record(s) selected\")\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed to select record into Student table {}\".format(error))\r\n\r\n finally:\r\n if (mydb.is_connected()):\r\n mydb.close()\r\n #print(\"MySQL connection is closed\")\r\n\r\n ## Use of global variables makes them available in all functions\r\n global root\r\n root=Tk()\r\n root.title(\"Student Database\")\r\n root.geometry(\"450x400\")\r\n root.config(bg=\"pink\")\r\n root.resizable(0,0)\r\n\r\n Label(root, text='Please Edit your Student Details', bd=5,font=('arial', 12, 'bold'), relief=\"groove\", fg=\"white\",\r\n bg=\"blue\",width=300).pack()\r\n\r\n # added root to StringVar to get form working properly\r\n config.id = StringVar(root,id)\r\n config.firstname = StringVar(root,fn)\r\n config.lastname = StringVar(root,ln)\r\n config.address = StringVar(root,ad)\r\n config.balance = StringVar(root,si)\r\n\r\n\r\n Label(root, text=\"\").pack()\r\n Label(root, text=\"ID :\", fg=\"black\", font=('arial', 12, 'bold')).pack()\r\n Entry(root, textvariable=config.id,state=DISABLED).pack()\r\n Label(root, text=\"\").pack()\r\n Label(root, text=\"Firstname :\", fg=\"black\", font=('arial', 12, 'bold')).pack()\r\n Entry(root, textvariable=config.firstname).pack()\r\n Label(root, text=\"Lastname :\", fg=\"black\", font=('arial', 12, 'bold')).pack()\r\n Entry(root, textvariable=config.lastname).pack()\r\n Label(root, text=\"Address :\", fg=\"black\", font=('arial', 12, 'bold')).pack()\r\n Entry(root, textvariable=config.address).pack()\r\n Label(root, text=\"\").pack()\r\n Label(root, text=\"Balance :\", fg=\"black\", font=('arial', 12, 'bold')).pack()\r\n Entry(root, textvariable=config.balance,state=DISABLED).pack()\r\n Label(root, text=\"\").pack()\r\n\r\n Button(root, text=\"Store to Dbase\", bg=\"blue\", fg='white', relief=\"groove\", font=('arial', 12, 'bold'), command=writerecordtodatabase).pack()\r\n Button(root, text=\"Exit\", bg=\"blue\", fg='white', relief=\"groove\", font=('arial', 12, 'bold'), command=Exit).pack()\r\n\r\n Label(root, text=\"\")\r\n\r\n root.mainloop()\r\n\r\ndef writerecordtodatabase():\r\n try:\r\n connection = database.connect_db()\r\n cursor = connection.cursor()\r\n database.connect_updateeditstudent(cursor,config.firstname.get(),config.lastname.get(),config.address.get(), config.balance.get(),config.id.get())\r\n connection.commit()\r\n cursor.close()\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed to insert record into Student table {}\".format(error))\r\n\r\n finally:\r\n if (connection.is_connected()):\r\n connection.close()\r\n #print(\"MySQL connection is closed\")\r\n root.destroy()\r\ndef Exit():\r\n wayOut = Mbox('Student Database', 'Do you want to exit?', 1)\r\n if wayOut == 1 :\r\n root.destroy()\r\n 
return\r\n\r\ndef Mbox(title, text, style):\r\n return ctypes.windll.user32.MessageBoxW(0, text, title, style)\r\n\r\n\r\n#editrecord()\r\n#root.mainloop() ## this causes the program to loop indefinitely\r\n","sub_path":"EditAccount.py","file_name":"EditAccount.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"613099331","text":"## Script (Python) \"confirma_acomp_materia_pysc\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=end_email, txt_hash, cod_materia\n##title=\nrequest=context.REQUEST\nresponse=request.RESPONSE\nsession= request.SESSION\n\nmailhost = context.MailHost\n\nif hasattr(context.sapl_documentos.props_sapl,'logo_casa.gif'):\n imagem = context.sapl_documentos.props_sapl['logo_casa.gif'].absolute_url()\nelse:\n imagem = context.imagens.absolute_url() + \"/brasao_transp.gif\"\n\ncasa={}\naux=context.sapl_documentos.props_sapl.propertyItems()\nfor item in aux:\n casa[item[0]] = item[1]\nemail_casa = casa['end_email_casa']\ncasa_legislativa = casa['nom_casa']\n\nfor materia in context.zsql.materia_obter_zsql(cod_materia=cod_materia):\n ementa = materia.txt_ementa\n projeto = materia.sgl_tipo_materia.encode('utf-8')+\" \"+materia.des_tipo_materia.encode('utf-8')+\" \"+str(materia.num_ident_basica)+\"/\"+str(materia.ano_ident_basica)\n\n for autoria in context.zsql.autoria_obter_zsql(cod_materia=materia.cod_materia,ind_primeiro_autor=1):\n dic_autor = {}\n for autor in context.zsql.autor_obter_zsql(cod_autor = autoria.cod_autor):\n nom_autor = \" \"\n if autor.des_tipo_autor=='Parlamentar':\n for parlamentar in context.zsql.parlamentar_obter_zsql(cod_parlamentar=autor.cod_parlamentar):\n nom_autor = parlamentar.nom_completo\n elif autor.des_tipo_autor=='Comissao':\n for comissao in context.zsql.comissao_obter_zsql(cod_comissao=autor.cod_comissao):\n nom_autor = comissao.nom_comissao\n elif autor.des_tipo_autor=='Bancada':\n for bancada in context.zsql.bancada_obter_zsql(cod_bancada=autor.cod_bancada):\n nom_autor = bancada.nom_bancada\n else:\n nom_autor=autor.nom_autor\n\nremetente = email_casa\n\ndestinatario = str(end_email)\n\nhash = str(txt_hash)\n\nlink = \"\" + context.consultas.absolute_url() + \"/materia/acompanhamento/acomp_materia_confirmar_proc?txt_hash=\" + txt_hash\n\nmMsg = \"Prezado(a) Senhor(a),\\n\\n\"\nmMsg = mMsg + \"Para acompanhar por E-mail o andamento da matéria acima identificada, solicitamos que confirme o recebimento de futuras mensagens eletrônicas, clicando no link:\\n\\n\"\nmMsg = mMsg + link + \"\\n\\n\"\nmMsg = mMsg + \"Caso não tenha solicitado o acompanhamento dessa matéria em nosso sistema, favor desconsiderar esta mensagem.\\n\\n\"\nmMsg = mMsg + \"Cordialmente,\\n\\n\"\nmMsg = mMsg + \"\"+ str(casa_legislativa) +\"\\n\"\nmMsg = mMsg + \"Sistema de Apoio ao Processo Legislativo\\n\"\n\nmSubj = projeto +\" - Acompanhamento por E-mail\"\n\nmailhost.send(mMsg, destinatario, remetente, subject=mSubj, encode='base64')\n","sub_path":"branches/2.6/skins/sk_sapl/pysc/confirma_acomp_materia_pysc.py","file_name":"confirma_acomp_materia_pysc.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"327161165","text":"#! 
/usr/bin/env python\nimport os\nimport sys\n\nthisdir = os.path.dirname(__file__)\nlibdir = os.path.abspath(os.path.join(thisdir, '..'))\nif libdir not in sys.path:\n sys.path.insert(0, libdir)\n\nlibdir = os.path.abspath(os.path.join(thisdir, '../app'))\nif libdir not in sys.path:\n sys.path.insert(0, libdir)\n\nfrom app.main import Main\n\nbn = None\nif len(sys.argv) > 1:\n bn = sys.argv[1]\nMain().load_images(bn)\n","sub_path":"bin/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"552490637","text":"from flask import Flask, render_template, request, redirect, url_for\nimport ApiAccess as api\n\n\napp = Flask(__name__)\napp.config.from_object('flask_config.Config')\n\nobj1 = api.AccessTrelloApi()\n\n\ndef get_items():\n\n Items1 = obj1.getCardsFromTrelloList(\n api.TODOLISTURL, 'To Do')\n\n Items2 = obj1.getCardsFromTrelloList(\n api.DONELISTURL, 'Done')\n\n return Items1 + Items2\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', items=get_items())\n\n\n@app.route('/add_item/', methods=['POST'])\ndef add_item():\n NewItem = request.form[\"NewItem\"]\n obj1.AddItemTodoList(NewItem)\n return render_template('index.html', items=get_items())\n\n\n@app.route('/complete_item/', methods=['GET'])\ndef complete_item(item):\n obj1.MarkItemAsDone(item)\n return render_template('index.html', items=get_items())\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"556141693","text":"\n\n#calss header\nclass _PROGNOSTICATE():\n\tdef __init__(self,): \n\t\tself.name = \"PROGNOSTICATE\"\n\t\tself.definitions = [u'to give a judgment about what is likely to happen in the future, especially in connection with a particular situation: ', u'to give a medical judgment about the likely or expected development of a disease']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_prognosticate.py","file_name":"_prognosticate.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"130023117","text":"#!/usr/bin/env python2\n\n# MIT License\n#\n# Copyright (c) 2017 Zhiang Chen\n'''\nReceive the cropped image from \"box_image/numpy\", and publish the class prediction and angle prediction onto \"prediction\"\n'''\nfrom __future__ import print_function\nimport rospy\nimport roslib\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String\nfrom cv_bridge import CvBridge, CvBridgeError\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport operator\nimport time\nimport os\nimport sys\nfrom rospy.numpy_msg import numpy_msg\nfrom rospy_tutorials.msg import Floats\nfrom depthnet.msg import PredictionMSG\nimport math\nfrom six.moves import cPickle as pickle\n\nwith open('small_data', 'rb') as f:\n save = pickle.load(f)\n small_data = save['small_data']\n del save\n\n\nname2value = {'empty':0,'duck':1,'cup':2,'sponge':3,'tball':4,'pball':5,'gball':6,'gstick':7,'nerf':8,'calc':9,'stapler':10}\nvalue2name = dict((value,name) for name,value in 
name2value.items()) \n\nname2string = {'v8':'v8 can','duck':'ducky','stapler':'stapler','pball':'ping pang ball','tball':'tennis ball','sponge':'sponge',\n 'bclip':'binder clip','tape':'big tape','gstick':'glue stick','cup':'cup','pen':'pen','calc':'calculator',\n 'blade':'razor','bottle':'bottle','cpin':'clothespin','scissors':'scissors','stape':'small tape','gball':'golf ball',\n 'orwidg':'orange thing','glue':'glue','spoon':'spoon','fork':'fork','nerf':'nerf gun','eraser':'eraser',\n 'empty':'empty plate'}\n\nangles_list = np.asarray([i*18 for i in range(10)]).astype(np.float32)\n\nnum_labels = 11\nimage_size = 40\n'''ConvNet'''\nk1_size = 4\nk1_stride = 1\nk1_depth = 1\nk1_nm = 16\nn1 = image_size*image_size*1\n\nk2_size = 3\nk2_stride = 2\nk2_depth = 16\nk2_nm = 16\nm1_size = image_size-k1_size+k1_stride\nn2 = m1_size*m1_size*k1_nm\n\nk3_size = 4\nk3_stride = 1\nk3_depth = 16\nk3_nm = 32\nm2_size = (m1_size-k2_size)/k2_stride+1\nn3 = m2_size*m2_size*k2_nm\n\nk4_size = 3\nk4_stride = 2\nk4_depth = 32\nk4_nm = 32\nm3_size = (m2_size-k3_size)/k3_stride+1\nn4 = m3_size*m3_size*k3_nm\n\nk5_size = 4\nk5_stride = 1\nk5_depth = 32\nk5_nm = 64\nm4_size = (m3_size-k4_size)/k4_stride+1\nn5 = m4_size*m4_size*k4_nm\n\nk6_size = 2\nk6_stride = 2\nk6_depth = 64\nk6_nm = 64\nm5_size = (m4_size-k5_size)/k5_stride+1\nn6 = m5_size*m5_size*k5_nm\n\n'''Class FC'''\nf7_class_size = 120\nm6_class_size = (m5_size-k6_size)/k6_stride+1\nn7_class = m6_class_size*m6_class_size*k6_nm\n\nf8_class_size = 60\nn8_class = f7_class_size\n\nclasses_size = 11\nn9_class = f8_class_size\n\n'''Angle FC'''\nf7_angle_size = 120\nm6_angle_size = (m5_size-k6_size)/k6_stride+1\nn7_angle = m6_angle_size*m6_angle_size*k6_nm\n\nf8_angle_size = 60\nn8_angle = f7_angle_size\n\nangles_size = 10\nn9_angle = f8_angle_size\n\n'''Dropout'''\nkeep_prob1 = 0.8\nkeep_prob2 = 0.5\n\n'''Mini-batch'''\nbatch_size = 33\nangles_list = np.asarray([i*18 for i in range(10)]).astype(np.float32)\n\ndef leaky_relu(x, leak=0.1):\n return tf.maximum(x, x * leak)\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n '''Input data'''\n tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, k1_depth))\n # k1_depth = input_channels\n # convolution's input is a tensor of shape [batch,in_height,in_width,in_channels]\n tf_train_classes = tf.placeholder(tf.float32, shape=(batch_size, 11))\n tf_train_angles = tf.placeholder(tf.float32, shape=(batch_size, 10))\n \n '''Xavier initialization'''\n k1_stddev = math.sqrt(1.0/n1)\n k1_weights = tf.Variable(tf.truncated_normal([k1_size, k1_size, k1_depth, k1_nm], stddev = k1_stddev))\n k1_biases = tf.Variable(tf.zeros([k1_nm]))\n \n k2_stddev = math.sqrt(2.0/n2)\n k2_weights = tf.Variable(tf.truncated_normal([k2_size, k2_size, k2_depth, k2_nm], stddev = k2_stddev))\n k2_biases = tf.Variable(tf.zeros([k2_nm]))\n \n k3_stddev = math.sqrt(2.0/n3)\n k3_weights = tf.Variable(tf.truncated_normal([k3_size, k3_size, k3_depth, k3_nm], stddev = k3_stddev))\n k3_biases = tf.Variable(tf.zeros([k3_nm]))\n \n k4_stddev = math.sqrt(2.0/n4)\n k4_weights = tf.Variable(tf.truncated_normal([k4_size, k4_size, k4_depth, k4_nm], stddev = k4_stddev))\n k4_biases = tf.Variable(tf.zeros([k4_nm]))\n \n k5_stddev = math.sqrt(2.0/n5)\n k5_weights = tf.Variable(tf.truncated_normal([k5_size, k5_size, k5_depth, k5_nm], stddev = k5_stddev))\n k5_biases = tf.Variable(tf.zeros([k5_nm]))\n \n k6_stddev = math.sqrt(2.0/n6)\n k6_weights = tf.Variable(tf.truncated_normal([k6_size, k6_size, k6_depth, k6_nm], stddev = 
k6_stddev))\n k6_biases = tf.Variable(tf.zeros([k6_nm]))\n \n ## class FC\n f7_class_stddev = math.sqrt(2.0/n7_class)\n f7_class_weights = tf.Variable(tf.truncated_normal([n7_class, f7_class_size], stddev = f7_class_stddev))\n f7_class_biases = tf.Variable(tf.zeros([f7_class_size]))\n \n f8_class_stddev = math.sqrt(2.0/n8_class)\n f8_class_weights = tf.Variable(tf.truncated_normal([n8_class, f8_class_size], stddev = f8_class_stddev))\n f8_class_biases = tf.Variable(tf.zeros([f8_class_size]))\n \n f9_class_stddev = math.sqrt(2.0/n9_class)\n f9_class_weights = tf.Variable(tf.truncated_normal([n9_class, classes_size], stddev = f9_class_stddev))\n f9_class_biases = tf.Variable(tf.zeros([classes_size]))\n \n ## angle FC\n f7_angle_stddev = math.sqrt(2.0/n7_angle)\n f7_angle_weights = tf.Variable(tf.truncated_normal([n7_angle, f7_angle_size], stddev = f7_angle_stddev))\n f7_angle_biases = tf.Variable(tf.zeros([f7_angle_size]))\n \n f8_angle_stddev = math.sqrt(2.0/n8_angle)\n f8_angle_weights = tf.Variable(tf.truncated_normal([n8_angle, f8_angle_size], stddev = f8_angle_stddev))\n f8_angle_biases = tf.Variable(tf.zeros([f8_angle_size]))\n \n f9_angle_stddev = math.sqrt(2.0/n9_angle)\n f9_angle_weights = tf.Variable(tf.truncated_normal([n9_angle, angles_size], stddev = f9_angle_stddev))\n f9_angle_biases = tf.Variable(tf.zeros([angles_size]))\n \n #print n1,n2,n3,n4,n5,n6,n7,n8,n9\n #print k1_stddev,k2_stddev,k3_stddev,k4_stddev,k5_stddev,k6_stddev,f7_stddev,f8_stddev,f9_stddev\n \n '''Batch normalization initialization'''\n beta1 = tf.Variable(tf.zeros([k1_nm]))\n gamma1 = tf.Variable(tf.ones([k1_nm]))\n \n beta2 = tf.Variable(tf.zeros([k2_nm]))\n gamma2 = tf.Variable(tf.ones([k2_nm]))\n \n beta3 = tf.Variable(tf.zeros([k3_nm]))\n gamma3 = tf.Variable(tf.ones([k3_nm]))\n \n beta4 = tf.Variable(tf.zeros([k4_nm]))\n gamma4 = tf.Variable(tf.ones([k4_nm]))\n\n beta5 = tf.Variable(tf.zeros([k5_nm]))\n gamma5 = tf.Variable(tf.ones([k5_nm]))\n \n beta6 = tf.Variable(tf.zeros([k6_nm]))\n gamma6 = tf.Variable(tf.ones([k6_nm]))\n\n saver = tf.train.Saver()\n # Model\n def test_model(data):\n conv = tf.nn.conv2d(data, k1_weights, [1, 1, 1, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta1,gamma1,1e-5)\n hidden = leaky_relu(y)\n \n conv = tf.nn.conv2d(hidden, k2_weights, [1, 2, 2, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta2,gamma2,1e-5)\n hidden = leaky_relu(y)\n \n conv = tf.nn.conv2d(hidden, k3_weights, [1, 1, 1, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta3,gamma3,1e-5)\n hidden = leaky_relu(y)\n \n conv = tf.nn.conv2d(hidden, k4_weights, [1, 2, 2, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta4,gamma4,1e-5)\n hidden = leaky_relu(y)\n \n conv = tf.nn.conv2d(hidden, k5_weights, [1, 1, 1, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta5,gamma5,1e-5)\n hidden = leaky_relu(y)\n \n conv = tf.nn.conv2d(hidden, k6_weights, [1, 2, 2, 1], padding='VALID')\n mean, variance = tf.nn.moments(conv, [0, 1, 2])\n y = tf.nn.batch_normalization(conv,mean,variance,beta6,gamma6,1e-5)\n hidden = leaky_relu(y)\n \n shape = hidden.get_shape().as_list()\n hidden_input = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n 
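
Every layer above hand-computes its stddev as sqrt(1/n) for the input layer and sqrt(2/n) for the leaky-ReLU layers, with n taken as the full input activation count (H*W*C of the layer input). A minimal helper in the same TF1 style that folds the repetition into one function; conv_weights is a hypothetical name, and note the textbook He formula would use the per-unit fan-in k*k*C rather than the record's H*W*C.

import math
import tensorflow as tf

def conv_weights(ksize, in_depth, out_maps, fan_in, relu=True):
    # sqrt(2/fan_in) (He) for leaky-ReLU layers, sqrt(1/fan_in)
    # (Xavier-style) for the input layer; fan_in is passed in so the
    # caller can keep the record's n values.
    stddev = math.sqrt((2.0 if relu else 1.0) / fan_in)
    weights = tf.Variable(
        tf.truncated_normal([ksize, ksize, in_depth, out_maps], stddev=stddev))
    biases = tf.Variable(tf.zeros([out_maps]))
    return weights, biases

# e.g. the first two layers of the graph above:
# k1_weights, k1_biases = conv_weights(4, 1, 16, n1, relu=False)
# k2_weights, k2_biases = conv_weights(3, 16, 16, n2)
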
\n ## class FC\n class_hidden = leaky_relu(tf.matmul(hidden_input, f7_class_weights) + f7_class_biases)\n class_fc = tf.matmul(class_hidden,f8_class_weights)\n class_hidden = leaky_relu(class_fc + f8_class_biases)\n fc_classes = tf.matmul(class_hidden,f9_class_weights)\n output_classes = fc_classes + f9_class_biases\n \n ## angle FC\n angle_hidden = leaky_relu(tf.matmul(hidden_input, f7_angle_weights) + f7_angle_biases)\n angle_fc = tf.matmul(angle_hidden,f8_angle_weights)\n angle_hidden = leaky_relu(angle_fc + f8_angle_biases)\n fc_angles = tf.matmul(angle_hidden,f9_angle_weights)\n output_angles = fc_angles + f9_angle_biases \n \n return output_classes, output_angles\n\n\nconfig = tf.ConfigProto()\n#config.log_device_placement = True \nsession = tf.Session(graph=graph, config = config)\nsaver.restore(session, \"./model.ckpt\")\n\ndef accuracy_classes(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))/ predictions.shape[0])\n\n\nclass evaluator:\n def __init__(self):\n #Initialize ros publisher, subscriber\n self.pub1 = rospy.Publisher('prediction',PredictionMSG,queue_size=1)\n self.sub1 = rospy.Subscriber('box_image/numpy',numpy_msg(Floats),self.callback,queue_size=1)\n self.pub2 = rospy.Publisher('p_box_image/image',Image, queue_size=1)\n self.pub3 = rospy.Publisher('predicted_class', String, queue_size=1)\n self.bridge = CvBridge()\n self.pt1x = -15.0\n self.pt1y = 0.0\n self.pt2x = 15.0\n self.pt2y = 0.0\n rospy.loginfo(\"Initialized!\")\n\n def callback(self,data):\n with session.as_default():\n assert tf.get_default_session() is session\n input_image = np.flipud(data.data.reshape(image_size,image_size).astype(np.float32)).reshape(-1,image_size,image_size,1)\n images = np.append(input_image,small_data,axis=0)\n out_class, out_angle = test_model(images)\n pre_class = tf.nn.softmax(out_class)\n pre_angle = tf.nn.softmax(out_angle).eval()[0]\n angle = np.sum(np.multiply(pre_angle, angles_list))/np.sum(pre_angle)\n pre_dict = dict(zip(list(range(num_labels)),pre_class.eval()[0]))\n sorted_pre_dict = sorted(pre_dict.items(), key=operator.itemgetter(1))\n name1 = value2name[sorted_pre_dict[-1][0]]\n name1 = name2string[name1]\n self.pub3.publish(name1)\n value1 = str(sorted_pre_dict[-1][1])\n name2 = value2name[sorted_pre_dict[-2][0]]\n name2 = name2string[name2]\n value2 = str(sorted_pre_dict[-2][1])\n pre = PredictionMSG()\n pre.name1, pre.value1, pre.name2, pre.value2, pre.angle = name1, float(value1), name2, float(value2), angle\n self.pub1.publish(pre)\n image = ((input_image.reshape(image_size,image_size) + 0.65)*255).astype(np.uint8)\n image = image[5:35,10:40]\n pt1x = int(self.pt1x * math.cos(math.radians(angle)) + self.pt1y * -math.sin(math.radians(angle))) + 15\n pt1y = int(self.pt1x * math.sin(math.radians(angle)) + self.pt1y * math.cos(math.radians(angle))) + 15\n pt2x = int(self.pt2x * math.cos(math.radians(angle)) + self.pt2y * -math.sin(math.radians(angle))) + 15\n pt2y = int(self.pt2x * math.sin(math.radians(angle)) + self.pt2y * math.cos(math.radians(angle))) + 15\n cv2.line(image,(pt1x,pt1y),(pt2x,pt2y),150,1)\n ros_image = self.bridge.cv2_to_imgmsg(image, encoding=\"mono8\")\n self.pub2.publish(ros_image)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\nif __name__ == '__main__':\n rospy.init_node('multitask',anonymous=True)\n ev = evaluator()\ntry:\n rospy.spin()\nexcept KeyboardInterrupt:\n print(\"Shutting down ROS node evaluate_image\")\n\nsession.close()\nprint(\"Shutting down ROS node 
evaluate_image\")\n","sub_path":"thesis/supervised_learning/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":12378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259582576","text":"import ast\nimport io\nfrom urllib.parse import urlencode\n\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport flask\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output, State\nfrom flask import request\nfrom jitcache import Cache\n\nfrom common import BootstrapApp\nfrom utils.affinities import clusters_from_affinity, aaw_affinity\nfrom utils.cluster import neat_clusters\nfrom utils.filter import stasis_only, codes_filter\nfrom utils.dash import dash_kwarg\n\naaw = pd.read_pickle(\"data/AAW.pickle\")\n\nn_clusters_up = 42\nn_clusters_down = 50\n\nstate_col_names = [f\"occ4d{i}_title\" for i in range(6)]\nweight_col_names = [f\"weight{i}\" for i in range(6)]\nskill_col_names = [f\"skill{i}\" for i in range(6)]\ncluster_data = aaw[state_col_names + weight_col_names + skill_col_names]\n\ncache = Cache()\n\nanzsco = pd.read_csv(\"data/ANZSCO.csv\")\n\nvet_quals = pd.read_csv(\"data/VET_qual_to_job.csv\")\n\n# Grab the weights\nweights = aaw[\"weight0\"]\n\n\ndef unstack_many(df, col_group_lists):\n series_list = []\n\n for k, v in col_group_lists.items():\n series_list.append(\n df[v].unstack().reset_index(level=0, drop=True).rename(k)\n )\n\n return pd.concat(series_list, axis=1)\n\n\ndef make_list(input):\n if type(input) is not list:\n output = [input]\n else:\n output = input\n\n return output\n\n\ndef get_cluster_fname(\n n_clusters, direction, abs_transition_threshold, skill_diff_threshold\n):\n return (\n f\"{n_clusters}_{direction}_{abs_transition_threshold}_\"\n f\"{skill_diff_threshold}\"\n )\n\n\ndef get_cluster_attributes(filename):\n keys = [\n \"n_clusters\",\n \"direction\",\n \"abs_transition_threshold\",\n \"skill_diff_threshold\",\n ]\n\n values = filename.split(\"_\")[1:]\n\n attributes = dict(zip(keys, values))\n\n return attributes\n\n\n@cache.memoize\ndef get_clusters(\n n_clusters, direction, abs_transition_threshold, skill_diff_threshold\n):\n identifer_dict = {\n \"n_clusters\": n_clusters,\n \"direction\": direction,\n \"abs_transition_threshold\": abs_transition_threshold,\n \"skill_diff_threshold\": skill_diff_threshold,\n }\n\n kwargs = identifer_dict.copy()\n kwargs[\"data\"] = cluster_data\n kwargs[\"affinity_fn\"] = aaw_affinity\n\n return clusters_from_affinity(**kwargs)\n\n\ndefaults = {\n \"up\": {\n \"direction-select\": \"up\",\n \"ncluster-select\": n_clusters_up,\n \"threshold-select\": 0,\n \"skillgap-select\": 1,\n },\n \"down\": {\n \"direction-select\": \"down\",\n \"ncluster-select\": n_clusters_down,\n \"threshold-select\": 0,\n \"skillgap-select\": 1,\n },\n}\n\n\ndef do_something(**kwargs):\n input_value = kwargs[\"select\"]\n output_key = kwargs[\"output_key\"]\n\n if input_value is not None:\n # Make sure input was actually activated\n default_key = kwargs[\"preset-select\"]\n\n return defaults[default_key][output_key]\n else:\n output_key = kwargs[\"output_key\"]\n return kwargs[output_key]\n\n\nclass Clusters(BootstrapApp):\n\n title = \"AAW Clusters\"\n breadcrumbs = [(\"Home\", \"/\"), (\"AAW\", None), (\"Clusters\", None)]\n\n def body(self):\n\n return [\n dbc.Row(dbc.Col(html.H1(\"Clusters\"), lg=12)),\n dbc.Row(\n [\n dbc.Col(\n [\n 
dbc.FormGroup(\n [\n dbc.Label(\"Max Number of Clusters\"),\n dcc.Dropdown(\n id=\"ncluster-select\",\n options=[\n {\"label\": x, \"value\": x}\n for x in range(1, 101)\n ],\n className=\"three columns\",\n value=n_clusters_up,\n ),\n dbc.FormText(id=\"ncluster-actual\"),\n dbc.FormText(\n html.A(\n \"Cluster Diagnostics\",\n href=\"diagnostics\",\n )\n ),\n ]\n ),\n dbc.FormGroup(\n [\n # Set the threshold absolute transitions\n dbc.Label(\"Absolute Transition Threshold\"),\n dcc.Dropdown(\n id=\"threshold-select\",\n options=[\n {\"label\": x, \"value\": x}\n for x in range(31)\n ],\n value=0,\n className=\"three columns\",\n ),\n ]\n ),\n dbc.FormGroup(\n [\n # Set the skills gap\n dbc.Label(\"Skill Difference Threshold\"),\n dcc.Dropdown(\n id=\"skillgap-select\",\n options=[\n {\"label\": x, \"value\": x}\n for x in range(5)\n ],\n value=1,\n className=\"three columns\",\n ),\n ]\n ),\n dbc.FormGroup(\n [\n # Set the direction and number of clusters\n dbc.Label(\"Direction of Skill Flow\"),\n dbc.RadioItems(\n id=\"direction-select\",\n options=[\n {\n \"label\": \"Upwards Skills\",\n \"value\": \"up\",\n },\n {\n \"label\": \"Downwards Skills\",\n \"value\": \"down\",\n },\n ],\n value=\"up\",\n ),\n html.Div(\n id=\"direction-firstload-flag\",\n hidden=True,\n children=\"True\",\n ),\n ]\n ),\n html.A(\n id=\"cluster-download-link\",\n children=dbc.Button(\n \"Download Current Clusters\"\n ),\n href=\"#\",\n style={\"fontSize\": 18},\n ),\n html.Hr(),\n dbc.FormGroup(\n [\n dbc.Label(\"Cluster Presets\"),\n dcc.Dropdown(\n options=[\n {\n \"value\": \"up\",\n \"label\": \"Optimal Upwards Clusters\",\n },\n {\n \"value\": \"down\",\n \"label\": \"Optimal Downwards Clusters\",\n },\n ],\n id=\"preset-select\",\n ),\n ]\n ),\n dbc.Button(\"Select\", id=\"select\"),\n ],\n lg=4,\n ),\n dbc.Col(\n dcc.Loading(\n dcc.Graph(\n id=\"cluster-finder\",\n figure={\n \"data\": [],\n \"layout\": go.Layout(height=800),\n },\n )\n ),\n lg=8,\n ),\n ]\n ),\n # Target Cluster (Dropdown)\n dbc.Row(\n [\n dbc.Col(\n dbc.FormGroup(\n [\n dbc.Label(\"Select Cluster/s\"),\n dcc.Dropdown(id=\"cluster-select\", multi=True),\n ]\n ),\n lg=12,\n )\n ]\n ),\n dbc.Row(\n [\n # Target Occupation (Dropdown)\n dbc.Col(\n dbc.FormGroup(\n [\n dbc.Label(\"Add Cluster from Occupation\"),\n dcc.Dropdown(\n id=\"occupation-select\",\n # This causes the drop down to reset after each\n # selection\n value=None,\n multi=False,\n ),\n ]\n ),\n lg=5,\n ),\n dbc.Col(\n dbc.FormGroup(\n [\n dbc.Label(\"Mode\"),\n dbc.RadioItems(\n id=\"mode-select\",\n options=[\n {\n \"label\": \"Combine Clusters\",\n \"value\": \"combine\",\n },\n {\n \"label\": \"Compare Clusters\",\n \"value\": \"compare\",\n },\n ],\n value=\"combine\",\n ),\n ]\n ),\n lg=3,\n ),\n ]\n ),\n dbc.Row(dbc.Col(html.H5(\"Counts\"))),\n dbc.Row(\n [\n dbc.Col(dcc.Loading(html.Div(id=\"counts-table\")), lg=8),\n dbc.Col(\n [\n html.A(\n id=\"download-current-counts-link\",\n children=html.P(\n \"Download Current Count Table\"\n ),\n href=\"#\",\n style={\"fontSize\": 18},\n ),\n html.A(\n id=\"download-counts-link\",\n children=\"Download All Cluster Counts\",\n href=\"#\",\n style={\"fontSize\": 18},\n ),\n ],\n lg=4,\n ),\n ]\n ),\n dbc.Row(\n dbc.Col(\n [\n dbc.Tabs(\n [\n dbc.Tab(\n label=\"Occupations\",\n className=\"pt-3\",\n children=[\n dcc.Loading(\n html.Div(id=\"cluster-table\")\n )\n ],\n ),\n dbc.Tab(\n label=\"Histograms\",\n children=dbc.Row(\n [\n dbc.Col(\n dcc.Loading(\n dcc.Graph(id=\"cluster-fos\")\n ),\n lg=6,\n ),\n dbc.Col(\n 
dcc.Loading(\n dcc.Graph(id=\"cluster-los\")\n ),\n lg=6,\n ),\n dbc.Col(\n dcc.Loading(\n dcc.Graph(id=\"cluster-ind\")\n ),\n lg=6,\n ),\n dbc.Col(\n dcc.Loading(\n dcc.Graph(id=\"cluster-age\")\n ),\n lg=6,\n ),\n ]\n ),\n ),\n dbc.Tab(\n label=\"VET Qualifications\",\n children=dbc.Row(\n dbc.Col(\n dcc.Loading(\n html.Div(id=\"quals-table\")\n ),\n lg=12,\n )\n ),\n ),\n ],\n id=\"tabs\",\n )\n ],\n lg=12,\n )\n ),\n ]\n\n def postlayout_setup(self):\n\n # Set the actual number of clusters\n @self.callback(\n Output(\n component_id=\"ncluster-actual\", component_property=\"children\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_actual_n_clusters(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n ):\n cluster_df = get_clusters(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n n_clusters = cluster_df[\"Cluster\"].nunique()\n\n return f\"Number of Non-Empty Clusters: {n_clusters}\"\n\n # List of outputs\n outputs = [\n Output(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Output(component_id=\"ncluster-select\", component_property=\"value\"),\n Output(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Output(component_id=\"skillgap-select\", component_property=\"value\"),\n ]\n\n preset_input = Input(\n component_id=\"select\", component_property=\"n_clicks\"\n )\n preset_state = State(\n component_id=\"preset-select\", component_property=\"value\"\n )\n\n for output in outputs:\n output_state = State(\n component_id=output.component_id,\n component_property=output.component_property,\n )\n\n self.callback(\n output, [preset_input], [preset_state, output_state]\n )(\n dash_kwarg(\n [preset_input, preset_state, output_state],\n {\"output_key\": output.component_id},\n )(do_something)\n )\n\n # Set the cluster download link based on direction\n @self.callback(\n Output(\n component_id=\"cluster-download-link\", component_property=\"href\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_cluster_download_link(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n ):\n file_name = get_cluster_fname(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n return f\"downloads/clusters/clusters_{file_name}.csv\"\n\n # Set the current counts table based on direction\n @self.callback(\n Output(\n component_id=\"download-current-counts-link\",\n component_property=\"href\",\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"cluster-select\", component_property=\"value\"\n ),\n Input(component_id=\"mode-select\", 
component_property=\"value\"),\n ],\n )\n def set_current_counts_download_link(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n input_clusters,\n input_mode,\n ):\n file_name = get_cluster_fname(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n\n state = urlencode(\n dict(zip([\"clusters\", \"method\"], [input_clusters, input_mode]))\n )\n\n return f\"downloads/counts/counts_{file_name}.csv?{state}\"\n\n # Set the counts download link based on direction\n @self.callback(\n Output(\n component_id=\"download-counts-link\", component_property=\"href\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_total_counts_download_link(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n ):\n file_name = get_cluster_fname(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n return f\"downloads/counts/counts_{file_name}.csv\"\n\n # Set the cluster options based on direction\n @self.callback(\n Output(\n component_id=\"cluster-select\", component_property=\"options\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_cluster_options(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n ):\n cluster_df = get_clusters(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n\n cluster_ids = cluster_df[\"Cluster\"].unique()\n\n clusters = [\n {\"label\": f\"Cluster {i}\", \"value\": i} for i in cluster_ids\n ]\n return clusters\n\n # Set the occupation selector options based on direction\n @self.callback(\n Output(\n component_id=\"occupation-select\", component_property=\"options\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_occupation_options(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n input_skillgap,\n ):\n cluster_df = get_clusters(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n occupations = [\n {\"label\": f\"{i}\", \"value\": i}\n for i in cluster_df[\"Occupation\"].sort_values()\n ]\n return occupations\n\n @self.callback(\n Output(component_id=\"cluster-finder\", component_property=\"figure\"),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def generate_cluster_finder(\n input_direction,\n input_nclusters,\n input_transition_threshold,\n 
input_skillgap,\n ):\n cluster_df = get_clusters(\n input_nclusters,\n input_direction,\n input_transition_threshold,\n input_skillgap,\n )\n\n cluster_ids = cluster_df[\"Cluster\"].unique()\n\n educations = []\n skills = []\n texts = []\n\n education_labels = [\n \"Year 10 or below\",\n \"Year 11\",\n \"Year 12\",\n \"Diploma or Certificate\",\n \"Trade qualification\",\n \"Degree\",\n \"Post grad degree\",\n ]\n mapping_dict = dict(\n zip(education_labels, np.arange(1, len(education_labels)))\n )\n\n for cluster in cluster_ids:\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n # TODO: use all available information on education\n # by using unstack_many then filter for occupation\n # however this might bias results as some clusters and occupations will have higher response rates\n cluster_sub_df = aaw[\n aaw[\"occ4d1_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n # Map highed column to numeric level\n # Take the mean\n # Unfortunately AAW doesn't adhere to AQF Levels\n ed_mean = (\n cluster_sub_df[\"highed1\"]\n .apply(\n lambda x: mapping_dict[x]\n if x in mapping_dict\n else np.NaN\n )\n .mean()\n )\n\n educations.append(ed_mean)\n\n skill_mean = (cluster_sub_df[\"skill1\"]).mean()\n skills.append(skill_mean)\n\n text = \"
\".join(\n [f\"Cluster {cluster}\"]\n + [\n f\"- {occupation}\"\n for occupation in cluster_occupations[\"Occupation\"]\n ]\n )\n texts.append(text)\n\n return {\n \"data\": [\n go.Scatter(\n x=skills,\n y=educations,\n text=texts,\n mode=\"markers\",\n customdata=cluster_ids,\n )\n ],\n \"layout\": go.Layout(\n height=800,\n title=\"Cluster Finder\",\n xaxis={\"title\": \"Skill Level\"},\n yaxis={\"title\": \"Education Level\"},\n hovermode=\"closest\",\n clickmode=\"select+event\",\n ),\n }\n\n # Clear occupation selector when its values change\n @self.callback(\n Output(\n component_id=\"occupation-select\", component_property=\"value\"\n ),\n [\n Input(\n component_id=\"occupation-select\",\n component_property=\"options\",\n )\n ],\n )\n def set_occupation_value(input_occupation_options):\n return None\n\n @self.callback(\n Output(component_id=\"cluster-select\", component_property=\"value\"),\n [\n Input(\n component_id=\"occupation-select\",\n component_property=\"value\",\n ),\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"cluster-finder\",\n component_property=\"clickData\",\n ),\n Input(\n component_id=\"cluster-finder\",\n component_property=\"selectedData\",\n ),\n ],\n [\n State(\n component_id=\"cluster-select\", component_property=\"value\"\n ),\n State(\n component_id=\"direction-firstload-flag\",\n component_property=\"children\",\n ),\n State(\n component_id=\"ncluster-select\", component_property=\"value\"\n ),\n State(\n component_id=\"threshold-select\", component_property=\"value\"\n ),\n State(\n component_id=\"skillgap-select\", component_property=\"value\"\n ),\n ],\n )\n def set_clusters_selected(\n input_occupation,\n input_direction,\n input_cluster_finder_click,\n input_cluster_finder_select,\n state_cluster_select,\n state_firstload,\n state_nclusters,\n state_transition_threshold,\n state_skillgap,\n ):\n cluster_df = get_clusters(\n state_nclusters,\n input_direction,\n state_transition_threshold,\n state_skillgap,\n )\n\n # If nothing currently selected and input occupation did not trigger the event\n # then it is likely that this is the first load. 
So set it to the first cluster.\n # Otherwise user probably switched between upwards/downwards so empty all cluster selection.\n if input_occupation is None:\n # This is extremely unsafe as the user could set the children of this to any valid python\n # code and it would be executed!\n # Check if it's the first load\n if ast.literal_eval(state_firstload):\n output_list = [cluster_df[\"Cluster\"].unique()[0]]\n else:\n if (\n input_cluster_finder_click is not None\n or input_cluster_finder_select is not None\n ):\n output_list = make_list(state_cluster_select)\n if input_cluster_finder_click is not None:\n cluster_id = input_cluster_finder_click[\"points\"][\n 0\n ][\"customdata\"]\n output_list.append(cluster_id)\n if input_cluster_finder_select is not None:\n cluster_ids = [\n point[\"customdata\"]\n for point in input_cluster_finder_select[\n \"points\"\n ]\n ]\n output_list.extend(cluster_ids)\n else:\n output_list = []\n else:\n cluster_id_series = cluster_df[\n cluster_df[\"Occupation\"] == input_occupation\n ][\"Cluster\"]\n output_list = make_list(state_cluster_select) + list(\n cluster_id_series\n )\n\n return sorted(list(set(output_list)))\n\n @self.callback(\n Output(\n component_id=\"direction-firstload-flag\",\n component_property=\"children\",\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n ),\n Input(\n component_id=\"cluster-select\", component_property=\"value\"\n ),\n ],\n [\n State(\n component_id=\"direction-firstload-flag\",\n component_property=\"children\",\n )\n ],\n )\n def set_direction_flag(\n input_direction, input_cluster_select, state_firstload\n ):\n if input_cluster_select is None and ast.literal_eval(\n state_firstload\n ):\n return \"True\"\n return \"False\"\n\n @self.callback(\n Output(\n component_id=\"cluster-finder\", component_property=\"clickData\"\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n )\n ],\n )\n def clear_clickdata(input_direction):\n return None\n\n @self.callback(\n Output(\n component_id=\"cluster-finder\",\n component_property=\"selectedData\",\n ),\n [\n Input(\n component_id=\"direction-select\", component_property=\"value\"\n )\n ],\n )\n def clear_selecteddata(input_direction):\n return None\n\n def route_combine_compare(combine_fn, compare_fn):\n def handler(**kwargs):\n\n kwargs[\"cluster_df_current\"] = get_clusters(\n kwargs[\"ncluster-select\"],\n kwargs[\"direction-select\"],\n kwargs[\"threshold-select\"],\n kwargs[\"skillgap-select\"],\n )\n\n if kwargs[\"mode-select\"] == \"combine\":\n return combine_fn(**kwargs)\n else:\n return compare_fn(**kwargs)\n\n return handler\n\n inputs = [\n Input(component_id=\"ncluster-select\", component_property=\"value\"),\n Input(component_id=\"threshold-select\", component_property=\"value\"),\n Input(component_id=\"skillgap-select\", component_property=\"value\"),\n Input(component_id=\"direction-select\", component_property=\"value\"),\n Input(component_id=\"cluster-select\", component_property=\"value\"),\n Input(component_id=\"mode-select\", component_property=\"value\"),\n ]\n\n def generate_cluster_table(**kwargs):\n\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n dfs = []\n\n for i in range(len(cluster_ids)):\n cluster = cluster_ids[i]\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n sub_anzsco = (\n anzsco.copy()\n .set_index(\"d4_title\", drop=False)\n 
.loc[cluster_occupations[\"Occupation\"]]\n )\n\n columns = [\"d4_code\", \"d4_title\"]\n name_remap = {\"d4_code\": \"ANZSCO\", \"d4_title\": \"Occupation\"}\n\n cluster_dataframe = sub_anzsco[columns].rename(\n name_remap, axis=\"columns\"\n )\n\n cluster_dataframe = cluster_dataframe.sort_values(\"ANZSCO\")\n\n dfs.append(cluster_dataframe)\n\n table = dbc.Table.from_dataframe(\n pd.concat(dfs, axis=1, sort=True),\n striped=True,\n bordered=True,\n hover=True,\n )\n\n return table\n\n self.callback(\n Output(\n component_id=\"cluster-table\", component_property=\"children\"\n ),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_cluster_table, generate_cluster_table\n )\n )\n )\n\n highed_column_order = [\n \"Year 10 or below\",\n \"Year 11\",\n \"Year 12\",\n \"Diploma or Certificate\",\n \"Trade qualification\",\n \"Degree\",\n \"Post grad degree\",\n ]\n\n def generate_los_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n ed_estimates = cluster_sub_df.groupby(\"highed5\").sum()[\"weight5\"]\n ed_estimates = ed_estimates.reindex(index=highed_column_order)\n\n data = [\n go.Bar(\n x=ed_estimates.index,\n y=ed_estimates.values,\n name=f\"Selected Clusters\",\n )\n ]\n\n layout = {\"title\": \"Highest Level of Study\", \"showlegend\": True}\n\n return {\"data\": data, \"layout\": layout}\n\n def generate_los_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n data = []\n\n for cluster in cluster_ids:\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n ed_estimates = cluster_sub_df.groupby(\"highed5\").sum()[\n \"weight5\"\n ]\n ed_estimates = ed_estimates.reindex(index=highed_column_order)\n\n data.append(\n go.Bar(\n x=ed_estimates.index,\n y=ed_estimates.values,\n name=f\"Cluster {cluster}\",\n )\n )\n\n layout = {\"title\": \"Highest Level of Study\", \"barmode\": \"group\"}\n\n return {\"data\": data, \"layout\": layout}\n\n self.callback(\n Output(component_id=\"cluster-los\", component_property=\"figure\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_los_combine, generate_los_compare\n )\n )\n )\n\n fos_column_order = [\n \"UNCLASSIFIABLE\",\n \"NO FORMAL QUAL\",\n \"NATURAL AND PHYSICAL SCIENCES\",\n \"INFORMATION TECHNOLOGY\",\n \"ENGINEERING AND RELATED TECHNOLOGIES\",\n \"ARCHITECTURE AND BUILDING\",\n \"AGRICULTURE, ENVIRONMENTAL AND RELATED STUDIES\",\n \"HEALTH\",\n \"EDUCATION\",\n \"MANAGEMENT AND COMMERCE\",\n \"SOCIETY AND CULTURE\",\n \"CREATIVE ARTS\",\n \"FOOD, HOSPITALITY AND PERSONAL SERVICES\",\n \"MIXED FIELD PROGRAMMES\",\n ]\n\n def generate_fos_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n 
cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n fos_estimates = cluster_sub_df.groupby(\"fos2d5\").sum()[\"weight5\"]\n fos_estimates = fos_estimates.reindex(index=fos_column_order)\n\n data = [\n go.Bar(\n x=fos_estimates.index,\n y=fos_estimates.values,\n name=f\"Selected Clusters\",\n )\n ]\n\n layout = {\"title\": \"Field of Study\", \"showlegend\": True}\n\n return {\"data\": data, \"layout\": layout}\n\n def generate_fos_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n data = []\n\n for cluster in cluster_ids:\n # Get the occupations associated with the cluster ids\n\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n fos_estimates = cluster_sub_df.groupby(\"fos2d5\").sum()[\n \"weight5\"\n ]\n fos_estimates = fos_estimates.reindex(index=fos_column_order)\n\n data.append(\n go.Bar(\n x=fos_estimates.index,\n y=fos_estimates.values,\n name=f\"Cluster {cluster}\",\n )\n )\n\n layout = {\"title\": \"Field of Study\", \"barmode\": \"group\"}\n\n return {\"data\": data, \"layout\": layout}\n\n self.callback(\n Output(component_id=\"cluster-fos\", component_property=\"figure\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_fos_combine, generate_fos_compare\n )\n )\n )\n\n def generate_ind_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n industry_estimates = cluster_sub_df.groupby(\"ind1d5\").sum()[\n \"weight5\"\n ]\n\n data = [\n go.Bar(\n x=industry_estimates.index,\n y=industry_estimates,\n name=f\"Selected Clusters\",\n )\n ]\n\n layout = {\"title\": \"Industry\", \"showlegend\": True}\n\n return {\"data\": data, \"layout\": layout}\n\n def generate_ind_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n data = []\n\n for cluster in cluster_ids:\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n industry_estimates = cluster_sub_df.groupby(\"ind1d5\").sum()[\n \"weight5\"\n ]\n\n data.append(\n go.Bar(\n x=industry_estimates.index,\n y=industry_estimates,\n name=f\"Cluster {cluster}\",\n )\n )\n\n layout = {\"title\": \"Industry\", \"barmode\": \"group\"}\n\n return {\"data\": data, \"layout\": layout}\n\n self.callback(\n Output(component_id=\"cluster-ind\", component_property=\"figure\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_ind_combine, generate_ind_compare\n )\n )\n 
)\n\n def generate_age_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n age_estimates = cluster_sub_df.groupby(\"age5\").sum()[\"weight5\"]\n\n data = [\n go.Bar(\n x=age_estimates.index,\n y=age_estimates,\n name=f\"Selected Clusters\",\n )\n ]\n\n layout = {\"title\": \"Age\", \"showlegend\": True}\n\n return {\"data\": data, \"layout\": layout}\n\n def generate_age_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n data = []\n\n for cluster in cluster_ids:\n # Get the occupations associated with the cluster ids\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n # Pickup from AAW the people who were in one of the occupations\n cluster_sub_df = aaw[\n aaw[\"occ4d5_title\"].isin(cluster_occupations[\"Occupation\"])\n ]\n\n age_estimates = cluster_sub_df.groupby(\"age5\").sum()[\"weight5\"]\n\n data.append(\n go.Bar(\n x=age_estimates.index,\n y=age_estimates,\n name=f\"Cluster {cluster}\",\n )\n )\n\n layout = {\"title\": \"Age\", \"barmode\": \"group\"}\n\n return {\"data\": data, \"layout\": layout}\n\n self.callback(\n Output(component_id=\"cluster-age\", component_property=\"figure\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_age_combine, generate_age_compare\n )\n )\n )\n\n def generate_stats_table_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n data_dict = {}\n\n # Incidence, unstack, filter and sum\n incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_incidence = incidence[\n incidence[\"Occupation\"].isin(cluster_occupations[\"Occupation\"])\n ]\n data_dict[\"Incidence\"] = np.round(\n filtered_incidence[\"Weight\"].sum()\n )\n\n # Unique incidence, unstack, filter and sum\n unique_incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_unique_incidence = (\n unique_incidence[\n unique_incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n .reset_index(drop=False)\n .drop_duplicates(\"AAID\")\n )\n data_dict[\"Unique Incidence\"] = np.round(\n filtered_unique_incidence[\"Weight\"].sum()\n )\n\n # Cluster stasis\n cluster_stasis = codes_filter(\n aaw.set_index(\"AAID\")[[f\"occ4d{i}_title\" for i in range(6)]],\n cluster_occupations[\"Occupation\"],\n True,\n True,\n )\n data_dict[\"Combined Cluster Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[cluster_stasis.index][\"weight5\"]\n .sum()\n )\n\n # Occupation stasis, find those with stasis, filter for valid occupation and sum final weights\n occ_stasis = stasis_only(\n aaw.set_index(\"AAID\")[[f\"occ4d{i}_title\" for i in range(6)]]\n )\n data_dict[\"Occupation Stasis\"] 
= np.round(\n aaw.set_index(\"AAID\")\n .loc[\n occ_stasis[\n occ_stasis[\"occ4d0_title\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ].index\n ][\"weight5\"]\n .sum()\n )\n\n return dbc.Table.from_dataframe(\n pd.DataFrame([data_dict]),\n striped=True,\n bordered=True,\n hover=True,\n )\n\n def generate_stats_table_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n list_of_dicts = []\n\n for cluster in cluster_ids:\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n data_dict = {\"Cluster\": cluster}\n\n # Incidence, unstack, filter and sum\n incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_incidence = incidence[\n incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n data_dict[\"Incidence\"] = np.round(\n filtered_incidence[\"Weight\"].sum()\n )\n\n # Unique incidence, unstack, filter and sum\n unique_incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_unique_incidence = (\n unique_incidence[\n unique_incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n .reset_index(drop=False)\n .drop_duplicates(\"AAID\")\n )\n data_dict[\"Unique Incidence\"] = np.round(\n filtered_unique_incidence[\"Weight\"].sum()\n )\n\n # Cluster stasis\n cluster_stasis = codes_filter(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ],\n cluster_occupations[\"Occupation\"],\n True,\n True,\n )\n data_dict[\"Cluster Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[cluster_stasis.index][\"weight5\"]\n .sum()\n )\n\n # Occupation stasis, find those with stasis, filter for valid occupation and sum final weights\n occ_stasis = stasis_only(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ]\n )\n data_dict[\"Occupation Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[\n occ_stasis[\n occ_stasis[\"occ4d0_title\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ].index\n ][\"weight5\"]\n .sum()\n )\n\n list_of_dicts.append(data_dict)\n\n return dbc.Table.from_dataframe(\n pd.DataFrame(list_of_dicts),\n striped=True,\n bordered=True,\n hover=True,\n )\n\n self.callback(\n Output(component_id=\"counts-table\", component_property=\"children\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_stats_table_combine, generate_stats_table_compare\n )\n )\n )\n\n def generate_quals_table_combine(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n\n cluster_df = kwargs[\"cluster_df_current\"]\n\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n cluster_sub_df = vet_quals[\n vet_quals[\"Narrow ANZSCO Description\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n\n cluster_sub_df = cluster_sub_df.sort_values(\n \"Program Level of Education ID\", ascending=False\n )\n\n cols = [\n \"Program ID\",\n \"Program Level of Education Description\",\n \"Program Name\",\n ]\n\n dataframe = cluster_sub_df[cols]\n\n content = [\n dbc.Label(f\"VET Qualifications for All Selected Clusters\"),\n dbc.Table.from_dataframe(\n dataframe, striped=True, bordered=True, hover=True\n ),\n ]\n\n return content\n\n def 
generate_quals_table_compare(**kwargs):\n cluster_ids = make_list(kwargs[\"cluster-select\"])\n cluster_df = kwargs[\"cluster_df_current\"]\n\n content = []\n\n for cluster in cluster_ids:\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n cluster_sub_df = vet_quals[\n vet_quals[\"Narrow ANZSCO Description\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n\n cluster_sub_df = cluster_sub_df.sort_values(\n \"Program Level of Education ID\", ascending=False\n )\n\n cols = [\n \"Program ID\",\n \"Program Level of Education Description\",\n \"Program Name\",\n ]\n\n dataframe = cluster_sub_df[cols]\n\n sub_content = dbc.Table.from_dataframe(\n dataframe, striped=True, bordered=True, hover=True\n )\n\n content.append(\n html.Label(f\"VET Qualifications for Cluster {cluster}\")\n )\n content.append(sub_content)\n\n return content\n\n self.callback(\n Output(component_id=\"quals-table\", component_property=\"children\"),\n inputs,\n )(\n dash_kwarg(inputs)(\n route_combine_compare(\n generate_quals_table_combine, generate_quals_table_compare\n )\n )\n )\n\n @self.server.route(\n f\"{self.config.url_base_pathname}downloads/clusters/\",\n endpoint=f\"{self.config.url_base_pathname}:serve_clusters\",\n )\n def serve_clusters(path_to_file):\n\n filename = path_to_file.split(\".\")[0]\n\n cluster_attributes = get_cluster_attributes(filename)\n clusters_df = get_clusters(\n int(cluster_attributes[\"n_clusters\"]),\n cluster_attributes[\"direction\"],\n int(cluster_attributes[\"abs_transition_threshold\"]),\n int(cluster_attributes[\"skill_diff_threshold\"]),\n )\n\n neat_cluster_df = neat_clusters(clusters_df)\n\n proxyIO = io.StringIO()\n neat_cluster_df.to_csv(proxyIO, index=False, encoding=\"utf-8\")\n\n mem = io.BytesIO()\n mem.write(proxyIO.getvalue().encode(\"utf-8\"))\n mem.seek(0)\n\n return flask.send_file(\n mem,\n mimetype=\"text/csv\",\n attachment_filename=path_to_file,\n as_attachment=True,\n cache_timeout=0,\n )\n\n @self.server.route(\n f\"{self.config.url_base_pathname}downloads/counts/\",\n endpoint=f\"{self.config.url_base_pathname}:serve_counts\",\n )\n def serve_counts(path_to_file):\n method = request.args.get(\"method\")\n clusters = ast.literal_eval(request.args.get(\"clusters\"))\n\n requested_filename = path_to_file.split(\".\")[0]\n\n cluster_attributes = get_cluster_attributes(requested_filename)\n cluster_df = get_clusters(\n int(cluster_attributes[\"n_clusters\"]),\n cluster_attributes[\"direction\"],\n int(cluster_attributes[\"abs_transition_threshold\"]),\n int(cluster_attributes[\"skill_diff_threshold\"]),\n )\n\n if method is None or clusters is None:\n cluster_ids = cluster_df[\"Cluster\"].unique()\n cluster_id_representation = \"all\"\n method = \"compare\"\n else:\n cluster_ids = clusters\n cluster_id_representation = str(cluster_ids)\n\n if method == \"combine\":\n\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"].isin(cluster_ids)\n ].set_index(\"Occupation\", drop=False)\n\n data_dict = {}\n\n # Incidence, unstack, filter and sum\n incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_incidence = incidence[\n incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n data_dict[\"Incidence\"] = np.round(\n filtered_incidence[\"Weight\"].sum()\n )\n\n # Unique incidence, unstack, filter and sum\n unique_incidence = unstack_many(\n 
aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [f\"occ4d{i}_title\" for i in range(6)],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_unique_incidence = (\n unique_incidence[\n unique_incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n .reset_index(drop=False)\n .drop_duplicates(\"AAID\")\n )\n data_dict[\"Unique Incidence\"] = np.round(\n filtered_unique_incidence[\"Weight\"].sum()\n )\n\n # Cluster stasis\n cluster_stasis = codes_filter(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ],\n cluster_occupations[\"Occupation\"],\n True,\n True,\n )\n data_dict[\"Combined Cluster Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[cluster_stasis.index][\"weight5\"]\n .sum()\n )\n\n # Occupation stasis, find those with stasis, filter for valid occupation and sum final weights\n occ_stasis = stasis_only(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ]\n )\n data_dict[\"Occupation Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[\n occ_stasis[\n occ_stasis[\"occ4d0_title\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ].index\n ][\"weight5\"]\n .sum()\n )\n\n dataframe = pd.DataFrame([data_dict])\n\n else:\n\n list_of_dicts = []\n\n for cluster in cluster_ids:\n cluster_occupations = cluster_df[\n cluster_df[\"Cluster\"] == cluster\n ].set_index(\"Occupation\", drop=False)\n\n data_dict = {\"Cluster\": cluster}\n\n # Incidence, unstack, filter and sum\n incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [\n f\"occ4d{i}_title\" for i in range(6)\n ],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_incidence = incidence[\n incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n data_dict[\"Incidence\"] = np.round(\n filtered_incidence[\"Weight\"].sum()\n )\n\n # Unique incidence, unstack, filter and sum\n unique_incidence = unstack_many(\n aaw.set_index(\"AAID\"),\n {\n \"Occupation\": [\n f\"occ4d{i}_title\" for i in range(6)\n ],\n \"Weight\": [f\"weight{i}\" for i in range(6)],\n },\n )\n filtered_unique_incidence = (\n unique_incidence[\n unique_incidence[\"Occupation\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ]\n .reset_index(drop=False)\n .drop_duplicates(\"AAID\")\n )\n data_dict[\"Unique Incidence\"] = np.round(\n filtered_unique_incidence[\"Weight\"].sum()\n )\n\n # Cluster stasis\n cluster_stasis = codes_filter(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ],\n cluster_occupations[\"Occupation\"],\n True,\n True,\n )\n data_dict[\"Cluster Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[cluster_stasis.index][\"weight5\"]\n .sum()\n )\n\n # Occupation stasis, find those with stasis, filter for valid occupation and sum final weights\n occ_stasis = stasis_only(\n aaw.set_index(\"AAID\")[\n [f\"occ4d{i}_title\" for i in range(6)]\n ]\n )\n data_dict[\"Occupation Stasis\"] = np.round(\n aaw.set_index(\"AAID\")\n .loc[\n occ_stasis[\n occ_stasis[\"occ4d0_title\"].isin(\n cluster_occupations[\"Occupation\"]\n )\n ].index\n ][\"weight5\"]\n .sum()\n )\n\n list_of_dicts.append(data_dict)\n\n dataframe = pd.DataFrame(list_of_dicts)\n\n proxyIO = io.StringIO()\n dataframe.to_csv(proxyIO, index=False, encoding=\"utf-8\")\n\n mem = io.BytesIO()\n mem.write(proxyIO.getvalue().encode(\"utf-8\"))\n mem.seek(0)\n\n download_file_name = f\"{path_to_file.split('.')[0]}_{method}_{cluster_id_representation}.csv\"\n\n return flask.send_file(\n mem,\n mimetype=\"text/csv\",\n 
attachment_filename=download_file_name,\n                as_attachment=True,\n                cache_timeout=0,\n            )\n","sub_path":"aaw/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":66900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239165717","text":"import math\nimport numpy as np\nerror_to_catch = getattr(__builtins__,'FileNotFoundError', IOError)\n\nimg_rows, img_cols = 448, 448\ndim=img_rows*img_cols\nnum_classes=80\n\ndef load_train_data(x) :\n\ttrain_prob_all= np.empty((0,num_classes),float)  # start with zero rows; frames are appended below\n\ttrain_l_all= np.empty((0,num_classes),int)\n\tfor i in range(0,x):\n \n\t    train_file=(\"frame%04d.txt\" % (i))\n\t    label_file=(\"frame%04d_l.txt\" % (i))\n\t    \n\t    try:\n\t        train_prob= np.genfromtxt(train_file, delimiter=' ', dtype=None)\n\t        train_l=np.genfromtxt(label_file, delimiter=' ', dtype=None)\n\t        \n\t        train_prob_all = np.append(train_prob_all, train_prob,axis=0)\n\t        train_l_all = np.append(train_l_all, train_l,axis=0)\n\t        print(train_prob_all.shape)\n\t    except error_to_catch:\n\t    \t continue \n\n\treturn train_prob_all,train_l_all\n\n\t\n\n\ndef load_test_data(y) :\n\ttest_prob_all = np.empty((0, num_classes), float )  # start with zero rows; frames are appended below\n\ttest_l_all = np.empty((0, num_classes),int)\n\tfor i in range(0,y):\n\n\t    test_file=\"frame%04d.txt\" % i\n\t    test_label=\"frame%04d_l.txt\" % i\n\t    try:\n\t        test_prob = np.genfromtxt(test_file, delimiter=' ', dtype=None)\n\t        test_l=np.genfromtxt(test_label, delimiter=' ', dtype=None)\n\t        test_prob_all = np.append(test_prob_all, test_prob,axis=0)\n\t        test_l_all = np.append(test_l_all, test_l,axis=0)\n\t    except error_to_catch:\n\t    \t continue\n\treturn test_prob_all,test_l_all\n\n\t\n\n\n\t    \n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282836355","text":"import ldap\nfrom ldap.ldapobject import ReconnectLDAPObject\n\n\n\"\"\"\nInitializes a connection to the LDAP server\n\"\"\"\ndef ldap_init(app):\n    app.config['LDAP_CONN'] = ReconnectLDAPObject(app.config['LDAP_URL'])\n    app.config['LDAP_CONN'].simple_bind_s(\n        app.config['LDAP_BIND_DN'],\n        app.config['LDAP_BIND_PW'])\n\n\n\"\"\"\nQueries the LDAP server to return a dictionary of\nCSHers on floor.\n:returns: dictionary where each room number (key)\ncorresponds to a list of people living in the room.\n\"\"\"\ndef get_onfloors(app):\n    if app.config['LDAP_CONN'] is None:\n        ldap_init(app)\n\n    ldap_results = app.config['LDAP_CONN'].search_s(\n        app.config['LDAP_USER_OU'],\n        ldap.SCOPE_SUBTREE,\n        \"(&(objectClass=houseMember)\" +\n        \"(memberof=cn=current_student,ou=Groups,dc=csh,dc=rit,dc=edu)\" +\n        \"(roomNumber=*))\",\n        ['cn', 'roomNumber'])\n    onfloors = {}\n    for onfloor in ldap_results:\n        cn = onfloor[1]['cn'][0].decode('utf-8')\n        roomNumber = onfloor[1]['roomNumber'][0].decode('utf-8')\n        if roomNumber in onfloors:\n            onfloors[roomNumber] += [cn]\n        else:\n            onfloors[roomNumber] = [cn]\n    return onfloors\n\n\ndef _get_cn_from_dns(app, dns):\n    cns = []\n    for dn in dns:\n        dn = dn.split(',')[0]\n        cns += app.config['LDAP_CONN'].search_s(\n            app.config['LDAP_USER_OU'],\n            ldap.SCOPE_SUBTREE,\n            \"(%s)\" % dn,\n            ['cn'])\n    return [cn.decode('utf-8') for cn in cns[0][1]['cn']]\n\n\n\"\"\"\nQueries the LDAP server to return a dictionary of e-board\nmembers.\n:returns: dictionary of e-board directors\n\"\"\"\ndef get_eboard(app):\n    if app.config['LDAP_CONN'] is None:\n        ldap_init(app)\n\n    ldap_results = 
app.config['LDAP_CONN'].search_s(\n        \"ou=Committees,dc=csh,dc=rit,dc=edu\",\n        ldap.SCOPE_SUBTREE,\n        \"(objectClass=Committee)\", ['cn', 'head'])\n    eboard = {}\n    for director in ldap_results:\n        cn = director[1]['cn'][0].decode('utf-8')\n        head_dns = [dn.decode('utf-8') for dn in director[1]['head']]\n        eboard[cn] = _get_cn_from_dns(app, head_dns)\n    return eboard\n\n\n\"\"\"\nQueries the LDAP server to return a dictionary of group members,\nlike RTP's and 3DA's\n:returns: dictionary of groups and members, where the key is \nthe name of the group (\"rtp\", \"3da\") and the value is a list\nof members in that group.\n\"\"\"\ndef get_groups(app):\n    if app.config['LDAP_CONN'] is None:\n        ldap_init(app)\n\n    groups = {}\n\n    rtp_results = app.config['LDAP_CONN'].search_s(\n        \"ou=Groups,dc=csh,dc=rit,dc=edu\",\n        ldap.SCOPE_SUBTREE,\n        \"(cn=active_rtp)\")[0][1]['member']\n    groups['rtp'] = [_get_cn_from_dns(app, [rtp.decode('utf-8')])[0]\n                     for rtp in rtp_results]\n\n    threedeeayy_results = app.config['LDAP_CONN'].search_s(\n        \"ou=Groups,dc=csh,dc=rit,dc=edu\",\n        ldap.SCOPE_SUBTREE,\n        \"(cn=3da)\")[0][1]['member']\n    groups['3da'] = [_get_cn_from_dns(app, [admin.decode('utf-8')])[0]\n                     for admin in threedeeayy_results]\n\n    return groups\n","sub_path":"csh_map/ldap.py","file_name":"ldap.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"200226318","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 8 21:25:28 2020\r\n\r\n@author: EHMTang\r\n\"\"\"\r\n# Import necessary packages\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport statistics as st\r\nimport matplotlib.pyplot as plt\r\nimport scipy\r\nfrom scipy import optimize\r\n\r\n\r\n\r\n# Get current working directory\r\ndirname = os.path.dirname(os.path.abspath(__file__))\r\nadvent_3 = dirname + r'\\advent_3.txt'\r\n\r\n# Get directory of 'data' to be imported\r\ndf = pd.read_csv(advent_3,\r\n                 header=None)\r\n\r\n\r\ndef is_tree_at_coordinates(hill_x, hill_y):\r\n    map_x = hill_x % 31\r\n    return df[0][hill_y][map_x] == \"#\"\r\n\r\n\r\nprint(is_tree_at_coordinates(32, 0))\r\n\r\n\r\n#%%\r\n\r\ndef tree_count_for_slope(right_increment, down_increment):\r\n    right_coordinate = 0\r\n    down_coordinate = 0\r\n    tree_count = 0\r\n    \r\n    while down_coordinate < len(df):\r\n        if is_tree_at_coordinates(right_coordinate, down_coordinate):\r\n            tree_count += 1\r\n        right_coordinate += right_increment\r\n        down_coordinate += down_increment\r\n        \r\n    return tree_count\r\n\r\na = tree_count_for_slope(1,1)\r\nb = tree_count_for_slope(3,1)\r\nc = tree_count_for_slope(5,1)\r\nd = tree_count_for_slope(7,1)\r\ne = tree_count_for_slope(1,2)\r\n\r\nf = a*b*c*d*e\r\n\r\n\r\n\r\n    ","sub_path":"advent_3.py","file_name":"advent_3.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541782408","text":"import bidict\n\"\"\"This module is currently used as a config file; it may be replaced by a proper configuration file in the future\"\"\"\n\n\nDB_ADDRESS = \"mongodb://mongodb:27017/\"\nOLD_DB_NAME = \"old_db\"\nDB_NAME = \"ieml_db\"\n\nOLD_TERMS_COLLECTION = \"terms\"\nOLD_RELATIONSHIPS_COLLECTION = \"relationships\"\n\nSCRIPTS_COLLECTION = \"relations\"\nTERMS_COLLECTION = \"terms\"\nPROPOSITION_COLLECTION = \"propositions\"\nTEXT_COLLECTION = \"texts\"\nHYPERTEXT_COLLECTION = \"hypertexts\"\n\n\nTAG_LANGUAGES = [\"FR\", \"EN\"]\n\nDB_NAME_USERS = 
\"\"\n\nUSERS_COLLECTIONS = \"\"\n\n# RELATIONS = bidict({\n# 'ASCENDING': 'DESCENDING',\n# 'GERMAN': 'GERMAN'\n# })\n\n\n# Script specific\nROOT_PARADIGM_TYPE = 'ROOT_PARADIGM'\nSINGULAR_SEQUENCE_TYPE = 'SINGULAR_SEQUENCE'\nPARADIGM_TYPE = 'PARADIGM'\n\n","sub_path":"src/main/docker/testing/api/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650010179","text":"import os\nfrom flask import Flask\nfrom flaskr.filters import timezone_filter\n\nfrom . import db, auth, blog\n\n\ndef create_app(test_config=None):\n # create and configure the app\n flaskr_app = Flask(__name__, instance_relative_config=True)\n flaskr_app.config.from_mapping(\n SECRET_KEY='dev',\n DATABASE=os.path.join(flaskr_app.instance_path, 'flaskr-tutorial.sqlite'),\n DEBUG=True,\n ENV='development'\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n flaskr_app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n flaskr_app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(flaskr_app.instance_path)\n except OSError:\n pass\n\n db.init_app(flaskr_app)\n\n # various registries\n flaskr_app.register_blueprint(auth.bp)\n flaskr_app.register_blueprint(blog.bp)\n\n flaskr_app.add_url_rule('/', endpoint='index')\n\n flaskr_app.jinja_env.filters['timezone_filter'] = timezone_filter\n\n return flaskr_app\n\n","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"129040815","text":"import numpy as np\n\n\ndef calculate(list):\n if len(list) < 9:\n raise ValueError(\"List must contain nine numbers.\")\n\n numpyArray = np.array([[list[0:3]], [list[3:6]], [list[6:10]]])\n flattened = np.array([list])\n\n media = []\n media2 = []\n media.append(np.mean(numpyArray, axis=0).tolist()[0])\n for x in np.mean(numpyArray, axis=2).tolist():\n media2.append(x[0])\n media.append(media2)\n media.append(flattened.mean().tolist())\n\n varianza = []\n varianza2 = []\n varianza.append(np.var(numpyArray, axis=0).tolist()[0])\n for x in np.var(numpyArray, axis=2).tolist():\n varianza2.append(x[0])\n varianza.append(varianza2)\n varianza.append(flattened.var().tolist())\n\n stand = []\n stand2 = []\n stand.append(np.std(numpyArray, axis=0).tolist()[0])\n for x in np.std(numpyArray, axis=2).tolist():\n stand2.append(x[0])\n stand.append(stand2)\n stand.append(flattened.std().tolist())\n\n maximo = []\n maximo2 = []\n maximo.append(np.max(numpyArray, axis=0).tolist()[0])\n for x in np.max(numpyArray, axis=2).tolist():\n maximo2.append(x[0])\n maximo.append(maximo2)\n maximo.append(flattened.max().tolist())\n\n minimo = []\n minimo2 = []\n minimo.append(np.min(numpyArray, axis=0).tolist()[0])\n for x in np.min(numpyArray, axis=2).tolist():\n minimo2.append(x[0])\n minimo.append(minimo2)\n minimo.append(flattened.min().tolist())\n\n suma = []\n suma2 = []\n suma.append(np.sum(numpyArray, axis=0).tolist()[0])\n for x in np.sum(numpyArray, axis=2).tolist():\n suma2.append(x[0])\n suma.append(suma2)\n suma.append(flattened.sum().tolist())\n\n diccionario = {\n 'mean': media,\n 'variance': varianza,\n 'standard deviation': stand,\n 'max': maximo,\n 'min': minimo,\n 'sum': suma\n }\n return 
diccionario\n","sub_path":"fcc-mean-var-std/mean_var_std.py","file_name":"mean_var_std.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"33813709","text":"#\n# MIT License\n#\n# Copyright (c) 2020 Airbyte\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\nimport os\nfrom typing import List\n\nimport pytest\nfrom airbyte_protocol.models.airbyte_protocol import DestinationSyncMode, SyncMode\nfrom normalization.destination_type import DestinationType\nfrom normalization.transform_catalog.stream_processor import StreamProcessor\nfrom normalization.transform_catalog.table_name_registry import TableNameRegistry\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef before_tests(request):\n # This makes the test run whether it is executed from the tests folder (with pytest/gradle)\n # or from the base-normalization folder (through pycharm)\n unit_tests_dir = os.path.join(request.fspath.dirname, \"unit_tests\")\n if os.path.exists(unit_tests_dir):\n os.chdir(unit_tests_dir)\n else:\n os.chdir(request.fspath.dirname)\n yield\n os.chdir(request.config.invocation_dir)\n\n\n@pytest.mark.parametrize(\n \"cursor_field, expecting_exception, expected_cursor_field\",\n [\n (None, False, \"_airbyte_emitted_at\"),\n ([\"updated_at\"], False, \"updated_at\"),\n ([\"_airbyte_emitted_at\"], False, \"_airbyte_emitted_at\"),\n ([\"parent\", \"nested_field\"], True, \"nested_field\"),\n ],\n)\ndef test_cursor_field(cursor_field: List[str], expecting_exception: bool, expected_cursor_field: str):\n stream_processor = StreamProcessor.create(\n stream_name=\"test_cursor_field\",\n destination_type=DestinationType.POSTGRES,\n default_schema=\"default_schema\",\n raw_schema=\"raw_schema\",\n schema=\"schema_name\",\n source_sync_mode=SyncMode.incremental,\n destination_sync_mode=DestinationSyncMode.append_dedup,\n cursor_field=cursor_field,\n primary_key=[],\n json_column_name=\"json_column_name\",\n properties=dict(),\n tables_registry=TableNameRegistry(DestinationType.POSTGRES),\n from_table=\"\",\n )\n try:\n assert (\n stream_processor.get_cursor_field(column_names={expected_cursor_field: (expected_cursor_field, \"random\")})\n == expected_cursor_field\n )\n except ValueError as e:\n if not expecting_exception:\n raise e\n\n\n@pytest.mark.parametrize(\n \"primary_key, column_type, expecting_exception, expected_primary_keys, expected_final_primary_key_string\",\n [\n ([[\"id\"]], \"string\", False, 
[\"id\"], \"{{ adapter.quote('id') }}\"),\n ([[\"id\"]], \"number\", False, [\"id\"], \"cast({{ adapter.quote('id') }} as {{ dbt_utils.type_string() }})\"),\n ([[\"first_name\"], [\"last_name\"]], \"string\", False, [\"first_name\", \"last_name\"], \"first_name, last_name\"),\n ([[\"float_id\"]], \"number\", False, [\"float_id\"], \"cast(float_id as {{ dbt_utils.type_string() }})\"),\n ([[\"_airbyte_emitted_at\"]], \"string\", False, [], \"cast(_airbyte_emitted_at as {{ dbt_utils.type_string() }})\"),\n (None, \"string\", True, [], \"\"),\n ([[\"parent\", \"nested_field\"]], \"string\", True, [], \"\"),\n ],\n)\ndef test_primary_key(\n primary_key: List[List[str]],\n column_type: str,\n expecting_exception: bool,\n expected_primary_keys: List[str],\n expected_final_primary_key_string: str,\n):\n stream_processor = StreamProcessor.create(\n stream_name=\"test_primary_key\",\n destination_type=DestinationType.POSTGRES,\n raw_schema=\"raw_schema\",\n default_schema=\"default_schema\",\n schema=\"schema_name\",\n source_sync_mode=SyncMode.incremental,\n destination_sync_mode=DestinationSyncMode.append_dedup,\n cursor_field=[],\n primary_key=primary_key,\n json_column_name=\"json_column_name\",\n properties={key: {\"type\": column_type} for key in expected_primary_keys},\n tables_registry=TableNameRegistry(DestinationType.POSTGRES),\n from_table=\"\",\n )\n try:\n assert stream_processor.get_primary_key(column_names=stream_processor.extract_column_names()) == expected_final_primary_key_string\n except ValueError as e:\n if not expecting_exception:\n raise e\n","sub_path":"airbyte-integrations/bases/base-normalization/unit_tests/test_stream_processor.py","file_name":"test_stream_processor.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"472679207","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom astropy import units\nfrom astropy import constants\n\n#H_0 = 67.8 * units.km / (units.s * units.Mpc)\nH_0 = 3.2407789E-18 / units.s * 0.7\nH_0 = H_0.to(units.km / units.s / units.Mpc)\nOmega0 = 0.27\nG = constants.G\nG = G.to(units.kiloparsec**3 / (units.Msun * units.s**2))\n\ndef H(z):\n Lambda0 = 1. - Omega0\n return H_0*(Omega0*(1+z)**3 - (Omega0+Lambda0-1)*(1+z)**2 + Lambda0)**0.5\n\ndef Omega_z(z):\n return Omega0 * (1+z)**3 * (H_0/H(z))**2\n\ndef rho_crit(z):\n H2 = H(z)**2\n rho = 3*H2 / (8*np.pi*G)\n return rho\n\ndef Dvir(z):# from the solution of the top hat model! 
\n    Omegaz = Omega_z(z)\n    x = Omegaz - 1\n    Deltavir = ((18*np.pi**2) + (82*x) - 39*x**2) / Omegaz\n    return Deltavir\n\ndef rvir(Mvir, z):\n    Mvir = Mvir * units.Msun\n    Deltavir = Dvir(z)\n    pcrit = rho_crit(z)\n    Rvir = (3*Mvir / (4 * np.pi * Deltavir * pcrit * Omega0))**(1/3.)\n    Rvir = Rvir.to(units.kpc)\n    return Rvir\n\ndef rvir2(Mvir, z):\n    h = 0.704\n    rv = 206/h * (Dvir(z) * Omega0 / 97.2)**(-1.0/3.0) * (Mvir*h/(1E12))**(1.0/3.0)\n    rv = rv * units.kpc\n    return rv\n\n\ndef r200(M200):\n    z = 0\n    M200 = M200 * units.Msun\n    Delta200 = 200\n    pcrit = rho_crit(z)\n    R200 = (3*M200 / (4 * np.pi * Delta200 * pcrit ))**(1/3.)\n    R200 = R200.to(units.kpc)\n    return R200\n\ndef concentration(Mvir):\n    h = 0.7\n    c = 9.60 * (Mvir * h/ 1E12)**(-0.075)\n    return c\n","sub_path":"soda/cosmotools.py","file_name":"cosmotools.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"558271445","text":"import argparse\nimport logging\nimport requests\nimport sys\nfrom bs4 import BeautifulSoup\nimport json\nimport os.path\nimport peewee\nfrom datetime import datetime\nfrom urllib.parse import urlparse\nfrom rss_reader.parser import RssParser\nfrom rss_reader.db_worker import RssStorage, get_path\nfrom rss_reader.db_worker import db as rss_db\nfrom rss_reader.converter import save_html, save_pdf\nfrom termcolor import colored\n\n\ndef main(args=sys.argv[1:]):\n    \"\"\"Main function. Entry point of program\"\"\"\n    args.append('--colorize')  # colorized output is forced on by default\n    arg_parser = get_arg_parser()\n    parsed_arg = arg_parser.parse_args(args)\n    if parsed_arg.verbose:\n        logging.basicConfig(level=logging.INFO)\n    else:\n        logging.basicConfig(level=logging.ERROR)\n\n    check_limit(parsed_arg.limit)\n    create_db()\n    if parsed_arg.date:\n        select = selection_from_db(parsed_arg.date)\n        news, items = collect_and_format_news(select, parsed_arg)\n    else:\n        url_verification(parsed_arg.source)\n        request = get_response(parsed_arg.source)\n        soup = get_soup(request)\n        select = selection_from_url(soup)\n        news, items = collect_and_format_news(select, parsed_arg)\n\n    print_news(parsed_arg.json, parsed_arg.colorize, news)\n    if parsed_arg.to_html:\n        save_html(items, parsed_arg.to_html, datetime.now().strftime(\"%m.%d %H.%M.%S\"))\n\n    if parsed_arg.to_pdf:\n        save_pdf(items, parsed_arg.to_pdf, datetime.now().strftime(\"%m.%d %H.%M.%S\"))\n\n\ndef get_arg_parser():\n    \"\"\"This function adds the console arguments\"\"\"\n    parser = argparse.ArgumentParser(description='Python command-line RSS reader.')\n    parser.add_argument('source', nargs='?', type=str, help='RSS URL')\n    parser.add_argument('--version', '-v', help='Print version info', action='version', version='Version 1.0')\n    parser.add_argument('--json', help='Print result as JSON', action='store_true')\n    parser.add_argument('--verbose', help='Outputs verbose status messages', action='store_true')\n    parser.add_argument('--limit', type=int, help='Limit news topics')\n    parser.add_argument('--date', type=str, help='Print cached news')\n    parser.add_argument('--to_html', type=str, help='Convert news to html file. Path example \"d:/folder\"')\n    parser.add_argument('--to_pdf', type=str, help='Convert news to pdf file. 
Path example \"d:/folder\"')\n    parser.add_argument('--colorize', help='Print colorized result', action='store_true')\n    return parser\n\n\ndef create_db():\n    \"\"\"Function which creates the DB if it doesn't exist\"\"\"\n    if not os.path.isfile(get_path()):\n        logging.info('Create SQLite DB to store news.')\n        with rss_db.connection_context():\n            RssStorage.create_table()\n\n\ndef check_limit(limit):\n    \"\"\"Function which checks the --limit arg\"\"\"\n    logging.info('Check limit value.')\n    if limit and limit <= 0:\n        print('Invalid limit value. Please correct the limit value and try again')\n        sys.exit(0)\n\n\ndef selection_from_db(date):\n    \"\"\"Function which returns the news selection from the DB\"\"\"\n    try:\n        date = datetime.strptime(date, '%Y%m%d')\n    except ValueError:\n        print('Invalid date. Please correct the date and try again')\n        sys.exit(0)\n    logging.info('Select news from DB.')\n\n    with rss_db.connection_context():\n        select = RssStorage.select().where(RssStorage.pubDate == date).dicts()\n        if not select:\n            print('No news for entered date')\n            sys.exit(0)\n\n    return select\n\n\ndef selection_from_url(soup):\n    \"\"\"Function which returns the news selection from the URL\"\"\"\n    logging.info('Select news from URL.')\n    rss_parser = RssParser(soup)\n    select = rss_parser.select_news()\n    return select\n\n\ndef collect_and_format_news(select, parsed_arg):\n    \"\"\"Function which returns formatted news from the selection\"\"\"\n    news = dict()\n    collected_items = list()\n    for num, item in enumerate(select):\n        if not parsed_arg.date:\n            try:\n                with rss_db.connection_context():\n                    RssStorage.create(**item)\n\n            except peewee.IntegrityError:\n                pass\n\n        if parsed_arg.limit is None or num < parsed_arg.limit:\n            collected_items.append(item)\n            if parsed_arg.json:\n                news[num] = RssParser.json_format(item)\n\n            else:\n                news[num] = RssParser.default_format(item, parsed_arg.colorize)\n    return news, collected_items\n\n\ndef print_news(arg_json, arg_color, news):\n    \"\"\"Function which prints the news\"\"\"\n    logging.info('Printing news.')\n    if arg_json:\n        if arg_color:\n            print(colored(json.dumps(news, indent=4, sort_keys=False, default=str), \"green\", \"on_grey\"))\n        else:\n            print(json.dumps(news, indent=4, sort_keys=False, default=str))\n\n    else:\n        print(*news.values())\n\n\ndef url_verification(url):\n    \"\"\"This function verifies the URL\"\"\"\n    logging.info('Verify URL')\n    result = urlparse(url)\n    if not all([result.scheme, result.netloc]):\n        print(f'Invalid URL {url}. Please correct the URL and try again')\n        sys.exit(0)\n\n\ndef get_response(source):\n    \"\"\"This function returns the response content\"\"\"\n    try:\n        response = requests.get(source)\n        return response.content\n    except requests.exceptions.ConnectionError:\n        print(f'Invalid URL {source}. 
Please correct the URL and try again')\n        sys.exit(0)\n\n\ndef get_soup(response):\n    \"\"\"This function returns the parsed soup\"\"\"\n    logging.info('Get access to RSS feed')\n    soup = BeautifulSoup(response, 'lxml-xml')\n    if soup.find('rss'):\n        return soup\n    else:\n        print('URL does not contain RSS')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"rss_reader/rss_reader/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"429944681","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\n@pytest.fixture\ndef driver(request):\n    wd = webdriver.Chrome()\n    request.addfinalizer(wd.quit)\n    return wd\n\n\ndef test_task_9_1(driver):\n\n    driver.get('http://localhost/litecart/admin/?app=countries&doc=countries')\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.NAME, 'username')))\n    driver.find_element_by_name('username').send_keys('admin')\n    driver.find_element_by_name('password').send_keys('admin')\n    driver.find_element_by_name('login').click()\n\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'box-apps-menu')))\n\n    all_country_links = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(5) a')\n    timezones = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(6) ')\n\n    array_length = len(all_country_links)\n\n    for i in range(array_length-1):\n        assert all_country_links[i].text < all_country_links[i+1].text\n        if (timezones[i].text != '0'):\n            all_country_links[i].click()\n\n            sub_timezones = driver.find_elements_by_css_selector('#table-zones td:nth-child(3) input')\n            for j in range(len(sub_timezones)-2):\n                assert sub_timezones[j].get_attribute('value') <= sub_timezones[j+1].get_attribute('value')\n\n            driver.back()\n            all_country_links = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(5) a')\n            timezones = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(6)')\n\n\ndef test_task_9_2(driver):\n\n    driver.get('http://localhost/litecart/admin/?app=geo_zones&doc=geo_zones')\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.NAME, 'username')))\n    driver.find_element_by_name('username').send_keys('admin')\n    driver.find_element_by_name('password').send_keys('admin')\n    driver.find_element_by_name('login').click()\n\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'box-apps-menu')))\n\n    all_countries = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(3) a')\n\n    country_counter = len(all_countries)\n\n    for i in range(country_counter):\n        all_countries[i].click()\n\n        zones = driver.find_elements_by_css_selector('#table-zones tr:not(.header) td:nth-child(3) option[selected]')\n\n        for j in range(len(zones)-2):\n            assert zones[j].text <= zones[j+1].text\n\n        driver.back()\n        all_countries = driver.find_elements_by_css_selector('.dataTable .row td:nth-child(3) a')\n\n\ndef test_task_10(driver):\n    driver.get('http://localhost/litecart/en/')\n\n    first_campaign_item = driver.find_element_by_css_selector('#box-campaigns .content .product')\n    item_name = first_campaign_item.find_element_by_css_selector('.name').text\n\n    regular_price_elem = first_campaign_item.find_element_by_css_selector('.regular-price')\n    regular_price = regular_price_elem.text\n    assert 
regular_price_elem.value_of_css_property(\"color\") == 'rgba(119, 119, 119, 1)'\n assert regular_price_elem.value_of_css_property(\"text-decoration\") == 'line-through'\n\n campaign_price_elem = first_campaign_item.find_element_by_css_selector('.campaign-price')\n campaign_price = campaign_price_elem.text\n assert campaign_price_elem.value_of_css_property(\"color\") == 'rgba(204, 0, 0, 1)'\n assert campaign_price_elem.value_of_css_property(\"font-weight\") == 'bold'\n assert campaign_price_elem.value_of_css_property(\"font-size\") > regular_price_elem.value_of_css_property(\"font-size\")\n\n first_campaign_item.click()\n\n item_name_on_page = driver.find_element_by_css_selector('h1.title').text\n\n regular_elem = driver.find_element_by_css_selector('.price-wrapper .regular-price')\n regular_price_on_page = regular_elem.text\n assert regular_elem.value_of_css_property(\"color\") == 'rgba(102, 102, 102, 1)'\n assert regular_elem.value_of_css_property(\"text-decoration\") == 'line-through'\n\n campaign_elem = driver.find_element_by_css_selector('.price-wrapper .campaign-price')\n campaign_price_on_page = campaign_elem.text\n assert campaign_elem.value_of_css_property(\"color\") == 'rgba(204, 0, 0, 1)'\n assert campaign_elem.value_of_css_property(\"font-weight\") == 'bold'\n assert campaign_elem.value_of_css_property(\"font-size\") > regular_elem.value_of_css_property(\"font-size\")\n\n assert item_name == item_name_on_page\n assert regular_price == regular_price_on_page\n assert campaign_price == campaign_price_on_page\n\n","sub_path":"get_properties_lesson.py","file_name":"get_properties_lesson.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"367227873","text":"\"\"\"\nThis module contains input management and stuff\n\nAuthor: Juan Ortiz\n\"\"\"\n\nfrom dijkstra import deliveries\nfrom GraphAL import GraphAL\nimport re\nimport sys\nimport webbrowser\nimport time\n\ntry:\n # Reading the map and making the digraph\n\n vertices = {}\n recov = {}\n coords = {}\n index = 0\n f = open(\"map/medellin_colombia-grande.txt\")\n for line in f:\n if line == \"\\n\":\n graph = GraphAL(index)\n index = 0\n break\n else:\n info = line.split(' ')\n recov[index] = info\n coords[str(info[1])+str(info[2])] = index\n info.append(index)\n vertices[info[0]] = info[1:]\n index += 1\n for line in f:\n info = line.split(' ')\n source = vertices[info[0]]\n goal = vertices[info[1]]\n graph.addArc(source[len(source)-1],\n goal[len(goal)-1],\n float(info[2]))\n\n f.close()\nexcept:\n print(\"Something went wrong!, check the map file\")\n\n\ndef p_input(url):\n \"\"\"\n Method to extract coordinates from a url like string (google maps)\n Params:\n url - String representing the url\n Returns:\n A list containing the respective coordinates\n \"\"\"\n r = re.compile(r'[-0-9]+\\.[-0-9]+')\n l = r.findall(url)\n coords = []\n for i in range(1, len(l)-1, 2):\n coords.append(l[i-1]+l[i])\n return coords\n\n\ndef build_url(c):\n \"\"\"\n Method to build the output url\n Params:\n c - list with coordinates\n Returns:\n A url like string\n \"\"\"\n url = \"https://www.google.com/maps/dir/\"\n for i in range(len(c)):\n url +=c[i]+'/'\n return url\n\n\nentry = input(\"Insert a url: \")\n# |REMEMBER TO UNCOMMENT THE LINE IN DIJKSTRA\n# entry = \"https://www.google.com.co/maps/dir/6.3455601,-75.5242386/6.344696,-75.530223/6.344424,-75.531294/@6.1816848,-75.5800564,15z?hl=en\"\ntry:\n s = time.time()\n # Finding the best route and 
opening the answer in a browser\n c = p_input(entry)\n locations = []\n for coor in c:\n locations.append(coords[coor])\n result = deliveries(graph, locations)\n path = []\n for p in range(len(result)):\n path += result[p][1][:-1]\n c_path = []\n for n in path:\n rec = recov[n]\n c_path.append(rec[1]+','+rec[2])\n url = build_url(c_path)\n webbrowser.open(url)\n print(time.time() - s)\nexcept:\n print('Something went wrong, check the coordinates')\n sys.exit()\n","sub_path":"proyecto/codigo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"497947020","text":"# refer: https://leetcode.com/problems/dungeon-game/discuss/52792/6-lines-Python-8-lines-Ruby\n\n\nclass Solution:\n def calculateMinimumHP(self, dungeon):\n \"\"\"\n :type dungeon: List[List[int]]\n :rtype: int\n \"\"\"\n\n n = len(dungeon[0])\n need = [2**31]*(n-1)+[1]\n for row in dungeon[::-1]:\n for j in range(n)[::-1]:\n need[j] = max(min(need[j:j+2]) - row[j], 1)\n print(need)\n return need[0]\n\n\ns = Solution()\ndun = [\n [-2, -3, 3],\n [-5, -10, 1],\n [10, 30, -5]\n]\nprint(s.calculateMinimumHP(dun))\n","sub_path":"Amazon/174. Dungeon Game.py","file_name":"174. Dungeon Game.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"302958791","text":"def binsearch(dizi, bas, son, aranan):\n orta = int((bas+son)/2)\n if( bas <= son):\n if(aranan == dizi[orta]):\n print(\"Aranan deger {} dizinin {}. indeksi.\".format(aranan,int((bas+son)/2)))\n print(\"Aranan == dizi[orta] Dizinin bas degeri {} dizinin son degeri {} \".format(bas,son))\n elif(aranan < dizi[orta]):\n binsearch(dizi,bas,son-1,aranan)\n print(\"Aranan < dizi[orta] Dizinin bas degeri {} dizinin son degeri {} \".format(bas,son))\n else:\n binsearch(dizi,bas,son+1,aranan)\n print(\"Aranan > dizi[orta] Dizinin bas degeri {} dizinin son degeri {} \".format(bas,son))\n\n \ndizi = [1,3,4,5,7,8,9,12,32,43,54,65,76,78,98]\nx = 54\nbinsearch(dizi,0,len(dizi)-1,x)","sub_path":"Python/diger/algoritmalar/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269769327","text":"import numpy as np\n\nfrom .dataset import DatasetManager\n\n\nclass BatchManager:\n def __init__(self, kind):\n dataset_manager = DatasetManager(kind)\n self.train_data = dataset_manager.get_train_data()\n self.valid_data = dataset_manager.get_valid_data()\n self.test_data = dataset_manager.get_test_data()\n\n self.n_user = int(\n max(\n np.max(self.train_data[:, 0]),\n np.max(self.valid_data[:, 0]), np.max(self.test_data[:,\n 0]))) + 1\n self.n_item = int(\n max(\n np.max(self.train_data[:, 1]),\n np.max(self.valid_data[:, 1]), np.max(self.test_data[:,\n 1]))) + 1\n self.mu = np.mean(self.train_data[:, 2])\n","sub_path":"app/utils/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"589054255","text":"import logging\nimport pytest\n\nfrom ocs_ci.framework.testlib import tier1, ManageTest\nfrom ocs_ci.ocs.cluster import CephCluster\n\n\nlog = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope='class')\ndef test_fixture(request):\n \"\"\"\n Create disks\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n teardown(self)\n 
request.addfinalizer(finalizer)\n setup(self)\n\n\ndef setup(self):\n \"\"\"\n Setting up the environment for the test\n \"\"\"\n self.cluster = CephCluster()\n assert self.cluster.create_user(self.username, self.caps)\n\n\ndef teardown(self):\n \"\"\"\n Tearing down the environment\n \"\"\"\n new_count = self.cluster.mon_count - 1\n self.cluster.mon_change_count(new_count)\n assert new_count == self.cluster.mon_count\n new_mdscount = int(self.cluster.mds_count / 2) - 1\n self.cluster.mds_change_count(new_mdscount)\n assert new_mdscount * 2 == self.cluster.mds_count\n del_cmd = f\"ceph auth del {self.username}\"\n self.cluster.toolbox.exec_ceph_cmd(del_cmd)\n\n\n@tier1\n@pytest.mark.usefixtures(\n test_fixture.__name__,\n)\nclass TestClusterUtils(ManageTest):\n # Cluster will be populated in the fixture\n cluster = None\n username = \"client.test\"\n caps = \"mon 'allow r' osd 'allow rwx'\"\n\n def test_get_user_key(self):\n key = self.cluster.get_user_key(self.username)\n assert key\n logging.info(key)\n\n def test_get_admin_key(self):\n \"\"\"\n By default admin user will be created by rook\n \"\"\"\n key = self.cluster.get_admin_key()\n assert key\n\n def test_get_mon_info(self):\n for mon in self.cluster.mons:\n logging.info(mon.name)\n logging.info(mon.port)\n\n def test_add_mon(self):\n cur_count = self.cluster.mon_count\n logging.info(f\"current mon count = {cur_count}\")\n new_count = cur_count + 1\n self.cluster.mon_change_count(new_count)\n assert new_count == self.cluster.mon_count\n\n def test_add_mds(self):\n cur_count = int(self.cluster.mds_count / 2)\n logging.info(f\"Current active count = {cur_count}\")\n new_count = cur_count + 1\n self.cluster.mds_change_count(new_count)\n assert new_count * 2 == self.cluster.mds_count\n","sub_path":"tests/test_cluster_utils.py","file_name":"test_cluster_utils.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"571009981","text":"from src.un import name\nimport sys\nimport os\nimport datetime\nimport shutil\nfrom datetime import datetime, timedelta\nfrom src.funcoes import base_geral as b1\nfrom src.funcoes import base_comun as b2\nfrom src.funcoes import searchBinary, escreve\nfrom src.grava_smap import main as escreveResultadoSMap\nfrom src.organizaArquivosdeChuva import organizaArquivosDeChuva\nfrom src.organizaArquivosdeChuva2 import organizaArquivosDeSmap\nfrom src.smapPreliminar import organizaArquivosdeSmap as organizaArquivosDeSmap_preliminar\nimport time\nimport subprocess\n\nclass modelo_precipitacao(object):\n\n\tGERAL = '0' # Modelo padrão 22 mil linhas de grade\n\tCOMUN = '1' # Modelo resumido ONS, 52 linhas de grade\n\n\tdef __init__(self, nome='PMEDIA_ORIG'):\n\t\t\n\t\tself.__nome = nome\n\t\tself.__path = r'Arq_entrada\\Precipitacao' \n\t\tself.__data = self.findData()\n\t\tself.__nDias = len(os.listdir(self.path))\n\t\tself.state = modelo_precipitacao.COMUN\n\t\t\n\t@property\n\tdef nDias(self):\n\t\treturn self.__nDias\n\n\t@property\n\tdef data(self):\n\t\treturn self.__data\n\n\t@property\n\tdef nome(self):\n\t\treturn self.__nome\n\n\t@property\n\tdef path(self):\n\t\treturn self.__path\n\t\n\n\tdef findData(self):\n\t\taux = os.listdir(self.path)[0]\n\t\tfirst = aux.find('p')\n\t\tlast = aux.find('a')\n\n\t\treturn datetime.strptime(aux[first+1: last],'%d%m%y')\n\n\tdef diasPrevisao(self):\n\t\treturn len(os.listdir(self.path))\n\t\n\tdef defineState(self):\n\t\t# implementar\n\t\tpass\n\n\nclass 
modelo_cxv(object):\n\n\tdef __init__(self, data):\n\t\t\n\t\tself.__data = data\n\t\tself.__path =self.getPath()\n\t\tself.__regioes = ['Grande', 'Iguacu', 'Itaipu', 'Paranaiba', 'Paranapanema', 'SaoFrancisco', 'Tiete', 'Tocantins',\n 'Uruguai']\n\n\t@property\n\tdef data(self):\n\t\treturn self.__data\n\n\t@property\n\tdef path(self):\n\t\treturn self.__path\n\n\t@property\n\tdef regioes(self):\n\t\treturn self.__regioes\n\n\t@property\n\tdef dataString(self):\n\t\treturn datetime.strftime(self.data,'%Y%m%d')\n\t\n\n\tdef getPath(self):\n\t\tpath = r'Arq_entrada\\Modelo'\n\t\t\t\n\t\tmodels = os.listdir(path)\n\n\t\tif 'Modelos_Chuva_Vazao_' + self.dataString in models:\n\t\t\tpath += r'\\Modelos_Chuva_Vazao_' + datetime.strftime(self.data,'%Y%m%d') + r'\\Modelos_Chuva_Vazao\\SMAP'\n\t\t\treturn path\n\n\t\telse:\n\t\t\traise Exception('O arquivo solicitado não se encontra na pasta')\n\t\t\t\nclass rodada(object):\n\n\tOFICIAL = 0\n\tDEFINITIVO = 1\n\tPRELIMINAR = 2\n\n\n\tdef __init__(self, cxv, precipitacao = None):\n\n\t\tself.cxv = cxv\n\t\tself.precipitacao = precipitacao\n\t\tself.state = None\n\t\tself.rodada = self.defineRodada() # A rodada é definida via passagem da precipitacao ou não\n\n\tdef defineRodada(self):\n\t\t\n\t\tif self.precipitacao is None:\n\t\t\tself.state = rodada.OFICIAL\n\t\t\treturn self.oficial\n\n\t\telse:\n\t\t\tif datetime.now().hour < 13:\n\t\t\t\tself.state = rodada.PRELIMINAR\n\t\t\t\treturn self.preliminar\n\t\t\telse:\n\t\t\t\tself.state = rodada.DEFINITIVO\n\t\t\t\treturn self.definitivo\n\t\n\tdef preliminar(self):\n\t\torganizaArquivosDeChuva(cxv=self.cxv, prec=self.precipitacao, metodologia=b2)\n\t\torganizaArquivosDeSmap_preliminar(cxv=self.cxv, nDias=self.precipitacao.nDias, nome=self.precipitacao.nome)\n\n\tdef oficial(self): # Se for o definitvo ons, apenas escreve o resultado\n\t\tescreveResultadoSMap(path=self.cxv.path,data=self.cxv.data,dias_previsao=12)\n\n\tdef definitivo(self):\n\t\t# Implement: Organiza os Arquivos de chuva, aplica SMAP e escreve a planilha resultados.\n\t\torganizaArquivosDeChuva(cxv=self.cxv,prec=self.precipitacao,metodologia=b2)\n\t\tprint('arquivos de chuva organizados')\n\t\torganizaArquivosDeSmap(cxv=self.cxv, nDias=self.precipitacao.nDias)\n\t\tprint('ARQUIVOS DE SMAP ATUALIZADOS')\n\t\t# Cada Bacia será instanciada como um novo objeto, que assim será copiada as dependencias do SMAP\n\t\t#eachBacia(self.cxv).copy_depencies().aplica_smap()\n\t\tescreveResultadoSMap(path=self.cxv.path,data=self.precipitacao.data,dias_previsao=self.precipitacao.nDias)\n\nclass eachBacia(object):\n\n\tdef __init__(self, bacia_cxv): # Modelo cxv\n\n\t\tself.__path = bacia_cxv\n\t\tself.__dependency = r'base\\smap'\n\n\tdef copy_depencies(self):\n\n\t\ttry:\n\t\t\t\n\t\t\tshutil.copy(self.__dependency + r'\\batsmap-desktop.exe', self.__path)\n\t\t\tshutil.copytree(self.__dependency + r'\\bin', self.__path + '\\\\bin')\n\t\t\tshutil.copytree(self.__dependency + r'\\logs', self.__path + '\\\\logs')\n\t\t\n\t\texcept Exception as e:\n\t\t\t\n\t\t\tprint(e)\n\t\t\tpass\n\n\t\treturn self\n\n\tdef aplica_smap(self): # modificar\n\n\t\tos.chdir(self.__path)\n\t\tp = subprocess.Popen(self.__path + '\\\\batsmap-desktop.exe')\n\t\ttime.sleep(1)\n\t\ttempo = 0\n\t\t\n\t\twhile tempo < 600:\n\n\t\t\twith open(self.__path + '\\\\logs\\\\desktop_bat.log', 'r') as log:\n\t\t\t\t\n\t\t\t\ttxt = log.readlines()\n\t\t\t\tultimaLinha = txt[-1]\n\n\t\t\t\tif 'A rotina BAT-SMAP nao sera executada' in 
ultimaLinha:\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\n\t\t\t\tif 'Finalizando programa' in ultimaLinha:\n\t\t\t\t\t\n\t\t\t\t\tprint('tudo ok na regiao: ' + regiao)\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\telse:\n \n\t\t\t\t\ttime.sleep(10)\n\t\t\t\t\ttempo = tempo + 10\n\t\t\t\t\t\n\t\t\t\t\tif tempo > 600:\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint('overtime na regiao: ' + regiao)\n\t\t\t\t\t\tbreak\n\t\tp.kill()\n\n\n\n\nclass novaIteracao(object):\n\n\tGERAL = 0\n\tCOMUN = 1\n\n\tdef __init__(self, cxv, precipitacao):\n\n\t\tself.__cxv = cxv\n\t\tself.__precipitacao = precipitacao\n\t\tself.erroDeltaData()\n\t\tself.state = novaIteracao.COMUN\n\n\t@property\n\tdef cxv(self):\n\t\treturn self.__cxv\n\n\t@property\n\tdef precipitacao(self):\n\t\treturn self.__precipitacao\n\n\tdef prec_metodologia(self, arquivo):\n\t\tmodo = self.get_metodologia()\n\t\treturn modo(arquivo)\n\t\n\tdef erroDeltaData(self): # Se as datas não forem compativeis(Chuva dia x e modelo x-1, lança uma Excecção)\n\t\tif self.precipitacao.data - timedelta(days=1) != self.cxv.data:\n\t\t\traise Exception('Insira um modelo de Chuva vazao compativel com no máximo, um dia a menos a previsao de precipitacao, para melhores resultados!')\n\n\tdef get_metodologia(self):\n\t\taux = self.precipitacao.path + '//' + os.listdir(self.precipitacao.path)[0]\n\t\t\n\t\tif len(open(aux).readlines()) > 55:\n\t\t\tnovaIteracao.GERAL\n\t\t\treturn b1\n\t\t\n\t\telse:\n\t\t\treturn b2\n\n\tdef iterPathRegions(self):\n\t\t\tcaminhos_entrada = [self.cxv.path + '//' + x + '//ARQ_ENTRADA' for x in self.cxv.regioes]\n\t\t\tcaminhos_base = ['base//' + str(self.state) + '//' + j + '//Base.dat' for j in self.cxv.regioes]\n\t\t\t# Retorna duas listas de caminhos, a primeira com o caminho até o ARQ_ENTRADA a segunda com o caminho até os modelos de base.\n\t\t\treturn caminhos_entrada, caminhos_base\n\nclass calculadora(novaIteracao):\n\n\tdef __init__(self, cxv, prec):\n\t\t\n\t\tnovaIteracao.__init__(cxv,prec)\n\t\tself.entrda, self.base = self.iterPathRegions()\n\n\tdef main(self):\n\t\t\n\t\tfor i in range(len(self.entrda)):\n\n\t\t\tfor j in os.listdir(self.entrda[i]):\n\t\t\n\t\t\t\tif 'PMEDIA_ORIG' in j:\n\n\t\t\t\t\taplicado = self.prec_metodologia(caminhos_entrada[i] + '//' + j)\n\t\t\t\t\taplicado.sort(key=lambda x: (x[0],x[1]))\n\t\t\t\t\tbase = self.prec_metodologia(caminhos_base[i])\n\t\t\t\t\tbase.sort(key=lambda x: (x[0],x[1]))\n\t\t\t\t\tlista_auxiliar = []\t\n\n\n\n\n\n\t\n\n\n\t\n\t\t\n","sub_path":"src/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88662447","text":"\n\nfrom xai.brain.wordbase.adjectives._spindly import _SPINDLY\n\n#calss header\nclass _SPINDLIER(_SPINDLY, ):\n\tdef __init__(self,): \n\t\t_SPINDLY.__init__(self)\n\t\tself.name = \"SPINDLIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"spindly\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_spindlier.py","file_name":"_spindlier.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"151248755","text":"\"\"\"---------------------------------------------------------------------------------------------------------------------\nMODULE\n SBLCollateralActivityNeoxHook\n Hook\n\nDESCRIPTION\n This module contains Neox logic for the SBL Collateral 
trades.\n\n------------------------------------------------------------------------------------------------------------------------\nHISTORY\n========================================================================================================================\nDate Change no Developer Requester Description\n------------------------------------------------------------------------------------------------------------------------\n2020-10-21 FAOPS-959 Ncediso Nkambule Cuen Edwards Initial create.\n2020-10-21 FAOPS-1016 Ncediso Nkambule Gasant Thulsie Updated Hook Logger with Hook Name.\n2021-03-16 FAOPS-982 Ncediso Nkambule Gasant Thulsie Added functions to handle Cashflow driven events.\n\n------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport acm\nfrom at_logging import getLogger\nimport sbl_booking_utils as sbl_utils\nfrom NeoXActivityReportHook import ActivityReportsNeoxHook\nfrom NeoXActivityReportsHookBase import CollateralTradeToReport\nfrom NeoXActivityReportsUtils import is_valid_sbl_collateral_trade, get_object_value_by_name\n\n\nLOGGER = getLogger(__name__)\n\n\nclass SBLCollateralActivityHook(ActivityReportsNeoxHook):\n    \"\"\"\n    Definition of a hook used to perform STP triggered by the update or creation of a fixed cash flow on an SBL trade.\n    \"\"\"\n\n    file_identifier = \"Collateral_Trade_Activity\"\n\n    def Name(self):\n        \"\"\"\n        Get the name of the SBL Collateral Activity NeoX Hook.\n        \"\"\"\n        return 'SBL Collateral Activity NeoX Hook'\n\n    def IsTriggeredBy(self, event_object, event_message=None):\n        \"\"\"\n        Only trigger for the below matched conditions.\n        Collateral trades must match:\n        - Instrument Type in [\"Stock\", \"Bond\", \"IndexLinkedBond\", \"CD\", \"Bill\"]\n        - Trade Portfolio matches SBL_NONCASH_COLLATERAL\n        - Trade Category equals \"Collateral\"\n        - Trade Acquirer equals SECURITY LENDINGS DESK\n        - Trade Counterparty starts with 'SL'\n        \"\"\"\n\n        instrument = None\n        trade = self.get_trade_from_event_object(event_object)\n\n        if trade:\n            instrument = trade.Instrument()\n        if trade and not trade.Acquirer().Name() == sbl_utils.ACQUIRER.Name():\n            return False\n        if instrument and not instrument.InsType() in sbl_utils.SBL_INSTRUMENTS:\n            return False\n        if not self._is_collateral_trade(trade):\n            return False\n        if not is_valid_sbl_collateral_trade(trade):\n            return False\n\n        return True\n\n    def PerformEventProcessing(self, event_object, event_message=None):\n        \"\"\"\n        Perform the hook's STP action(s) for an event on the specified\n        object.\n\n        Please note that the action does not necessarily occur on the\n        event object itself but may occur on some related object(s).\n        \"\"\"\n\n        message_id = get_object_value_by_name(event_message, 'TXNBR')\n        try:\n            if event_object.IsKindOf(acm.FTrade):\n                LOGGER.info(\"Processing Collateral Trade Update\")\n                trade = self.get_trade_from_event_object(event_object)\n                if trade:\n                    with CollateralTradeToReport(\n                            directory=self.temp_directory,\n                            file_name=self.file_identifier,\n                            acm_trade=trade) as collateral:\n                        collateral.process_trade(message_id)\n        except Exception as error:\n            LOGGER.exception(error)\n\n    def PerformFileProcessing(self):\n        with CollateralTradeToReport(\n                directory=self.temp_directory,\n                file_name=self.file_identifier,\n                acm_trade=None) as collateral:\n            collateral.move_file(destination_directory=self.final_directory)\n\n    @staticmethod\n    def _is_collateral_trade(trade):\n        if not trade:\n            return False\n        if not 
trade.match_portfolio(sbl_utils.COLLATERAL_PORTFOLIO):\n return False\n if not trade.TradeCategory() == sbl_utils.COLLATERAL_CATEGORY:\n return False\n if not trade.Instrument().InsType() in sbl_utils.COLLATERAL_INSTRUMENTS:\n return False\n return True\n","sub_path":"Extensions/ABSA NeoX Activity Reporting/FPythonCode/SBLCollateralActivityNeoxHook.py","file_name":"SBLCollateralActivityNeoxHook.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64110201","text":"\"\"\" Simple test for Problem 3c: multiply a large matrix with its inverse to obtain the identity. \"\"\"\n\n# Results: all of the matrix multiplication implementations in Problem 3c work\n# BUT they have wildly varying degrees of efficiency:\n# For loops take a very long time, as expected\n# List comprehensions take about the same amount of time as for loops, since\n# they are basically just for loops in disguise\n# The built-in numpy function takes almost no time at all\n\nimport numpy as np\nimport problem3c\n\nA = np.random.randint(10., size=(100, 100))\n\nA_INV = np.linalg.inv(A)\n\ndef test_for():\n \"\"\" test the for loop multiplication with a big matrix \"\"\"\n print(\"Testing for loop multiplication\")\n I = problem3c.for_mult(A, A_INV)\n for i in range(len(I[0])):\n assert 0.9999 <= I[i][i] <= 1.0001, \"Result is not the identity matrix\"\n for j in range(len(I[0])):\n if j != i:\n assert abs(I[i][j]) <= 10**-12, \"Result is not the identity matrix\"\n\ndef test_comp():\n \"\"\" test the list comprehension multiplication from problem 3c \"\"\"\n print(\"Testing list comprehension multiplication\")\n I = problem3c.comp_mult(A, A_INV)\n for i in range(len(I[0])):\n assert 0.9999 <= I[i][i] <= 1.0001, \"Result is not the identity matrix\"\n for j in range(len(I[0])):\n if j != i:\n assert abs(I[i][j]) <= 10**-12, \"Result is not the identity matrix\"\n\ndef test_np():\n \"\"\" test the built-in numpy matrix multiplication \"\"\"\n print(\"Testing built-in numpy.matmult\")\n I = problem3c.np_mult(A, A_INV)\n for i in range(len(I[0])):\n assert 0.9999 <= I[i][i] <= 1.0001, \"Result is not the identity matrix\"\n for j in range(len(I[0])):\n if j != i:\n assert abs(I[i][j]) <= 10**-12, \"Result is not the identity matrix\"\n\nif __name__ == \"__main__\":\n test_for()\n test_comp()\n test_np()\n","sub_path":"problem3d.py","file_name":"problem3d.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391858039","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@version: $\n@author: zhuzhenping\n@contact: zhuzhenping@hikvision.com\n@site: \n@software: PyCharm\n@file name: urls.py\n@created time: 2017/4/2 15:16\nDescription:\n\"\"\"\n\n\nfrom django.conf.urls import patterns,include,url\nfrom django.contrib.auth import views as auth_views\nurlpatterns = patterns('CloudPlatform.views',\n\n #url(r'^$',include('cloudplatformYuchen.urls')),\n url(r'index.html/','index',name='index.html'),\n url(r'list/','list',name='list'),\n url(r'add/','add',name='add'),\n url(r'login/',auth_views.login, name='login'),\n # url(r'info.html/','info',name='info.html'),\n # url(r'pass.html/','pass_',name='pass.html'),\n # url(r'page.html/','page',name='page.html'),\n # url(r'book.html/','book',name='book.html'),\n # url(r'column.html/','column',name='column.html'),\n # url(r'add.html/','add',name='add.html'),\n # url(r'cate.html/','cate',name='cate.html'),\n # 
url(r'getman.html/','getman',name='getman.html'),\n                       # url(r'httptool.html/','httptool',name='httptool.html'),\n                       url(r'mokdlr.html/','mokdlr',name='mokdlr.html'),\n)\n\nurlpatterns += patterns('CloudPlatform.views_project.project_view',\n                       url(r'project/$','project',name='project'),\n                       url(r'project/deleteproject$','project_deleteproject',name='project_deleteproject'),\n                       url(r'project/infoproject$','project_infoproject',name='project_infoproject'),\n                       url(r'project/editprojectname$','project_editprojectname',name='project_editprojectname'),\n                       url(r'project/addproject$','project_addproject',name='project_addproject'),\n\n                       url(r'projectTree/','projectTree',name='projectTree'),\n                       )","sub_path":"CloudPlatform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"173695549","text":"#!/usr/bin/env python\n\nimport re\nimport sys\nimport logging\nimport hgvs.parser\nimport hgvs.dataproviders.uta\nimport hgvs.assemblymapper\nimport hgvs.validator\nimport hgvs.normalizer\nimport hgvs.exceptions\nfrom operator import eq\n\n__modname__ = 'hgvs_convert'\n__version__ = 'v1.0.0'\n\nhp = hgvsparser = hgvs.parser.Parser()\nhdp = hgvs.dataproviders.uta.connect(db_url=\"postgresql://uta_admin:uta_admin@localhost/uta/uta_20180821\", pooling=True,cache=None)\nvm = variantmapper = hgvs.assemblymapper.AssemblyMapper(hdp)\nam37 = easyvariantmapper = hgvs.assemblymapper.AssemblyMapper(hdp, assembly_name='GRCh37')\nam38 = easyvariantmapper = hgvs.assemblymapper.AssemblyMapper(hdp, assembly_name='GRCh38')\nhv = hgvs.validator.Validator(hdp)\nhn = hgvs.normalizer.Normalizer(hdp)\n\ndef hgvs_convert(hgvs_c, genotype=1):\n    # genotype: 0 = homozygous, 1 = heterozygous\n    hgvs.global_config.formatting.p_3_letter = False\n    hgvs.global_config.formatting.p_term_asterisk = False\n    var_c = hgvs_c = hgvsparser.parse_hgvs_variant(hgvs_c)\n    ref_ref = var_c.posedit.edit.ref\n    ref_var = var_c.posedit.edit.alt\n    ref_ac = var_c.ac\n    ref_pos = var_c.posedit.pos\n    #print(ref_ref)\n    #print(ref_var)\n    try:\n        #print(var_c)\n        hgvs_p = am37.c_to_p(var_c)\n        hgvs_p_3 = hgvs_p.format(conf={\"p_3_letter\": True})\n        hgvs_g = am37.c_to_g(var_c)\n        return {'hgvs_p':str(hgvs_p),'hgvs_g':str(hgvs_g),'hgvs_c':str(hgvs_c),'hgvs_p_3':str(hgvs_p_3),'msg':'complete'}\n    except hgvs.exceptions.HGVSError as e:\n        msg = str(e).split('ERROR: ')\n        msg = str(msg[0])\n        #msg parsing\n        pat = re.compile('\\\\([A-Za-z]\\\\)')\n        m = pat.findall(msg)\n        # the base in the NM transcript differs from the RefSeq base:\n        # swap the bases and try again\n        if len(m) >= 2:  # findall returns a list, never None; both bases are needed here\n            hgvs_ref_tmp = str(m[1]).replace(\"(\",\"\")\n            hgvs_ref = hgvs_ref_tmp.replace(\")\",\"\")\n            hgvs_var_tmp = str(m[0]).replace(\"(\",\"\")\n            hgvs_var = hgvs_var_tmp.replace(\")\",\"\")\n\n            if eq(ref_ref, hgvs_var) and eq(ref_var, hgvs_ref):\n                #print('ref/nm is flipped')\n\n                if genotype == 1:\n                    #print('hetero, nm is diff')\n                    hgvs_ref_tmp = str(m[1]).replace(\"(\",\"\")\n                    hgvs_ref = hgvs_ref_tmp.replace(\")\",\"\")\n                    hgvs_var_tmp = str(m[0]).replace(\"(\",\"\")\n                    hgvs_var = hgvs_var_tmp.replace(\")\",\"\")\n                else:\n                    #print('homo, nm is same with ref')\n                    hgvs_ref_tmp = str(m[1]).replace(\"(\",\"\")\n                    hgvs_ref = hgvs_ref_tmp.replace(\")\",\"\")\n                    hgvs_var_tmp = str(m[1]).replace(\"(\",\"\")\n                    hgvs_var = hgvs_var_tmp.replace(\")\",\"\")\n\n                try:\n                    hgvs_c = \"%s:c.%s%s>%s\"%(ref_ac,ref_pos,hgvs_ref,hgvs_var)\n                    var_c = hgvs_c = hgvsparser.parse_hgvs_variant(hgvs_c)\n                    #print('new: %s'%var_c)\n                    hgvs_p = am37.c_to_p(var_c)\n                    hgvs_p_3 = hgvs_p.format(conf={\"p_3_letter\": True})\n                    hgvs_g = am37.c_to_g(var_c)\n                    
return {'hgvs_p':str(hgvs_p),'hgvs_g':str(hgvs_g),'hgvs_c':str(hgvs_c),'hgvs_p_3':str(hgvs_p_3),'msg':'complete'}\n                except hgvs.exceptions.HGVSError as e:\n                    #print(\"final error\")\n                    return {'hgvs_p':'','hgvs_g':'','hgvs_c':'','hgvs_p_3':'','msg':msg}\n        return {'hgvs_p':'','hgvs_g':'','hgvs_c':'','hgvs_p_3':'','msg':msg}\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:  # argv[0] is the script name, so at least one argument is required\n        raise Exception('Invalid arguments ({0})'.format(str(sys.argv)))\n\n    for arg in sys.argv:\n        try:\n            name, val = arg.split('=')\n            if name == 'hgvsc':\n                hgvsc = val\n        except ValueError:\n            pass\n\n    hgvsp = hgvs_convert(str(hgvsc),1)\n    #hgvsp=hgvs_convert('NM_001282224.1:c.3814G>A')\n    #hgvsp1=hgvs_convert('NM_000166.5:c.77C>T')\n    print(hgvsp)\n","sub_path":"pipelines/utils/ngb_hgvs_convert.py","file_name":"ngb_hgvs_convert.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"467833517","text":"import numpy as np\r\nimport warnings\r\n\r\nfrom tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\r\nfrom tensorflow.keras.layers import Dense, Lambda\r\nfrom tensorflow.keras.layers import add, Flatten\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.preprocessing import image\r\nimport tensorflow.keras.backend as K\r\nfrom keras_applications.imagenet_utils import _obtain_input_shape\r\nfrom keras.engine.topology import get_source_inputs\r\n\r\nfrom Resnet_models.oct_conv2d import *\r\n\r\nimport tensorflow as tf\r\n\r\ndef identity_block(input_tensor, alpha, kernel_size, filters, stage, block, strides = (1,1)):\r\n    \r\n    filters1, filters2 = filters\r\n    if K.image_data_format() == 'channels_last':\r\n        bn_axis = 3\r\n    else:\r\n        bn_axis = 1\r\n    \r\n    conv_name_base = 'res' + str(stage) + block + 'conv_branch'\r\n    high_conv_bn_name_base = 'bn' + str(stage) + block + 'high_conv_branch'\r\n    low_conv_bn_name_base = 'bn' + str(stage) + block + 'low_conv_branch'\r\n\r\n    high, low = input_tensor\r\n    skip_high, skip_low = input_tensor\r\n\r\n    high, low = OctConv2D(filters1, alpha, kernel_size = kernel_size, strides = strides, padding = 'same', name = conv_name_base + '2a')([high, low])\r\n    high = BatchNormalization(axis=bn_axis, name = high_conv_bn_name_base + '2a')(high)\r\n    high = Activation('relu')(high)\r\n    low = BatchNormalization(axis=bn_axis, name = low_conv_bn_name_base + '2a')(low)\r\n    low = Activation('relu')(low)\r\n\r\n    high, low = OctConv2D(filters2, alpha, kernel_size = kernel_size, padding = 'same', name = conv_name_base + '2b')([high, low])\r\n    high = BatchNormalization(axis=bn_axis, name = high_conv_bn_name_base + '2b')(high)\r\n    low = BatchNormalization(axis=bn_axis, name = low_conv_bn_name_base + '2b')(low)\r\n\r\n    high = add([high, skip_high])\r\n    low = add([low, skip_low])\r\n\r\n    high = Activation('relu')(high)\r\n    low = Activation('relu')(low)\r\n\r\n    return [high, low]\r\n\r\ndef conv_block(input_tensor, alpha, kernel_size, filters, stage, block, strides=(1, 1)):\r\n    \r\n    filters1, filters2 = filters\r\n    if K.image_data_format() == 'channels_last':\r\n        bn_axis = 3\r\n    else:\r\n        bn_axis = 1\r\n\r\n    conv_name_base = 'res' + str(stage) + block + 'conv_branch'\r\n    high_conv_bn_name_base = 'bn' + str(stage) + block + 'high_conv_branch'\r\n    low_conv_bn_name_base = 'bn' + str(stage) + block + 'low_conv_branch'\r\n\r\n    high, low = input_tensor\r\n    skip_high, skip_low = 
input_tensor\r\n \r\n high, low = OctConv2D(filters1, alpha, kernel_size = kernel_size, padding = 'same', strides= strides, name = conv_name_base + '2a')([high, low])\r\n high = BatchNormalization(axis=bn_axis, name = high_conv_bn_name_base + '2a')(high)\r\n high = Activation('relu')(high)\r\n low = BatchNormalization(axis=bn_axis, name = low_conv_bn_name_base + '2a')(low)\r\n low = Activation('relu')(low)\r\n\r\n high, low = OctConv2D(filters2, alpha, kernel_size = kernel_size, padding = 'same', name = conv_name_base + '2b')([high, low])\r\n high = BatchNormalization(axis=bn_axis, name = high_conv_bn_name_base + '2b')(high)\r\n low = BatchNormalization(axis=bn_axis, name = low_conv_bn_name_base + '2b')(low)\r\n \r\n skip_high = Conv2D(int(filters2 * (1 - alpha)), kernel_size = kernel_size, strides=strides, padding = 'same', name = conv_name_base + '1')(skip_high)\r\n skip_high = BatchNormalization(axis=bn_axis, name = high_conv_bn_name_base + '1')(skip_high)\r\n\r\n skip_low = Conv2D(int(filters2 * alpha), kernel_size = kernel_size, strides=strides, padding = 'same', name = conv_name_base + '2')(skip_low)\r\n skip_low = BatchNormalization(axis=bn_axis, name = low_conv_bn_name_base + '2')(skip_low)\r\n\r\n high = add([high, skip_high])\r\n low = add([low, skip_low])\r\n\r\n high = Activation('relu')(high)\r\n low = Activation('relu')(low)\r\n\r\n return [high, low]\r\n\r\ndef last_OctConv_2_Vanila(input_tensor, filters, alpha):\r\n \r\n if K.image_data_format() == 'channels_last':\r\n bn_axis = 3\r\n else:\r\n bn_axis = 1\r\n\r\n high, low = input_tensor\r\n\r\n high_2_high = Conv2D(filters, (3, 3), padding = 'same')(high)\r\n low_2_high = Conv2D(filters, (3, 3), padding=\"same\")(low)\r\n low_2_high = Lambda(lambda x: \r\n K.repeat_elements(K.repeat_elements(x, 2, axis=1), 2, axis=2))(low_2_high)\r\n \r\n x = add([high_2_high, low_2_high])\r\n x = BatchNormalization(axis = bn_axis, name = 'bn_last_OctConv_2_Vanila')(x)\r\n x = Activation('relu')(x)\r\n\r\n return x\r\n\r\ndef Oct_ResNet18(include_top = False, \r\n weights=None,\r\n alpha = 0,\r\n input_tensor=None, input_shape=None,\r\n pooling=None,\r\n classes=1000):\r\n \r\n # if weights not in {'imagenet', None}:\r\n # raise ValueError('The `weights` argument should be either '\r\n # '`None` (random initialization) or `imagenet` '\r\n # '(pre-training on ImageNet).')\r\n \r\n # if weights == 'imagenet' and include_top and classes != 1000:\r\n # raise ValueError('If using `weights` as imagenet with `include_top`'\r\n # ' as true, `classes` should be 1000')\r\n \r\n \r\n input_shape = _obtain_input_shape(input_shape,\r\n default_size=224,\r\n min_size=32,\r\n data_format=K.image_data_format(),\r\n require_flatten = include_top)\r\n \r\n if input_tensor is None:\r\n img_input = Input(shape=input_shape)\r\n else:\r\n if not K.is_keras_tensor(input_tensor):\r\n img_input = Input(tensor=input_tensor, shape=input_shape)\r\n else:\r\n img_input = input_tensor\r\n if K.image_data_format() == 'channels_last':\r\n bn_axis = 3\r\n else:\r\n bn_axis = 1\r\n \r\n low = AveragePooling2D(2)(img_input)\r\n\r\n high, low = OctConv2D(64, alpha = alpha, kernel_size = (7, 7), strides = (2, 2))([img_input, low])\r\n \r\n high = BatchNormalization(axis=bn_axis, name='high_Oct_bn_conv1')(high)\r\n high = Activation(\"relu\")(high)\r\n \r\n low = BatchNormalization(axis=bn_axis, name='low_Oct_bn_conv1')(low)\r\n low = Activation(\"relu\")(low)\r\n\r\n high, low = conv_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [64, 64], stage = 2, 
block = 'a')\r\n high, low = identity_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [64, 64], stage = 2, block = 'b')\r\n \r\n high, low = conv_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [128, 128], stage = 3, block = 'a', strides = (2, 2))\r\n high, low = identity_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [128, 128], stage = 3, block = 'b')\r\n\r\n high, low = conv_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [256, 256], stage = 4, block = 'a', strides = (2, 2))\r\n high, low = identity_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [256, 256], stage = 4, block = 'b')\r\n\r\n high, low = conv_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [512, 512], stage = 5, block = 'a', strides = (2, 2))\r\n high, low = identity_block([high, low], alpha = alpha, kernel_size = (3 , 3), filters = [512, 512], stage = 5, block = 'b')\r\n x = last_OctConv_2_Vanila([high, low], filters = 512, alpha = alpha)\r\n \r\n x = AveragePooling2D(name='avg_pool')(x)\r\n \r\n if include_top:\r\n x = Flatten()(x)\r\n x = Dense(classes, activation='softmax', name='fc1000')(x)\r\n else:\r\n if pooling == 'avg':\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(classes, activation='softmax', name='resnet18')(x)\r\n elif pooling == 'max':\r\n x = GlobalMaxPooling2D()(x)\r\n \r\n if input_tensor is not None:\r\n inputs = get_source_inputs(input_tensor)\r\n else:\r\n inputs = img_input\r\n \r\n model = Model(inputs, x, name='resnet18')\r\n \r\n \r\n # if weights == 'imagenet':\r\n # if include_top:\r\n # weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\r\n # WEIGHTS_PATH,\r\n # cache_subdir='models',\r\n # md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\r\n # else:\r\n # weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\r\n # WEIGHTS_PATH_NO_TOP,\r\n # cache_subdir='models',\r\n # md5_hash='a268eb855778b3df3c7506639542a6af')\r\n # model.load_weights(weights_path)\r\n # if K.backend() == 'theano':\r\n # layer_utils.convert_all_kernels_in_model(model)\r\n \r\n # if K.image_data_format() == 'channels_first':\r\n # if include_top:\r\n # maxpool = model.get_layer(name='avg_pool')\r\n # shape = maxpool.output_shape[1:]\r\n # dense = model.get_layer(name='fc1000')\r\n # layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\r\n \r\n # if K.backend() == 'tensorflow':\r\n # warnings.warn('You are using the TensorFlow backend, yet you '\r\n # 'are using the Theano '\r\n # 'image data format convention '\r\n # '(`image_data_format=\"channels_first\"`). 
'\r\n # 'For best performance, set '\r\n # '`image_data_format=\"channels_last\"` in '\r\n # 'your Keras config '\r\n # 'at ~/.keras/keras.json.')\r\n return model\r\n\r\n","sub_path":"Resnet_models/Oct_res18.py","file_name":"Oct_res18.py","file_ext":"py","file_size_in_byte":9834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305687993","text":"from __future__ import division, print_function\n\nfrom math import atan2, sqrt\n\nfrom fontPens.penTools import getCubicPoint\nfrom fontTools.misc.bezierTools import calcCubicArcLength\n\n\ndef getPointsFromCurve(p, div=0.75):\n points = []\n length = calcCubicArcLength(p[0], p[1], p[2], p[3])\n t = 0\n step = div / length\n # print(\"Length:\", d, \"Steps:\", step)\n while t < 1:\n points.append(getCubicPoint(t, p[0], p[1], p[2], p[3]))\n t += step\n points.append(p[3])\n return points\n\n\ndef angleBetweenPoints(p0, p1):\n return atan2(p1[1] - p0[1], p1[0] - p0[0])\n\n\ndef distanceBetweenPoints(p0, p1, doRound=False):\n # Calculate the distance between two points\n d = sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)\n if doRound:\n return int(round(d))\n else:\n return d\n\n\ndef halfPoint(p0, p1, doRound=False):\n x0, y0 = p0\n x1, y1 = p1\n xh = .5 * (x0 + x1)\n yh = .5 * (y0 + y1)\n if doRound:\n return int(round(xh)), int(round(yh))\n return xh, yh\n\n\nclass Triangle(object):\n def __init__(self, A, B, C):\n self.A = A\n self.B = B\n self.C = C\n\n def sides(self):\n self.a = distanceBetweenPoints(self.B, self.C)\n self.b = distanceBetweenPoints(self.A, self.C)\n self.c = distanceBetweenPoints(self.A, self.B)\n return self.a, self.b, self.c\n\n def height_a(self):\n a, b, c = self.sides()\n s = (a + b + c) / 2\n h = 2 * sqrt(s * (s - a) * (s - b) * (s - c)) / a\n return h\n\n\ndef optimizePointPath(p, dist=0.49):\n # print(\"Input number of points:\", len(p))\n num_points = len(p)\n p0 = p[0]\n optimized = [p0]\n i = 0\n j = 1\n while i < num_points - 2:\n p1 = p[i + 1]\n p2 = p[i + 2]\n t = Triangle(p0, p2, p1)\n # h = t.height_a()\n # print(i, h)\n if t.height_a() > dist:\n optimized.extend([p1])\n p0 = p[i]\n else:\n pass\n # print(\"Skip:\", i+1, p1)\n i += 1\n j += 1\n # if j > 13:\n # break\n optimized.extend([p[-1]])\n # print(\"Optimized number of points:\", len(optimized))\n return optimized\n","sub_path":"lib/nibLib/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62250435","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n\nimport socket\nimport time\n\nhost = 'localhost'\nport = 8083\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\nclient.connect((host, port))\nwhile True:\n\tclient.send('hello world\\r\\n'.encode())\n\tprint('send data')\n\ttime.sleep(1)\n","sub_path":"socket/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"222382226","text":"import random\nfrom tkinter import *\nfrom Modules_1 import Module_1 as mod1\nfrom Modules_1 import Module_2 as mod2\nfrom all_func_set import *\nfrom time import *\n\nroot = Tk()\nroot.title('Лабораторна робота №1')\nroot.geometry('700x370+100+100')\n\ndef file1(event):\n with open(r'D:\\Novosadenko_Vlad\\DM\\setA', 'w') as f:\n a = str(M.A)\n b=a.replace('{', '')\n c=b.replace('}', '')\n 
f.write(c)\n\ndef file2 (event):\n with open(r'D:\\Novosadenko_Vlad\\DM\\setB', 'w') as q:\n a = str(M.B)\n b=a.replace('{', '')\n c=b.replace('}', '')\n q.write(c)\n\ndef file3 (event):\n with open(r'D:\\Novosadenko_Vlad\\DM\\setC', 'w') as l:\n a = str(M.C)\n b=a.replace('{', '')\n c=b.replace('}', '')\n l.write(c)\n\nclass Main_win:\n def __init__(self, main):\n#-------Menu------------------------------------------------------------------------------------------------------------\n self.main_menu = Menu(main)\n root.configure(menu=self.main_menu)\n\n self.first_item = Menu(self.main_menu, tearoff=0)\n self.main_menu.add_cascade(label = 'Меню', menu = self.first_item)\n\n self.first_item.add_command(\n label='Вікно №2',\n command = self.new_win2,\n\n )\n self.first_item.add_separator()\n\n self.first_item.add_command(\n label='Вікно №3',\n command = self.new_win3,\n\n )\n self.first_item.add_separator()\n\n self.first_item.add_command(\n label='Вікно №4',\n command = self.new_win4,\n\n )\n self.first_item.add_separator()\n\n self.first_item.add_command(\n label='Вікно №5',\n command = self.new_win5,\n\n )\n#-----------------------------------------------------------------------------------------------------------------------\n\n#-------INFORM----------------------------------------------------------------------------------------------------------\n self.frame1 = Frame(main)\n self.frame1.pack()\n\n self.but1 = Button(\n self.frame1,\n text = 'ПІБ',\n width = 15,\n bg = '#B4FFE6'\n )\n self.but2 = Button(\n self.frame1,\n text = 'Група',\n width = 15,\n bg = '#B4FFE6'\n )\n self.but3 = Button(self.frame1,\n text = '№ у списку',\n width = 15,\n bg = '#B4FFE6')\n self.but4 = Button(self.frame1,\n text = 'Варіант',\n width = 15,\n bg = '#B4FFE6')\n\n self.lab1 = Label(self.frame1,\n width = 23,\n fg = '#F94C02',\n font =\n 'Times 13 bold')\n self.lab2 = Label(self.frame1,\n width = 23,\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.lab3 = Label(self.frame1,\n width = 23,\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.lab4 = Label(self.frame1,\n width = 23,\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n self.N = 17\n self.G = 64\n self.Var = (self.N+self.G%60)%30+1\n\n self.but1.grid(row = 0, column = 0)\n self.but2.grid(row = 1, column = 0)\n self.but3.grid(row = 2, column = 0)\n self.but4.grid(row = 3, column = 0)\n self.lab1.grid(row = 0, column = 1)\n self.lab2.grid(row = 1, column = 1)\n self.lab3.grid(row = 2, column = 1)\n self.lab4.grid(row = 3, column = 1)\n\n self.but1.bind('', self.info1)\n self.but2.bind('', self.info2)\n self.but3.bind('', self.info3)\n self.but4.bind('', self.info4)\n#-----------------------------------------------------------------------------------------------------------------------\n\n#-------Power; Set A, B, C----------------------------------------------------------------------------------------------\n self.labpot1 = Label(self.frame1,\n text = 'Потужність А:',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.labpot2 = Label(self.frame1,\n text = 'Потужність В:',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.labpot3 = Label(self.frame1,\n text = 'Потужність С:',\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n self.entrypot1 = Entry(self.frame1,\n width = 10,\n bg = '#F4D304')\n self.entrypot2 = Entry(self.frame1,\n width = 10,\n bg = '#F4D304')\n self.entrypot3 = Entry(self.frame1,\n width = 10,\n bg = '#F4D304')\n\n self.butAset = Button(self.frame1,\n text = 'Сформувати А',\n bg = '#B4FFE6')\n self.butBset = Button(self.frame1,\n text = 
'Сформувати B',\n bg = '#B4FFE6')\n self.butCset = Button(self.frame1,\n text = 'Сформувати C',\n bg = '#B4FFE6')\n\n self.butwrA = Button(self.frame1,\n text = 'Записати',\n bg = '#B4FFE6')\n self.butwrB = Button(self.frame1,\n text = 'Записати',\n bg = '#B4FFE6')\n self.butwrC = Button(self.frame1,\n text = 'Записати',\n bg = '#B4FFE6')\n\n self.labpot1.grid(row = 0, column = 2)\n self.labpot2.grid(row = 1, column = 2)\n self.labpot3.grid(row = 2, column = 2)\n\n self.entrypot1.grid(row = 0, column = 3)\n self.entrypot2.grid(row = 1, column = 3)\n self.entrypot3.grid(row = 2, column = 3)\n\n self.butAset.grid(row = 0, column = 4)\n self.butBset.grid(row = 1, column = 4)\n self.butCset.grid(row = 2, column = 4)\n\n self.butwrA.grid(row = 0, column = 5)\n self.butwrB.grid(row = 1, column = 5)\n self.butwrC.grid(row = 2, column = 5)\n\n self.butAset.bind('', self.formA)\n self.butBset.bind('', self.formB)\n self.butCset.bind('', self.formC)\n\n self.butwrA.bind('', file1)\n self.butwrB.bind('', file2)\n self.butwrC.bind('', file3)\n#-----------------------------------------------------------------------------------------------------------------------\n\n#-------HandEnter-------------------------------------------------------------------------------------------------------\n self.framehe = Frame(main)\n self.framehe.pack(fill=X, padx=5, pady=5)\n\n self.hesetAlab = Label(self.framehe,\n text = 'Задайте мн. А',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.hesetBlab = Label(self.framehe,\n text = 'Задайте мн. В',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.hesetClab = Label(self.framehe,\n text = 'Задайте мн. С',\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n self.text1 = Text(self.framehe,\n wrap=NONE,\n width = 20,\n height = 1,\n bg = '#F4D304')\n self.v1scrollbar = Scrollbar(self.framehe,\n orient='hor',\n command=self.text1.xview)\n self.text1['xscrollcommand'] = self.v1scrollbar.set\n\n self.text2 = Text(self.framehe,\n wrap=NONE,\n width = 20,\n height = 1,\n bg = '#F4D304')\n self.v2scrollbar = Scrollbar(self.framehe,\n orient='hor',\n command=self.text2.xview)\n self.text2['xscrollcommand'] = self.v2scrollbar.set\n\n self.text3 = Text(self.framehe,\n wrap=NONE,\n width = 20,\n height = 1,\n bg = '#F4D304')\n self.v3scrollbar = Scrollbar(self.framehe,\n orient='hor',\n command=self.text3.xview)\n self.text3['xscrollcommand'] = self.v3scrollbar.set\n\n self.hesetAlab.grid(row = 0, column = 0)\n self.hesetBlab.grid(row = 2, column = 0)\n self.hesetClab.grid(row = 4, column = 0)\n\n self.text1.grid(row = 0,\n column = 1,\n sticky='nsew')\n self.v1scrollbar.grid(row=1,\n column=1,\n sticky='ew')\n\n self.text2.grid(row = 2,\n column = 1,\n sticky='nsew')\n self.v2scrollbar.grid(row=3,\n column=1,\n sticky='ew')\n\n self.text3.grid(row = 4,\n column = 1,\n sticky='nsew')\n self.v3scrollbar.grid(row=5,\n column=1,\n sticky='ew')\n\n self.hebtnA = Button(self.framehe,\n text = 'Сформувати А',\n bg = '#B4FFE6')\n self.hebtnB = Button(self.framehe,\n text = 'Сформувати B',\n bg = '#B4FFE6')\n self.hebtnC = Button(self.framehe,\n text = 'Сформувати C',\n bg = '#B4FFE6')\n\n self.butwrA1 = Button(self.framehe,\n text = 'Записати',\n bg = '#B4FFE6')\n self.butwrB1 = Button(self.framehe,\n text = 'Записати',\n bg = '#B4FFE6')\n self.butwrC1 = Button(self.framehe,\n text = 'Записати',\n bg = '#B4FFE6')\n\n self.butwrA1.grid(row = 0, column = 3)\n self.butwrB1.grid(row = 2, column = 3)\n self.butwrC1.grid(row = 4, column = 3)\n\n self.butwrA1.bind('', file1)\n self.butwrB1.bind('', 
file2)\n self.butwrC1.bind('', file3)\n\n self.hebtnA.grid(row = 0, column = 2)\n self.hebtnB.grid(row = 2, column = 2)\n self.hebtnC.grid(row = 4, column = 2)\n\n self.hebtnA.bind('', self.formA1)\n self.hebtnB.bind('', self.formB1)\n self.hebtnC.bind('', self.formC1)\n#-----------------------------------------------------------------------------------------------------------------------\n\n#-------Universal set---------------------------------------------------------------------------------------------------\n self.frm = Frame(main)\n self.frm.pack(fill=X, padx=12, pady=12)\n self.Ulab = Label(self.frm,\n text = 'Універсальна множина:',\n fg = '#F94C02', font = 'Times 18 bold')\n self.Ulab.pack(side = LEFT)\n\n self.Uframe = Frame(main)\n self.Uframe.pack(fill=X, padx=13, pady=13)\n\n self.startlab = Label(self.Uframe,\n text = 'Поч.:',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.endlab = Label(self.Uframe,\n text = 'Кін.:',\n fg = '#F94C02',\n font = 'Times 13 bold')\n self.entrUs = Entry(self.Uframe,\n width = 10,\n bg = '#F4D304')\n self.entrUe = Entry(self.Uframe,\n width = 10,\n bg = '#F4D304')\n self.Ubtn = Button(self.Uframe,\n text = 'Задати',\n bg = '#B4FFE6')\n\n self.startlab.grid(row = 0, column = 0)\n self.entrUs.grid(row = 0, column = 1)\n self.endlab.grid(row = 0, column = 2)\n self.entrUe.grid(row = 0, column = 3)\n self.Ubtn.grid(row = 0, column = 4)\n\n self.Ubtn.bind('', self.ran)\n\n self.A = set()\n self.B = set()\n self.C = set()\n\n self.U = set()\n\n self.timeD = float(0)\n self.timeDs = float(0)\n self.timeZ = float(0)\n self.timeZ_b = float(0)\n#-----------------------------------------------------------------------------------------------------------------------\n\n#---functions-----------------------------------------------------------------------------------------------------------\n def ran(self, event):\n start = self.entrUs.get()\n end = self.entrUe.get()\n self.U = {x for x in range(int(start), int(end)+1)}\n return self.U\n #Universal Set\n\n def info1(self, event):\n self.lab1['text'] = 'Новосаденко Владислав'\n self.lab1['fg'] = '#8A05AA'\n #Name\n\n def info2(self, event):\n self.lab2['text'] = 'ІО-64'\n self.lab2['fg'] = '#8A05AA'\n #Group\n\n def info3(self, event):\n self.lab3['text'] = '17'\n self.lab3['fg'] = '#8A05AA'\n #Num in list\n\n def info4(self, event):\n self.lab4['text'] = str(self.Var)\n self.lab4['fg'] = '#8A05AA'\n #Variant\n\n def formA(self, event):\n read = self.entrypot1.get()\n ot = -255\n do = 255\n self.A = {random.randint(ot, do) for x in range(int(read))}\n for i in range(len(self.A)):\n if len(self.A) < int(read):\n self.A.add(random.randint(ot, do))\n return self.A\n #Random A\n\n def formB(self, event):\n\n read = self.entrypot2.get()\n\n ot = -255\n do = 255\n self.B = {random.randint(ot, do) for x in range(int(read))}\n for i in range(len(self.B)):\n if len(self.B) < int(read):\n self.B.add(random.randint(ot, do))\n return self.B\n #Random B\n\n def formC(self, event):\n\n read = self.entrypot3.get()\n\n ot = -255\n do = 255\n self.C = {random.randint(ot, do) for x in range(int(read))}\n for i in range(len(self.C)):\n if len(self.C) < int(read):\n self.C.add(random.randint(ot, do))\n return self.C\n #Random C\n\n def formA1(self, event):\n read = self.text1.get('1.0', END)\n\n y = read.split(', ')\n z = []\n for i in y:\n i = i.replace('\\n', '')\n i = int(i)\n z.append(i)\n\n self.A = set(z)\n #Handwrite A\n\n def formB1(self, event):\n read = self.text2.get('1.0', END)\n\n y = read.split(', ')\n z = []\n for i 
in y:\n i = i.replace('\\n', '')\n i = int(i)\n z.append(i)\n\n self.B = set(z)\n #Handwrite B\n\n def formC1(self, event):\n read = self.text3.get('1.0', END)\n\n y = read.split(', ')\n z = []\n for i in y:\n i = i.replace('\\n', '')\n i = int(i)\n z.append(i)\n\n self.C = set(z)\n #Handwrite C\n\n def new_win2(self):\n tl = Toplevel(root)\n tl.title('Вікно №2')\n tl.geometry('620x380+100+100')\n\n frm1 = Frame(tl)\n frm1.pack()\n\n labelA = Label(frm1,\n text = 'Множина А',\n fg = '#F94C02',\n font = 'Times 13 bold')\n labelB = Label(frm1,\n text = 'Множина B',\n fg = '#F94C02',\n font = 'Times 13 bold')\n labelC = Label(frm1,\n text = 'Множина C',\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n txt1 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v1scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt1.xview)\n txt1['xscrollcommand'] = v1scrollbar.set\n\n txt2 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v2scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt2.xview)\n txt2['xscrollcommand'] = v2scrollbar.set\n\n txt3 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v3scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt3.xview)\n txt3['xscrollcommand'] = v3scrollbar.set\n\n labelA.grid(row = 0, column = 0)\n labelB.grid(row = 2, column = 0)\n labelC.grid(row = 4, column = 0)\n\n txt1.grid(row = 0,\n column = 1,\n sticky='nsew')\n v1scrollbar.grid(row=1,\n column=1,\n sticky='ew')\n\n txt2.grid(row = 2,\n column = 1,\n sticky='nsew')\n v2scrollbar.grid(row=3,\n column=1,\n sticky='ew')\n\n txt3.grid(row = 4,\n column = 1,\n sticky='nsew')\n v3scrollbar.grid(row=5,\n column=1,\n sticky='ew')\n\n txt1.insert('1.0', self.A)\n txt2.insert('1.0', self.B)\n txt3.insert('1.0', self.C)\n\n frm2 = Frame(tl)\n frm2.pack(fill=X, padx=10, pady=10)\n\n ins_frm1 = Frame(frm2)\n ins_frm1.pack()\n\n ins_lab1 = Label(ins_frm1,\n text = 'Виконання виразу:',\n fg = '#F94C02', font = 'Times 18 bold')\n ins_lab2 = Label(ins_frm1,\n text = r'D = (B∆C) ∪ (B⋂C) ∆ (((A\\B) ⋂ B) ∆ A)',\n fg = '#F94C02', font = 18)\n ins_lab3 = Label(ins_frm1,\n text = 'Кроки:',\n fg = '#F94C02', font = 'Times 15 bold')\n\n ins_lab1.pack()\n ins_lab2.pack()\n ins_lab3.pack()\n\n ins_frm2 = Frame(frm2)\n ins_frm2.pack(fill=X, padx=10, pady=10)\n\n krok_btn1 = Button(ins_frm2,\n text = 'B∆C',\n bg = '#B4FFE6',\n width = 40)\n krok_btn2 = Button(ins_frm2,\n text = 'B⋂C',\n bg = '#B4FFE6',\n width = 40)\n krok_btn3 = Button(ins_frm2,\n text = '(B∆C) ∪ (B⋂C)',\n bg = '#B4FFE6',\n width = 40)\n krok_btn4 = Button(ins_frm2,\n text = 'A\\B',\n bg = '#B4FFE6',\n width = 40)\n krok_btn5 = Button(ins_frm2,\n text = '(A\\B) ⋂ B',\n bg = '#B4FFE6',\n width = 40)\n krok_btn6 = Button(ins_frm2,\n text = 'Результат',\n bg = '#B4FFE6',\n width = 63,\n font = 'Times 11 bold')\n krok_btn8 = Button(ins_frm2,\n text = '((A\\B) ⋂ B) ∆ A',\n bg = '#B4FFE6',\n width = 40\n )\n\n\n krok_btn1.grid(row = 0, column = 0)\n krok_btn2.grid(row = 1, column = 0)\n krok_btn3.grid(row = 2, column = 0)\n krok_btn4.grid(row = 0, column = 1)\n krok_btn5.grid(row = 1, column = 1)\n krok_btn6.grid(row = 3, columnspan = 2)\n krok_btn8.grid(row = 2, column = 1)\n\n\n def d_1(event):\n d = sym_diff(self.B, self.C)\n nw = Toplevel(root)\n nw.title('B∆C')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn1.bind('', d_1)\n\n def d_2(event):\n d = peretyn(self.B, self.C)\n nw = Toplevel(root)\n nw.title('B⋂C')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn2.bind('', d_2)\n\n def 
d_3(event):\n d = uni(sym_diff(self.B, self.C), peretyn(self.B, self.C))\n nw = Toplevel(root)\n nw.title('(B∆C) ∪ (B⋂C)')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn3.bind('', d_3)\n\n def d_4(event):\n d = diff(self.A, self.B)\n nw = Toplevel(root)\n nw.title('A\\B')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn4.bind('', d_4)\n\n def d_5(event):\n d = peretyn(diff(self.A, self.B), self.B)\n nw = Toplevel(root)\n nw.title('(A\\B) ⋂ B')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn5.bind('', d_5)\n\n def r1(event):\n t = clock()\n d = mod1.po4_vuraz()\n self.timeD = (clock() - t)\n nw = Toplevel(root)\n nw.title('Результат')\n lab = Label(nw, text = d)\n lab.pack()\n q = open(r'D:\\Novosadenko_Vlad\\DM\\setD', 'w+')\n a = str(d)\n b = a.replace('{', '')\n c = b.replace('}', '')\n q.write(c)\n q.close()\n krok_btn6.bind('', r1)\n\n def d_6(event):\n d = sym_diff(peretyn(diff(self.A, self.B), self.B), self.A)\n nw = Toplevel(root)\n nw.title('((A\\B) ⋂ B) ∆ A')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn8.bind('', d_6)\n #Window 2\n\n def new_win3(self):\n tl = Toplevel(root)\n tl.title('Вікно №3')\n tl.geometry('600x360+100+100')\n\n frm1 = Frame(tl)\n frm1.pack()\n\n labelA = Label(frm1,\n text = 'Множина А',\n fg = '#F94C02',\n font = 'Times 13 bold')\n labelB = Label(frm1,\n text = 'Множина B',\n fg = '#F94C02',\n font = 'Times 13 bold')\n labelC = Label(frm1,\n text = 'Множина C',\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n txt1 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v1scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt1.xview)\n txt1['xscrollcommand'] = v1scrollbar.set\n\n txt2 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v2scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt2.xview)\n txt2['xscrollcommand'] = v2scrollbar.set\n\n txt3 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v3scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt3.xview)\n txt3['xscrollcommand'] = v3scrollbar.set\n\n labelA.grid(row = 0, column = 0)\n labelB.grid(row = 2, column = 0)\n labelC.grid(row = 4, column = 0)\n txt1.grid(row = 0,\n column = 1,\n sticky='nsew')\n v1scrollbar.grid(row=1,\n column=1,\n sticky='ew')\n txt2.grid(row = 2,\n column = 1,\n sticky='nsew')\n v2scrollbar.grid(row=3,\n column=1,\n sticky='ew')\n txt3.grid(row = 4,\n column = 1,\n sticky='nsew')\n v3scrollbar.grid(row=5,\n column=1,\n sticky='ew')\n\n txt1.insert('1.0', self.A)\n txt2.insert('1.0', self.B)\n txt3.insert('1.0', self.C)\n\n frm2 = Frame(tl)\n frm2.pack(fill=X, padx=20, pady=20)\n\n ins_frm1 = Frame(frm2)\n ins_frm1.pack(side = TOP)\n\n ins_lab1 = Label(ins_frm1,\n text = 'Виконання виразу:',\n fg = '#F94C02',\n font = 'Times 18 bold')\n ins_lab2 = Label(ins_frm1,\n text = r'D = (B∪C) ∆ A)',\n fg = '#F94C02',\n font = 18)\n ins_lab3 = Label(ins_frm1,\n text = 'Кроки:',\n fg = '#F94C02',\n font = 'Times 14 bold')\n\n ins_lab1.pack()\n ins_lab2.pack()\n ins_lab3.pack()\n\n ins_frm2 = Frame(frm2)\n ins_frm2.pack(fill=X, padx=10, pady=10)\n\n krok_btn1 = Button(ins_frm2,\n text = 'B∪C',\n bg = '#B4FFE6',\n width = 50)\n krok_btn7 = Button(ins_frm2,\n text = 'Результат спрощ. 
виразу',\n bg = '#B4FFE6',\n width = 50)\n\n\n krok_btn1.pack()\n krok_btn7.pack()\n\n\n def d_1(event):\n d = uni(self.B, self.C)\n nw = Toplevel(root)\n nw.title('B∪C')\n lab = Label(nw, text = d)\n lab.pack()\n krok_btn1.bind('', d_1)\n\n def d_7(event):\n t = clock()\n d = mod2.sprosch_vuraz()\n self.timeDs = (clock() - t)\n nw = Toplevel(root)\n nw.title('Результат спрощ. виразу')\n lab = Label(nw, text = d)\n lab.pack()\n q = open(r'D:\\Novosadenko_Vlad\\DM\\setDs', 'w+')\n a = str(d)\n b = a.replace('{', '')\n c = b.replace('}', '')\n q.write(c)\n q.close()\n krok_btn7.bind('', d_7)\n #Window 3\n\n def new_win4(self):\n tl = Toplevel(root)\n tl.title('Вікно №4')\n tl.geometry('600x250+100+100')\n\n frm1 = Frame(tl)\n frm1.pack()\n\n labelA = Label(frm1,\n text = 'Множина X (¬В)',\n fg = '#F94C02',\n font = 'Times 13 bold')\n labelB = Label(frm1,\n text = 'Множина Y (A)',\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n txt1 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v1scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt1.xview)\n txt1['xscrollcommand'] = v1scrollbar.set\n\n txt2 = Text(frm1,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v2scrollbar = Scrollbar(frm1,\n orient='hor',\n command=txt2.xview)\n txt2['xscrollcommand'] = v2scrollbar.set\n\n labelA.grid(row = 0, column = 0)\n labelB.grid(row = 2, column = 0)\n\n\n txt1.grid(row = 0,\n column = 1,\n sticky='nsew')\n v1scrollbar.grid(row=1,\n column=1,\n sticky='ew')\n\n txt2.grid(row = 2,\n column = 1,\n sticky='nsew')\n v2scrollbar.grid(row=3,\n column=1,\n sticky='ew')\n\n txt1.insert('1.0', diff(self.U, self.B))\n txt2.insert('1.0', self.A)\n\n frm2 = Frame(tl)\n frm2.pack(fill=X, padx=5, pady=5)\n\n ins_frm1 = Frame(frm2)\n ins_frm1.pack()\n\n ins_lab1 = Label(ins_frm1,\n text = 'Виконання виразу:',\n fg = '#F94C02',\n font = 'Times 18 bold')\n ins_lab2 = Label(ins_frm1,\n text = r'Z = X ⋂ Y',\n fg = '#F94C02',\n font = 18)\n\n ins_lab1.pack()\n ins_lab2.pack()\n\n ins_frm2 = Frame(frm2)\n ins_frm2.pack(fill=X, padx=5, pady=5)\n\n krok_btn1 = Button(ins_frm2,\n text = 'Виконати вираз',\n bg = '#B4FFE6',\n width = 40)\n krok_btn1.pack()\n\n\n def d_6(event):\n t = clock()\n d = peretyn(diff(self.U, self.B), self.A)\n self.timeZ = (clock() - t)\n nw = Toplevel(root)\n nw.title('Результат')\n lab = Label(nw, text = d)\n lab.pack()\n with open(r'D:\\Novosadenko_Vlad\\DM\\setZ', 'w+') as q:\n a = str(d)\n b=a.replace('{', '')\n c=b.replace('}', '')\n q.write(c)\n krok_btn1.bind('', d_6)\n #Window 4\n\n def new_win5(self):\n tl = Toplevel(root)\n tl.title('Вікно №5')\n tl.geometry('800x320+100+100')\n\n lb1 = Label(tl,\n text = 'Поч. вираз D = ',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n lb2 = Label(tl,\n text = 'Спрощ. вираз D = ',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n lb3 = Label(tl,\n text = 'Власна ф-я: Z = ',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n lb4 = Label(tl,\n text = 'Вбуд. ф-я: Z = ',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n lb5 = Label(tl,\n text = 'Чи поч. вираз D = спрощ. вираз D?',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n lb6 = Label(tl,\n text = 'Чи власна ф-я: Z = вбуд. 
ф-я: Z',\n width = 38,\n fg = '#F94C02',\n font = 'Times 13 bold')\n\n lb1.grid(row = 0, column = 0)\n lb2.grid(row = 2, column = 0)\n lb3.grid(row = 4, column = 0)\n lb4.grid(row = 6, column = 0)\n lb5.grid(row = 8, column = 0)\n lb6.grid(row = 10, column = 0)\n\n txt1 = Text(tl,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v1scrollbar = Scrollbar(tl,\n orient='hor',\n command=txt1.xview)\n txt1['xscrollcommand'] = v1scrollbar.set\n\n txt2 = Text(tl,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v2scrollbar = Scrollbar(tl,\n orient='hor',\n command=txt2.xview)\n txt2['xscrollcommand'] = v2scrollbar.set\n\n txt3 = Text(tl,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v3scrollbar = Scrollbar(tl,\n orient='hor',\n command=txt3.xview)\n txt3['xscrollcommand'] = v3scrollbar.set\n\n txt4 = Text(tl,\n wrap=NONE,\n width = 40,\n height=1,\n bg = '#F4D304')\n v4scrollbar = Scrollbar(tl,\n orient='hor',\n command=txt4.xview)\n txt4['xscrollcommand'] = v4scrollbar.set\n\n txt1.grid(row = 0,\n column = 1,\n sticky='nsew')\n v1scrollbar.grid(row=1,\n column=1,\n sticky='ew')\n\n txt2.grid(row = 2,\n column = 1,\n sticky='nsew')\n v2scrollbar.grid(row=3,\n column=1,\n sticky='ew')\n\n txt3.grid(row = 4,\n column = 1,\n sticky='nsew')\n v3scrollbar.grid(row=5,\n column=1,\n sticky='ew')\n\n txt4.grid(row = 6,\n column = 1,\n sticky='nsew')\n v4scrollbar.grid(row=7,\n column=1,\n sticky='ew')\n\n with open(r'D:\\Novosadenko_Vlad\\DM\\setD', 'r') as Dp:\n poch = Dp.read()\n P = poch.split(', ')\n po = []\n try:\n for l in P:\n l = l.replace('\\n', '')\n l = int(l)\n po.append(l)\n except ValueError:\n pass\n Dpp = set(po)\n txt1.insert('1.0', Dpp)\n\n with open(r'D:\\Novosadenko_Vlad\\DM\\setDs', 'r') as Ds:\n sprosch = Ds.read()\n S = sprosch.split(', ')\n spr = []\n try:\n for i in S:\n i = i.replace('\\n', '')\n i = int(i)\n spr.append(i)\n except ValueError:\n pass\n Dss = set(spr)\n txt2.insert('1.0', Dss)\n\n with open(r'D:\\Novosadenko_Vlad\\DM\\setZ', 'r') as Z:\n mZ = Z.read()\n zzz = mZ.split(', ')\n ZZ = []\n try:\n for k in zzz:\n k = k.replace('\\n', '')\n k = int(k)\n ZZ.append(k)\n except ValueError:\n pass\n mZZZZ = set(ZZ)\n txt3.insert('1.0', mZZZZ)\n\n t = clock()\n qwer = self.U.difference(self.B)\n Vstr = qwer.intersection(self.A)\n self.timeZ_b = (clock() - t)\n txt4.insert('1.0', Vstr)\n\n lab_result1 = Label(tl,\n bg = '#F4D304',\n width = 46)\n lab_result2 = Label(tl,\n bg = '#F4D304',\n width = 46)\n\n lab_result1.grid(row = 8, column = 1)\n lab_result2.grid(row = 10, column = 1)\n\n if Dpp == Dss:\n lab_result1['text'] = 'Так'\n else:\n lab_result1['text'] = 'Ні'\n\n if mZZZZ == Vstr:\n lab_result2['text'] = 'Так'\n else:\n lab_result2['text'] = 'Ні'\n\n timebut1 = Button(tl,\n text = 'Час вик. D',\n bg = '#B4FFE6',\n width = 38)\n timebut1.grid(row = 11, column = 0)\n\n def tbut1 (event):\n tl1 = Toplevel()\n tlab1 = Label(tl1,\n text = (str(self.timeD) + ' sec'),\n bg = '#F4D304')\n tlab1.pack()\n timebut1.bind('', tbut1)\n\n timebut2 = Button(tl,\n text = 'Час вик. D спрощ.',\n bg = '#B4FFE6',\n width = 46)\n timebut2.grid(row = 11, column = 1)\n\n def tbut2 (event):\n tl1 = Toplevel()\n tlab1 = Label(tl1,\n text = (str(self.timeDs) + ' sec'),\n bg = '#F4D304')\n tlab1.pack()\n timebut2.bind('', tbut2)\n\n timebut3 = Button(tl,\n text = 'Час вик. 
власного алгоритму',\n bg = '#B4FFE6',\n width = 38)\n timebut3.grid(row = 12, column = 0)\n\n def tbut3 (event):\n tl1 = Toplevel()\n tlab1 = Label(tl1,\n text = (str(self.timeZ) + ' sec'),\n bg = '#F4D304')\n tlab1.pack()\n timebut3.bind('', tbut3)\n\n timebut4 = Button(tl,\n text = 'Час вик. вбудованної ф-ї',\n bg = '#B4FFE6',\n width = 46)\n timebut4.grid(row = 12, column = 1)\n\n def tbut4 (event):\n tl1 = Toplevel()\n tlab1 = Label(tl1,\n text = (str(self.timeZ_b) + ' sec'),\n bg = '#F4D304')\n tlab1.pack()\n timebut4.bind('', tbut4)\n\n collation1 = Label(tl, bg = '#F4D304', width = 50)\n collation1.grid(row = 13, columnspan = 2)\n\n if self.timeDs < self.timeD:\n collation1['text'] = 'Поч. вираз виконується повільніше за спрощ.'\n else:\n collation1['text'] = 'Спрощ. вираз виконується повільніше за поч.'\n\n collation2 = Label(tl, bg = '#F4D304', width = 50)\n collation2.grid(row = 14, columnspan = 2)\n\n if self.timeZ_b < self.timeZ:\n collation2['text'] = 'Вл. алгоритм повільніший за вбуд. ф-ї'\n else:\n collation2['text'] = 'Вл. алгоритм швидший за вбуд. ф-ї'\n #Window 5\n\nM = Main_win(root)\n\nroot.mainloop()\n\n\n","sub_path":"Lab_1.py","file_name":"Lab_1.py","file_ext":"py","file_size_in_byte":39565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"238811718","text":"# extract readme data from bigquery:\n\nimport time\nfrom collections import defaultdict\nfrom google.cloud import bigquery\nimport os\nfrom pypidesckeywords.manifestload import LocalLoadProcess\n\n\n\nclass ExtractReadme:\n \"\"\" get readme from bigquery for pypi packages \"\"\"\n def __init__(self):\n self.repo_orgrepo = {}\n self.all_orgrepo_readme = {}\n self.repo_orgrepo_readme = {}\n self.elapsedtime_f1 = 0\n self.elapsedtime_f2 = 0\n self.stats = defaultdict(int)\n\n def get_all_readme(self):\n\n \"\"\" get all org-repo and readme from bigquery \"\"\"\n\n starttime = time.time()\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/antrived/Dropbox/Redhat_Work/Docs/4-Important-Credentials/Bigquery-ServiceAccount1.json'\n client = bigquery.Client()\n query = (\n \"\"\"\n # SELECT files.repo_name, files.path, con.id, con.content # files.path gives file name info\n SELECT files.repo_name, con.content\n FROM `bigquery-public-data.github_repos.contents` AS con\n INNER JOIN `bigquery-public-data.github_repos.files` AS files\n ON files.id = con.id\n WHERE files.path LIKE \"README%\"\n # LIMIT 10\n \"\"\"\n )\n # count:'3138803'\n query_job = client.query(query) # API request\n rows = query_job.result() # Waits for query to finish\n\n for row in rows:\n self.all_orgrepo_readme[row[0]] = row[1]\n\n self.elapsedtime_f1 = time.time() - starttime\n\n def readme_for_pypi(self, repo_org_names):\n\n \"\"\" get readme for pypi org-repo \"\"\"\n\n starttime = time.time()\n\n for key in repo_org_names.keys():\n self.repo_orgrepo[key] = repo_org_names[key]+\"/\"+key\n\n print(\"-----------------------------------------------------\")\n # print(self.repo_orgrepo)\n print(\"-----------------------------------------------------\")\n # print(self.all_orgrepo_readme)\n\n # merge repo_orgrepo with all_orgrepo_readme\n d1 = self.repo_orgrepo\n d2 = self.all_orgrepo_readme\n temp = dict([(d1v, \"\") for (d1k, d1v) in d1.items()])\n temp.update(d2)\n d3 = dict([(d1k, [d1v, temp[d1v]]) for (d1k, d1v) in d1.items()])\n\n self.repo_orgrepo_readme = d3\n\n print(\"-----------------------------------------------------\")\n # print(self.repo_orgrepo_readme)\n\n self.elapsedtime_f2 
= time.time() - starttime\n\n    def generate_stats(self):\n\n        \"\"\" count packages with readme\n        saves runtime of above 2 functions \"\"\"\n\n        total_count = 0\n        readme_count = 0\n        for key in self.repo_orgrepo_readme.keys():\n            total_count += 1\n            if self.repo_orgrepo_readme[key][1] != \"\":\n                readme_count += 1\n\n        self.stats['total-package-count'] = total_count\n        self.stats['packages-with-readme-count'] = readme_count\n        self.stats['get_all_readme-fn-hours'] = self.elapsedtime_f1/60/60\n        self.stats['readme_for_pypi-fn-hours'] = self.elapsedtime_f2/60/60\n\n\ndef main():\n    load1 = LocalLoadProcess()\n    repo_org_names = load1.JsonUploader(localpath=\"/home/antrived/Dump/SearchEngineData/PypiData/PypiDataOrgRepo\", filename=\"repo_org_pairs.json\")\n\n    getreadme = ExtractReadme()\n    getreadme.get_all_readme()\n    getreadme.readme_for_pypi(repo_org_names)\n    getreadme.generate_stats()\n\n    load1.JsonSaver(dictfile=getreadme.repo_orgrepo_readme,\n                    localpath=\"/home/antrived/Dump/SearchEngineData/GithubReadmeData/bigquery\",\n                    filename=\"repo_orgrepo_readme.json\")\n    load1.JsonSaver(dictfile=getreadme.stats,\n                    localpath=\"/home/antrived/Dump/SearchEngineData/GithubReadmeData/bigquery\",\n                    filename=\"stats.json\")\n\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"PypiProject/readmefetch/Random9.py","file_name":"Random9.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"288909643","text":"import pygame\nimport random\nfrom pygame.locals import *\nimport colores\nimport objetos\nimport lugares\nimport disparo\n\n# global variables\nvel_y = 2\nvelocidad = 0\nvidas = 1\npuntos = 0\ndelay_y = 2\nenemy_box = 96\nn_enemigos = 5\nmover = False\nchoco = False\ngame = False\nintro = False\nscreen_height = 480\nwidth, height = 800, 600\nterminar = False\nhelice = False\n# initialization\npygame.init()\nwin = pygame.display.set_mode((width, height))\n\n# objects\ncol = colores.col\nbase = lugares.Lugar(win, width, 8, -100)\navion = objetos.Obj(win, 370, 420, 49, 42, 0, 0, 0)\nenemigos = []\ndata_enemigos = []\ntiro = disparo.Disparo(win, 1, 0, 1)\n\nfor i in range(n_enemigos):\n    enemigos.append(objetos.Obj(win, 0, 0, 0, 0, 0, 0, 0))\n    data_enemigos.append(None)  # placeholder row; filled in by reiniciar()\n\n\ndef reiniciar():\n    global puntos, vidas, mover, choco, data_enemigos\n\n    vidas = 3\n    puntos = 0\n    base.y = -100\n    mover = False\n    avion.out = True\n\n    for i in range(n_enemigos):\n        data_enemigos[i] = [\n            100, i * enemy_box - screen_height, 42, 30, 6, 1, 0\n        ]\n    data_enemigos[4] = [350, -96, 81, 24, 8, 0]\n    data_enemigos[3] = [450, -192, 42, 30, 6, 0]\n    data_enemigos[1] = [500, -384, 37, 72, 11, 0]\n\n\n# function that checks whether the player wants to end the game\ndef terminar_juego():\n    global game, intro, vidas\n\n    if base.y < 238 and not game:\n        intro = True\n        base.y += 1\n        for i in range(n_enemigos):\n            enemigos[i].y += 1\n\n    if base.y == -100 and avion.out:\n        leer_pos()\n\n    if base.y < 238 and game and intro:\n        base.y += 1\n        for i in range(n_enemigos):\n            if enemigos[i].y > base.y + 4:\n                enemigos[i].out = True\n            enemigos[i].y += 1\n\n    if avion.out and not avion.t_expl and game and vidas > 0 and not intro:\n        intro = True\n        avion.x = 370\n        avion.ty = 0\n        base.y = -100\n        vidas -= 1\n\n    if base.y == 238 and game and intro:\n        avion.out = False\n\n    for e in pygame.event.get():\n        if e.type == QUIT or e.type == KEYDOWN and e.key == K_ESCAPE:\n            return True\n        if (e.type == KEYDOWN) and e.key == K_s:\n            reiniciar()\n            
game = True\n        if base.y == 238 and game and intro and (e.type == KEYDOWN):\n            intro = False\n\n    return False\n\n\ndef leer_pos():\n    global game, data_enemigos\n    if vidas < 0:\n        game = False\n\n    for i in range(n_enemigos):\n        enemigos[i].x = data_enemigos[i][0]\n        enemigos[i].y = data_enemigos[i][1]\n        enemigos[i].w = data_enemigos[i][2]\n        enemigos[i].h = data_enemigos[i][3]\n        enemigos[i].ty = data_enemigos[i][4]\n        enemigos[i].out = data_enemigos[i][5]\n\n\n# function that determines whether an object's outline touches a pixel of the given color\ndef hit_color_test(obj, col):\n    # render the color as a six-digit hex string; hex() plus lstrip('0x') would also strip leading zeros\n    col = '{:06x}'.format(col)\n    col = tuple(\n        int(col[i:i + 2], 16) for i in range(0, 6, 2))\n\n    if obj.x >= 0 and obj.x + obj.w <= width and obj.y >= 0 and obj.y + obj.h <= height:\n        for i in range(int(obj.w)):\n            for j in range(int(obj.h)):\n                if (not i and\n                        (not j\n                         or j == int(obj.h) - 1)) or not j and i == int(obj.w) - 1:\n                    if win.get_at((int(obj.x + i), int(obj.y + j))) == col:\n                        return True\n    return False\n\n\ndef colisionan(a, b):\n    return a.x + a.w > b.x and a.x < b.x + b.w and a.y + a.h > b.y and a.y < b.y + b.h\n\n\ndef hit_test():\n    global choco, mover\n\n    if (hit_color_test(avion, col[2]) or choco) and not avion.out:\n        mover = False\n        choco = False\n        avion.out = True\n        avion.t_expl = 80\n\n    for i in range(n_enemigos):\n        if enemigos[i].ty == 5 or enemigos[i].ty == 6 or enemigos[\n                i].ty == 8 or enemigos[i].ty == 9:\n            enemigos[i].dir = -1\n        else:\n            enemigos[i].dir = 1\n\n        hit = enemigos[i].w\n        enemigos[i].w = hit / 2\n        if hit_color_test(enemigos[i], col[2]):\n            if enemigos[i].ty == 5 or enemigos[i].ty == 6:\n                enemigos[i].ty = 4\n                enemigos[i].x += 2\n            if enemigos[i].ty == 8:\n                enemigos[i].x += 2\n                enemigos[i].ty = 7\n\n        enemigos[i].x += hit / 2\n        if hit_color_test(enemigos[i], col[2]):\n            if enemigos[i].ty == 4 or enemigos[i].ty == 3:\n                enemigos[i].x -= 2\n                enemigos[i].ty = 6\n            if enemigos[i].ty == 7:\n                enemigos[i].x -= 2\n                enemigos[i].ty = 8\n        enemigos[i].x -= hit / 2\n        enemigos[i].w = hit\n\n        if colisionan(tiro,\n                      enemigos[i]) and not enemigos[i].out and tiro.y >= 0:\n            enemigos[i].t_expl = 40\n            enemigos[i].out = True\n            tiro.y = -tiro.h\n\n        if colisionan(\n                avion,\n                enemigos[i]) and enemigos[i].ty < 11 and not enemigos[i].out:\n            enemigos[i].t_expl = 40\n            enemigos[i].out = True\n            choco = True\n\n\ndef check_enemigos():\n    global helice\n\n    helice = not helice\n\n    hit_test()\n\n    for i in range(n_enemigos):\n        if helice and enemigos[i].ty == 3 or enemigos[i].ty == 5:\n            enemigos[i].ty += 1\n        elif enemigos[i].ty == 4 or enemigos[i].ty == 6:\n            enemigos[i].ty -= 1\n\n        if game and not intro:\n            enemigos[i].y += mover * vel_y\n\n        if 2 < enemigos[i].ty < 9 and enemigos[i].y > 200 and not enemigos[\n                i].out:\n            enemigos[i].x += enemigos[i].dir\n\n        if enemigos[i].ty == 10 or enemigos[i].ty == 9:\n            if enemigos[i].x > width and enemigos[i].ty == 10:\n                enemigos[i].x = 0\n            if enemigos[i].x < 0 and enemigos[i].ty == 9:\n                enemigos[i].x = width\n            if not enemigos[i].out and not avion.out:\n                enemigos[i].x += enemigos[i].dir\n\n        if enemigos[i].y == screen_height - enemy_box / 3:\n            enemigos[i].y = 0\n            if base.y < enemigos[i].y < base.y + 400:\n                enemigos[i].out = True\n            else:\n                enemigos[i].out = False\n\n                tipos_enemigos = [4, 6, 7, 8, 9, 10, 11]\n                rnd = random.randint(0, 6)\n                enemigos[i].ty = tipos_enemigos[rnd]\n                if rnd == 0 or rnd == 1:\n                    enemigos[i].w = 42\n                    enemigos[i].h = 30\n                elif rnd == 2 or rnd == 3:\n                    enemigos[i].w = 81\n                    enemigos[i].h = 24\n                elif rnd == 4 or rnd == 5:\n                    enemigos[i].w = 48\n                    enemigos[i].h = 18\n                elif rnd == 
6:\n enemigos[i].w = 37\n enemigos[i].h = 72\n\n pos = True\n while pos:\n enemigos[i].x = random.randint(0, 8) * 84 + 23\n pos = hit_color_test(enemigos[i], col[2])\n\n enemigos[i].y = -enemy_box / 3\n\n enemigos[i].mostrar()\n\n\ndef pintar():\n pygame.display.update()\n # agua\n win.fill(col[3])\n\n # verificando los controles\n if not avion.out and not intro:\n control()\n\n tiro.show(avion.x + avion.w / 2, avion.y + avion.h / 2)\n\n # fondo\n win.fill(col[2], rect=[0, 0, 20, height])\n win.fill(col[2], rect=[width - 20, 0, 20, height])\n\n if -screen_height < base.y < screen_height:\n base.show()\n\n # mover a base\n base.y += mover * vel_y\n\n check_enemigos()\n avion.mostrar()\n\n # panel\n win.fill(col[7], rect=[0, screen_height, width, 130])\n win.fill(col[14], rect=[0, height - 117, width, 112])\n\n # medidor\n pygame.draw.rect(win, col[7], [320, 515, 204, 44], 4)\n pygame.draw.rect(win, col[7], [335, 515, 11, 13])\n pygame.draw.rect(win, col[7], [422, 515, 5, 13])\n pygame.draw.rect(win, col[7], [500, 515, 11, 13])\n\n\ndef control():\n global delay_y, velocidad, mover\n\n if game and not intro:\n velocidad += 1\n if velocidad > delay_y:\n mover = True\n velocidad = 0\n else:\n mover = False\n\n # movimiento del avion\n avion.ty = 0\n key = pygame.key.get_pressed()\n if key[K_LEFT] and avion.x > 10:\n avion.x -= 1\n avion.ty = 2\n if key[K_RIGHT] and avion.x < 734:\n avion.x += 1\n avion.ty = 1\n\n if key[K_SPACE]:\n tiro.shoting = True\n\n\nreiniciar()\n\nwhile not terminar:\n pintar()\n terminar = terminar_juego()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"424272244","text":"# --------------------------------------------------------\r\n# PYTHON PROGRAM\r\n# Here is where we are going to define our set of...\r\n# - Imports\r\n# - Global Variables\r\n# - Functions\r\n# ...to achieve the functionality required.\r\n# When executing > python 'this_file'.py in a terminal,\r\n# the Python interpreter will load our program,\r\n# but it will execute nothing yet.\r\n# --------------------------------------------------------\r\n\r\nimport pyspark\r\nimport pyspark.streaming\r\n\r\nimport os\r\nimport shutil\r\nimport time\r\n\r\n# ------------------------------------------\r\n# FUNCTION process_line\r\n# ------------------------------------------\r\ndef process_line(line):\r\n # 1. We create the output variable\r\n res = ()\r\n\r\n # 2. We remove the end of line character\r\n line = line.replace(\"\\n\", \"\")\r\n\r\n # 3. We split the line by tabulator characters\r\n params = line.split(\";\")\r\n\r\n # 4. We assign res\r\n if (len(params) == 7):\r\n res = tuple(params)\r\n\r\n # 5. We return res\r\n return res\r\n\r\ndef my_state_update(events_list, current_state):\r\n # 1. We create the output variable\r\n res = None\r\n\r\n # 2. If this is the first time we find the key, we initialise it\r\n if current_state is None:\r\n current_state = 0\r\n\r\n # 3. We update the state\r\n res = sum(events_list) + current_state\r\n\r\n # 4. 
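# (editor's sketch, hypothetical numbers: updateStateByKey keeps a running per-key\r\n    #  count here, e.g. events_list = [1, 1, 1] with current_state = 2 returns 5)\r\n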
We return res\r\n return res\r\n\r\n# ------------------------------------------\r\n# FUNCTION my_model\r\n# ------------------------------------------\r\ndef my_model(ssc, monitoring_dir, result_dir, time_step_interval, window_duration, sliding_duration):\r\n inputDStream = ssc.textFileStream(monitoring_dir)\r\n\r\n windowDStream = inputDStream.window(window_duration * time_step_interval, sliding_duration * time_step_interval)\r\n\r\n allDStream = windowDStream.map(process_line)\r\n\r\n filterDStream = allDStream.filter(lambda my_tuple: (int(my_tuple[0]) == 0) and (int(my_tuple[5]) == 0))\r\n\r\n xDStream = filterDStream.map(lambda x : (x[1], 1))\r\n\r\n solutionDStream = xDStream.updateStateByKey(my_state_update)\r\n sortedDStream = solutionDStream.transform(lambda rdd: rdd.sortBy(lambda x: x[1], ascending=False))\r\n sortedDStream.cache()\r\n sortedDStream.pprint()\r\n\r\n# ------------------------------------------\r\n# FUNCTION create_ssc\r\n# ------------------------------------------\r\ndef create_ssc(sc, monitoring_dir, result_dir, max_micro_batches, time_step_interval, window_duration, sliding_duration):\r\n # 1. We create the new Spark Streaming context.\r\n # This is the main entry point for streaming functionality. It requires two parameters:\r\n # (*) The underlying SparkContext that it will use to process the data.\r\n # (**) A batch interval, specifying how often it will check for the arrival of new data,\r\n # so as to process it.\r\n ssc = pyspark.streaming.StreamingContext(sc, time_step_interval)\r\n\r\n # 2. We configure the maximum amount of time the data is retained.\r\n # Think of it: If you have a SparkStreaming operating 24/7, the amount of data it is processing will\r\n # only grow. This is simply unaffordable!\r\n # Thus, this parameter sets maximum time duration past arrived data is still retained for:\r\n # Either being processed for first time.\r\n # Being processed again, for aggregation with new data.\r\n # After the timeout, the data is just released for garbage collection.\r\n\r\n # We set this to the maximum amount of micro-batches we allow before considering data\r\n # old and dumping it times the time_step_interval (in which each of these micro-batches will arrive).\r\n ssc.remember(max_micro_batches * time_step_interval)\r\n\r\n # 3. We model the ssc.\r\n # This is the main function of the Spark application:\r\n # On it we specify what do we want the SparkStreaming context to do once it receives data\r\n # (i.e., the full set of transformations and ouptut operations we want it to perform).\r\n my_model(ssc, monitoring_dir, result_dir, time_step_interval, window_duration, sliding_duration)\r\n\r\n # 4. We return the ssc configured and modelled.\r\n return ssc\r\n\r\n\r\n# ------------------------------------------\r\n# FUNCTION get_source_dir_file_names\r\n# ------------------------------------------\r\ndef get_source_dir_file_names(local_False_databricks_True, source_dir, valid_files, verbose):\r\n # 1. We create the output variable\r\n res = []\r\n\r\n # 2. We get the FileInfo representation of the files of source_dir\r\n fileInfo_objects = []\r\n if local_False_databricks_True == False:\r\n fileInfo_objects = os.listdir(source_dir)\r\n else:\r\n fileInfo_objects = dbutils.fs.ls(source_dir)\r\n\r\n # 3. We traverse the fileInfo objects, to get the name of each file\r\n for item in fileInfo_objects:\r\n # 3.1. We get a string representation of the fileInfo\r\n file_name = str(item)\r\n\r\n # 3.2. 
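# (editor's note: on Databricks, each FileInfo returned by dbutils.fs.ls also\r\n    #  exposes the file name directly as item.name, which is presumably simpler\r\n    #  than the string index() parsing used below)\r\n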
If the file is processed in DBFS\r\n if local_False_databricks_True == True:\r\n # 3.2.1. We look for the pattern name= to remove all useless info from the start\r\n lb_index = file_name.index(\"name='\")\r\n file_name = file_name[(lb_index + 6):]\r\n\r\n # 3.2.2. We look for the pattern ') to remove all useless info from the end\r\n ub_index = file_name.index(\"',\")\r\n file_name = file_name[:ub_index]\r\n\r\n # 3.3. If file_name is a valid_file then we append the name to the list\r\n if (file_name in valid_files):\r\n res.append(file_name)\r\n if verbose == True:\r\n print(file_name)\r\n\r\n # 4. We sort the list in alphabetic order\r\n res.sort()\r\n\r\n # 5. We return res\r\n return res\r\n\r\n\r\n# ------------------------------------------\r\n# FUNCTION streaming_simulation\r\n# ------------------------------------------\r\ndef streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, valid_files, verbose):\r\n # 1. We get the names of the files on source_dir\r\n files = get_source_dir_file_names(local_False_databricks_True, source_dir, valid_files, verbose)\r\n\r\n # 2. We get the starting time of the process\r\n time.sleep(time_step_interval * 0.1)\r\n\r\n start = time.time()\r\n\r\n # 2.1. If verbose mode, we inform of the starting time\r\n if (verbose == True):\r\n print(\"Start time = \" + str(start))\r\n\r\n # 3. We set a counter in the amount of files being transferred\r\n count = 0\r\n\r\n # 4. We simulate the dynamic arriving of such these files from source_dir to dataset_dir\r\n # (i.e, the files are moved one by one for each time period, simulating their generation).\r\n for file in files:\r\n # 4.1. We copy the file from source_dir to dataset_dir#\r\n if local_False_databricks_True == False:\r\n shutil.copyfile(source_dir + file, monitoring_dir + file)\r\n else:\r\n dbutils.fs.cp(source_dir + file, monitoring_dir + file)\r\n\r\n # 4.2. We increase the counter, as we have transferred a new file\r\n count = count + 1\r\n\r\n # 4.3. If verbose mode, we inform from such transferrence and the current time.\r\n if (verbose == True):\r\n print(\"File \" + str(count) + \" transferred. Time since start = \" + str(time.time() - start))\r\n\r\n # 4.4. We wait the desired transfer_interval until next time slot.\r\n time.sleep((start + (count * time_step_interval)) - time.time())\r\n\r\n\r\n# ------------------------------------------\r\n# FUNCTION my_main\r\n# ------------------------------------------\r\ndef my_main(sc,\r\n local_False_databricks_True,\r\n source_dir,\r\n monitoring_dir,\r\n checkpoint_dir,\r\n result_dir,\r\n max_micro_batches,\r\n time_step_interval,\r\n verbose,\r\n window_duration,\r\n sliding_duration,\r\n race_conditions_extra_delay,\r\n valid_files\r\n ):\r\n # 1. We setup the Spark Streaming context\r\n # This sets up the computation that will be done when the system receives data.\r\n ssc = pyspark.streaming.StreamingContext.getActiveOrCreate(checkpoint_dir,\r\n lambda: create_ssc(sc,\r\n monitoring_dir,\r\n result_dir,\r\n max_micro_batches,\r\n time_step_interval,\r\n window_duration,\r\n sliding_duration\r\n )\r\n )\r\n\r\n # 2. 
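# (editor's note: getActiveOrCreate returns any already-active context, otherwise\r\n    #  tries to recover one from checkpoint_dir, and only calls the create_ssc\r\n    #  lambda above when both of those fail)\r\n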
We start the Spark Streaming Context in the background to start receiving data.\r\n # Spark Streaming will start scheduling Spark jobs in a separate thread.\r\n\r\n # Very important: Please note a Streaming context can be started only once.\r\n # Moreover, it must be started only once we have fully specified what do we want it to do\r\n # when it receives data (i.e., the full set of transformations and ouptut operations we want it\r\n # to perform).\r\n ssc.start()\r\n\r\n # 3. As the jobs are done in a separate thread, to keep our application (this thread) from exiting,\r\n # we need to call awaitTermination to wait for the streaming computation to finish.\r\n ssc.awaitTerminationOrTimeout(time_step_interval)\r\n\r\n # 4. Super interesting topic: RACE CONDITIONS\r\n if (race_conditions_extra_delay == True):\r\n time.sleep((sliding_duration - 1) * time_step_interval)\r\n\r\n # 5. We simulate the streaming arrival of files (i.e., one by one) from source_dir to monitoring_dir.\r\n streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, valid_files, verbose)\r\n\r\n # 6. Once we have transferred all files and processed them, we are done.\r\n # Thus, we stop the Spark Streaming Context\r\n ssc.stop(stopSparkContext=False)\r\n\r\n # 7. Extra security stop command: It acts directly over the Java Virtual Machine,\r\n # in case the Spark Streaming context was not fully stopped.\r\n\r\n # This is crucial to avoid a Spark application working on the background.\r\n # For example, Databricks, on its private version, charges per cluster nodes (virtual machines)\r\n # and hours of computation. If we, unintentionally, leave a Spark application working, we can\r\n # end up with an unexpected high bill.\r\n if (not sc._jvm.StreamingContext.getActive().isEmpty()):\r\n sc._jvm.StreamingContext.getActive().get().stop(False)\r\n\r\n\r\n# ---------------------------------------------------------------\r\n# PYTHON EXECUTION\r\n# This is the main entry point to the execution of our program.\r\n# It provides a call to the 'main function' defined in our\r\n# Python program, making the Python interpreter to trigger\r\n# its execution.\r\n# ---------------------------------------------------------------\r\nif __name__ == '__main__':\r\n # 1. Extra input arguments\r\n\r\n # 1.1. We select the subset of files of the dataset we want to simulate by streaming\r\n # In this case we are interested in the first week of May (1st-7th).\r\n # Unfortunately, we don't have data for May 6th, so we can exclude it.\r\n valid_files = [\"bikeMon_20170501.csv\",\r\n \"bikeMon_20170502.csv\",\r\n \"bikeMon_20170503.csv\",\r\n \"bikeMon_20170504.csv\",\r\n \"bikeMon_20170505.csv\",\r\n \"bikeMon_20170507.csv\"\r\n ]\r\n\r\n # 2. Local or Databricks\r\n local_False_databricks_True = False\r\n\r\n # 3. 
We set the path to my_dataset and my_result\r\n my_local_path = \"../\"\r\n my_databricks_path = \"/FileStore/tables/A02/\"\r\n\r\n source_dir = \"my_dataset/\"\r\n monitoring_dir = \"my_monitoring/\"\r\n checkpoint_dir = \"my_checkpoint/\"\r\n result_dir = \"my_result/\"\r\n\r\n if local_False_databricks_True == False:\r\n source_dir = my_local_path + source_dir\r\n monitoring_dir = my_local_path + monitoring_dir\r\n checkpoint_dir = my_local_path + checkpoint_dir\r\n result_dir = my_local_path + result_dir\r\n else:\r\n source_dir = my_databricks_path + source_dir\r\n monitoring_dir = my_databricks_path + monitoring_dir\r\n checkpoint_dir = my_databricks_path + checkpoint_dir\r\n result_dir = my_databricks_path + result_dir\r\n\r\n # 4. We set the Spark Streaming parameters\r\n\r\n # 4.1. We specify the number of micro-batches (i.e., files) of our dataset.\r\n dataset_micro_batches = 6\r\n\r\n # 4.2. We specify the time interval each of our micro-batches (files) appear for its processing.\r\n time_step_interval = 10\r\n\r\n # 4.3. We specify the maximum amount of micro-batches that we want to allow before considering data\r\n # old and dumping it.\r\n max_micro_batches = dataset_micro_batches + 1\r\n\r\n # 4.4. We configure verbosity during the program run\r\n verbose = False\r\n\r\n # 4.5. window_duration, i.e., how many previous batches of data are considered on each window.\r\n window_duration = 2\r\n\r\n # 4.6. sliding duration, i.e., how frequently the new DStream computes results.\r\n sliding_duration = 1\r\n\r\n # 4.7. RACE Conditions: Discussed above. Basically, in which moment of the sliding_window do I want to start.\r\n # This performs an extra delay at the start of the file transferred to sync SparkContext with file transferrence.\r\n race_conditions_extra_delay = True\r\n\r\n # 5. We remove the directories\r\n if local_False_databricks_True == False:\r\n # 5.1. We remove the monitoring_dir\r\n if os.path.exists(monitoring_dir):\r\n shutil.rmtree(monitoring_dir)\r\n\r\n # 5.2. We remove the result_dir\r\n if os.path.exists(result_dir):\r\n shutil.rmtree(result_dir)\r\n\r\n # 5.3. We remove the checkpoint_dir\r\n if os.path.exists(checkpoint_dir):\r\n shutil.rmtree(checkpoint_dir)\r\n else:\r\n # 5.1. We remove the monitoring_dir\r\n dbutils.fs.rm(monitoring_dir, True)\r\n\r\n # 5.2. We remove the result_dir\r\n dbutils.fs.rm(result_dir, True)\r\n\r\n # 5.3. We remove the checkpoint_dir\r\n dbutils.fs.rm(checkpoint_dir, True)\r\n\r\n # 6. We re-create the directories again\r\n if local_False_databricks_True == False:\r\n # 6.1. We re-create the monitoring_dir\r\n os.mkdir(monitoring_dir)\r\n\r\n # 6.2. We re-create the result_dir\r\n os.mkdir(result_dir)\r\n\r\n # 6.3. We re-create the checkpoint_dir\r\n os.mkdir(checkpoint_dir)\r\n else:\r\n # 6.1. We re-create the monitoring_dir\r\n dbutils.fs.mkdirs(monitoring_dir)\r\n\r\n # 6.2. We re-create the result_dir\r\n dbutils.fs.mkdirs(result_dir)\r\n\r\n # 6.3. We re-create the checkpoint_dir\r\n dbutils.fs.mkdirs(checkpoint_dir)\r\n\r\n # 7. We configure the Spark Context\r\n sc = pyspark.SparkContext.getOrCreate()\r\n sc.setLogLevel('WARN')\r\n print(\"\\n\\n\\n\")\r\n\r\n # 8. 
We call to our main function\r\n my_main(sc,\r\n local_False_databricks_True,\r\n source_dir,\r\n monitoring_dir,\r\n checkpoint_dir,\r\n result_dir,\r\n max_micro_batches,\r\n time_step_interval,\r\n verbose,\r\n window_duration,\r\n sliding_duration,\r\n race_conditions_extra_delay,\r\n valid_files\r\n )\r\n","sub_path":"my_python_spark/A02_Hint4.py","file_name":"A02_Hint4.py","file_ext":"py","file_size_in_byte":15665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92018660","text":"import os\n\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchvision\nfrom torchvision import transforms\n\nclass EmbeddingsExtractor:\n \"\"\"\n A class to extract embeddings from RGB images of dogs.\n \n Attributes\n ----------\n device : torch.device\n object for applying inputs, outputs and models to GPU or CPU\n model : torch.nn\n model for making the dog embeddings prediction\n transform : torch.transforms\n input preprocessing pipeline\n \n Methods\n -------\n embedder_model(n_embeddings)\n Generates a CNN ResNet50-based embedder\n \n get_embeddings_batch(img)\n Predicts the embeddings from a batch of dog images..\n \n get_embeddings(img)\n Predicts the embeddings of a dog image.\n \n distance_two_embeddings(embeddings_a, embeddings_b)\n Computes the difference between two embeddings.\n \"\"\"\n \n def __init__(self, model_ckpt_path):\n '''\n Constructs all the attributes for the embedder object.\n \n Parameters\n ----------\n model_ckpt_path : str\n path to the file containing the result of the trained model\n '''\n \n # Initialize device (GPU or CPU)\n self.device = torch.device(\n 'cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # Load checkpoint of the trained model (`model_ckpt`)\n model_ckpt = torch.load(model_ckpt_path, map_location=self.device)\n \n # Get the number of embeddings from the trained model\n n_embeddings = model_ckpt['n_embeddings']\n \n # Get the trained model weights from the checkpoint\n state_dict = model_ckpt['state_dict']\n \n # Initialize the model architecture (`model`)\n self.model = self.embedder_model(n_embeddings)\n \n # Load the weights into the model\n self.model.load_state_dict(state_dict)\n self.model.eval()\n self.model = torch.jit.script(self.model).to(self.device)\n \n # Do first predict, which is always the slowest\n self.model(torch.rand(1, 3, 224, 224))\n \n # Initialize preprocessing input pipeline\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n def embedder_model(self, n_embeddings):\n '''\n Generates a CNN ResNet50-based embedder.\n \n Parameters\n ----------\n n_embeddings\n number of embeddings to be outputted\n \n Returns\n -------\n x : torch.nn\n the model\n '''\n \n # First, `x` is a new ResNet50 CNN model\n x = torchvision.models.resnet50(pretrained=False)\n \n # Change the final fully connected layer so that the output size\n # matches the desired `n_embeddings` size. 
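# (editor's note: torchvision's resnet50 ships with fc = Linear(2048, 1000) for\n        #  ImageNet; the replacement below maps 2048 -> n_embeddings and the sigmoid\n        #  squashes every embedding value into (0, 1))\n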
Also, apply sigmoid\n # function\n x.fc = torch.nn.Sequential(\n torch.nn.Linear(2048, n_embeddings),\n torch.nn.Sigmoid())\n \n return x\n \n def get_embeddings_batch(self, imgs):\n '''\n Predicts the embeddings from a batch of dog images.\n \n Parameters\n ----------\n imgs : list\n batch of dog images\n \n Returns\n -------\n y : np.array\n embeddings from each dog image\n '''\n \n # Convert each dog image (`img`) to red-green-blue channels (RGB),\n # ensuring the input will have 3 channels\n imgs = [img.convert('RGB') for img in imgs]\n \n # Apply preprocessing pipeline to each input (`xs`) \n xs = [self.transform(img) for img in imgs]\n \n # Concatenate the list of preprocessed input tensors to a single one\n x = torch.stack(xs)\n\n # Pass the input tensor to the used device (GPU or CPU)\n x = x.to(self.device)\n \n # Calculate the list of embeddings (`y`) from the input `x` according\n # to `model`\n y = self.model(x)\n \n # Convert from torch.Tensor to np.array\n # y = y.detach().numpy()\n y = y.detach().cpu().numpy()\n \n return y\n \n def get_embeddings(self, img):\n '''\n Predicts the embeddings of a dog image.\n \n Parameters\n ----------\n img : PIL.Image\n image containing a dog\n\n Returns\n -------\n y : np.array\n array of embeddings from the input image\n '''\n \n # Read image (`img`) and convert to red-green-blue channels (RGB),\n # ensuring the input will have 3 channels\n img = img.convert('RGB')\n \n # `x` refers to the image when the preprocessing pipeline\n # (`self.transform`) is applied to the image (`img`)\n x = self.transform(img)\n \n # Add a new dimension to the tensor (simulate a 1-batch size)\n x.unsqueeze_(0)\n \n # Pass the input tensor to the used device (GPU or CPU)\n x = x.to(self.device)\n \n # Calculate the embeddings (`y`) from the input `x` according to `model`\n y = self.model(x)\n \n # Remove extra dimension 0\n y.squeeze_(0)\n \n # Convert from torch.Tensor to np.array\n # y = y.detach().numpy()\n y = y.detach().cpu().numpy()\n \n return y\n \n def distance_two_embeddings(self, embeddings_a, embeddings_b):\n '''\n Computes the difference between two embeddings.\n \n Parameters\n ----------\n embeddings_a : np.array\n embeddings from the first image\n embeddings_b : np.array\n embeddings from the second image\n\n Returns\n -------\n distance : float\n the distance between the two embeddings\n '''\n \n # `distance` computes the euclidean distance between the embeddings\n distance = np.linalg.norm(embeddings_a - embeddings_b)\n \n return distance\n \nif __name__ == '__main__':\n model_ckpt_path = os.path.join('..', 'models', 'embedder.pth')\n embeddings_extractor = EmbeddingsExtractor(model_ckpt_path)\n '''\n img1_path = os.path.join('..', 'dogs', 'train',\n 'n02085620-Chihuahua', 'n02085620_199.jpg')\n img2_path = os.path.join('..', 'dogs', 'train',\n 'n02085620-Chihuahua', 'n02085620_242.jpg')\n img3_path = os.path.join('..', 'dogs', 'train',\n 'n02085936-Maltese_dog', 'n02085936_352.jpg')\n '''\n '''\n img1_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02090379-redbone', 'n02090379_91.jpg')\n img2_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02090379-redbone', 'n02090379_223.jpg')\n img3_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02087394-Rhodesian_ridgeback', 'n02087394_889.jpg')\n '''\n \n img1_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02105056-groenendael', 'n02105056_143.jpg')\n img2_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02105056-groenendael', 
'n02105056_1960.jpg')\n img3_path = os.path.join('..', 'dogs', 'recognition', 'enroll',\n 'n02099429-curly-coated_retriever', 'n02099429_227.jpg')\n \n \n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n img3 = Image.open(img3_path)\n \n e1 = embeddings_extractor.get_embeddings(img1)\n e2 = embeddings_extractor.get_embeddings(img2)\n e3 = embeddings_extractor.get_embeddings(img3)\n \n print(embeddings_extractor.distance_two_embeddings(e1, e2))\n print(embeddings_extractor.distance_two_embeddings(e1, e3))\n print(embeddings_extractor.distance_two_embeddings(e2, e3))\n","sub_path":"src/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593290882","text":"import getopt as getOpt\nimport sys\n\nimport MongoConstants as Constants\nimport MongoReader as Reader\nimport MongoSaver as Saver\n\n\ndef main():\n \"\"\"Reads command line options\"\"\"\n\n # Default values\n zipcode = ''\n state = ''\n city = ''\n selected = 'zip'\n file_to_save = Constants.FILE_NAME\n save = False\n read = False\n\n # parse command line options\n try:\n opts, args = getOpt.getopt(sys.argv[1:], \"vhz:s:c:\", [\"help\", \"verbose\", \"zip=\", \"state=\", \"city=\",\n \"select=\", \"save=\", \"save\", \"clear\"])\n except getOpt.GetoptError as err:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n # Only call with options\n if len(opts) == 0:\n usage()\n sys.exit(2)\n\n # process options\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit(0)\n elif o in (\"-v\", \"--verbose\"):\n Constants.VERBOSE = True\n elif o in (\"-z\", \"--zip\"):\n zipcode = a\n read = True\n elif o in (\"-s\", \"--state\"):\n state = a\n read = True\n elif o in (\"-c\", \"--city\"):\n city = a\n read = True\n elif o in \"--select\":\n selected = a\n read = True\n elif o in \"--save\":\n if a != '':\n file_to_save = a\n save = True\n elif o in \"--clear\":\n Saver.delete_all()\n else:\n assert False, \"unhandled option\"\n\n if save:\n count = Saver.save_file(file_to_save)\n print(str(count) + ' documents saved!')\n if read:\n for result in Reader.find_and_select(zipcode, state, city, selected):\n print(result)\n\n\ndef usage():\n print(\n \"Usage: \\n\"\n \" - Saving: Use --save to save a File from resource folder \\n\"\n \" - Reading: Use [--zip, --state, --city] and --select ['zip', 'state', 'city'] \\n\"\n \" - Deleting: Use --clear to delete all from database\")\n\n\n# Define main method\nif __name__ == \"__main__\":\n main()\n","sub_path":"_07_mongo_db/src/MainApp.py","file_name":"MainApp.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10493062","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport logging, random\nfrom threading import Thread\nfrom flask import json\n\nclass TamingClient():\n\n def __init__(self, addresses, timeout):\n '''\n @param timeout: número de segundos máximos de espera. 
Puede\n ser un valor con decimales.\n '''\n self.addresses = addresses\n self.timeout = timeout\n self.connection = None\n \n def open_connection(self):\n if self.connection is None:\n try:\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.connect(random.choice(self.addresses))\n self.connection.settimeout(self.timeout)\n except Exception as e:\n logging.exception(\"Error connecting to taming service.\")\n return False\n return True\n \n def close_connection(self):\n if not self.connection is None:\n try:\n self.connection.close()\n except Exception as e:\n logging.exception(\"Error closing taming service connection.\")\n return False\n \n def tameText(self, text, weights, limit, maxdist, minsimil, dym=1, rel=1):\n # Si no estaba conectado, se conecta y se desconectará al final\n wasnt_connected = self.connection is None\n if wasnt_connected and not self.open_connection(): return None\n\n try:\n params = json.dumps({\"t\": text, \"w\":weights, \"l\":limit, \"s\":minsimil, \"md\":maxdist, \"d\":dym, \"r\":rel})\n paramslen = len(params)\n self.connection.send(chr(paramslen/256)+chr(paramslen%256)+params)\n lenrec = self.connection.recv(2)\n lenrec = ord(lenrec[0])*256+ord(lenrec[1])\n line = self.connection.recv(lenrec)\n result = json.loads(line[:-1])\n except socket.timeout as e:\n logging.warn(\"Timeout when calling taming service.\")\n result = None\n except Exception as e:\n logging.exception(\"Error talking to taming service.\")\n result = None\n finally:\n if wasnt_connected: self.connection.close()\n return result\n","sub_path":"foofind/utils/taming.py","file_name":"taming.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408172823","text":"#! /usr/bin/python3\nfrom random import randrange\nrep = 'n'\nwhile (rep!='O'):\n n = int(input(\"Nombre de joueur : \"))\n essais=[]\n while n <=0 :\n print(\"Nombre de joueurs ivalide\")\n n = int(input(\"Nombre de joueur : \"))\n for j in range(1, n+1):\n #b = randrange(0, 250)\n b =200\n print(\"================== Joueur\", j, \"====================\")\n essai = 10\n for i in range(1, 11):\n nombre=int(input(\"Veuillez entre un nombre : \"))\n if (nombre > b):\n print(\"Ce nombre est trop grand DIMINUE\")\n print(\"Essai restant : \", essai - i)\n elif (nombre < b):\n print(\"Ce nombre est trop petit AUGMENTER\")\n print(\"Essai restant : \", essai - i)\n else :\n print(\"HOURRA C'est le bon nombre.\\nVous avez gagne!!!!\")\n essais.append( 10 - essai + i)\n break\n vainqueur = []\n vainqueurs = []\n nbrvaiqueur = 0\n for j in range(1, n+1):\n print(\"\\n+++++++++++ Joueur \", j, \": \", essais[j-1], \" tentatives.\")\n if (essais[j-1] == min(essais)):\n vainqueur.append(1)\n nbrvaiqueur +=1\n else:\n vainqueur.append(0)\n if(nbrvaiqueur> 1):\n for j in range(len(vainqueur)):\n if vainqueur[j] == 1:\n vainqueurs.append(j+1)\n print(\"\\nLes joueurs \", end='')\n for i in range(len(vainqueurs)):\n print(vainqueurs[i], end=', ')\n print(\" ont GAGNE!!!!!!\")\n else :\n print(\"\\nJoueur\", vainqueur,\" a GAGNE\")\n rep = input(\"Voulez-vous quitter? 
[O/n] : \")\n ","sub_path":"nombre.py","file_name":"nombre.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10044613","text":"from puzzle.TraversingAlgorithm import TraversingAlgorithm\nfrom puzzle.PuzzleState import PuzzleState\n\nfrom collections import deque\nimport time\n\nclass AStar(TraversingAlgorithm):\n\n def __init__(self):\n super().__init__()\n self.frontier = deque()\n self.search_order = \"RULD\"\n\n def search(self, file_name):\n root = super().load_root_from_file(file_name)\n\n if root.is_target():\n self.update_info(root)\n return True\n root.cost_of_the_path = self.calculate_cost_of_the_path(root.puzzle_data)\n\n self.add_to_frontier_beginning(root)\n start = time.time()\n\n while True:\n node = self.frontier.popleft()\n\n for operator in self.search_order:\n if self.is_move_zero_possible(node, operator):\n new_puzzle_data = self.get_puzzle_data(node, operator)\n if new_puzzle_data.__hash__() in self.explored:\n continue\n\n solution_length = node.solution_length + 1\n cost_of_the_path = self.calculate_cost_of_the_path(new_puzzle_data) + solution_length\n new_puzzle_state = PuzzleState(new_puzzle_data,\n operator,\n node,\n solution_length,\n cost_of_the_path)\n\n self.update_recursion_depth(new_puzzle_state)\n\n if new_puzzle_data.is_target():\n end = time.time()\n self.computing_time_miliseconds = (end - start) * 1000\n self.update_info(new_puzzle_state)\n return True\n if new_puzzle_state.cost_of_the_path <= new_puzzle_state.parent.cost_of_the_path:\n self.add_to_frontier_beginning(new_puzzle_state) # rowne rodzicowi\n else:\n self.add_to_frontier_end(new_puzzle_state) # nierowne rodzicowi\n\n self.explored_count += 1\n\n def add_to_frontier_beginning(self, puzzle_state):\n self.frontier.appendleft(puzzle_state)\n self.frontier_count += 1\n self.explored.add(puzzle_state.__hash__())\n\n def add_to_frontier_end(self, puzzle_state):\n self.frontier.append(puzzle_state)\n self.frontier_count += 1\n self.explored.add(puzzle_state.__hash__())\n\n def calculate_cost_of_the_path(self, puzzle_data):\n raise Exception(\"Called AStar without heuristics\")\n","sub_path":"zad1/puzzle/AStar.py","file_name":"AStar.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"521694950","text":"import tensorflow as tf\nimport glob\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nfrom tensorflow.keras import layers\nimport time\nfrom IPython import display\nimport pickle\nfrom pathlib import Path\nfrom skimage.transform import resize\n\n# TODO: reference original script\n\n#print(tf.__version__) # check we indeed have tensorflow version 2.4\nobjects = []\norigin_path = Path(\"./\")\nfor filename in origin_path.glob(\"*.pkl\"):\n\twith (open(filename, \"rb\")) as openfile:\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tobjects.append(pickle.load(openfile))\n\t\t\texcept EOFError:\n\t\t\t\tbreak\n\ntrain_images = []\ntrain_labels = []\nwidth = 128\nheight = 32\nEPOCHS = 10\nnum_examples_to_generate = 100\n\nfor inner_layer in objects:\n\tlabel = inner_layer[0]\n\tfor i in range(1, len(inner_layer)):\n\t\tfor soundfile in inner_layer[i]:\n\t\t\tnum_images = len(soundfile[0]) // width\n\t\t\tfor j in range(num_images):\n\t\t\t\tindex1 = j*width\n\t\t\t\tindex2 = (j+1)*width\n\t\t\t\ttrain_images.append(soundfile[0:width, 
index1:index2])\n\t\t\t\ttrain_labels.append(label)\n\nprint(f\"=========== Amount of training images: {len(train_labels)} ===========\")\n# MNIST has 60'000 training images, each of size 28x28\n# replace code below by loading our bird spectrograms of size 28x28\n#(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()\n#print(train_images.shape)\ntrain_images = np.array(train_images)\ntrain_labels = np.array(train_labels)\n# parse data to floats:\ntrain_images = train_images.reshape(train_images.shape[0], height, width, 1).astype('float32')\n# TODO: find min, max of our data to normalize to [-1,1]\n#train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]\n\nBUFFER_SIZE = len(train_images)\nBATCH_SIZE = 503 # define a batch size\n\n# Batch and shuffle the data\ntrain_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\ndef make_generator_model():\n\tmodel = tf.keras.Sequential()\n\tmodel.add(layers.Dense((height//4)*(width//4)*256, use_bias=False, input_shape=(100,)))\n\tmodel.add(layers.BatchNormalization())\n\tmodel.add(layers.LeakyReLU())\n\n\tmodel.add(layers.Reshape((height//4, width//4, 256)))\n\tassert model.output_shape == (None, height//4, width//4, 256) # Note: None is the batch size\n\n\tmodel.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n\tassert model.output_shape == (None, height//4, width//4, 128)\n\tmodel.add(layers.BatchNormalization())\n\tmodel.add(layers.LeakyReLU())\n\n\tmodel.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n\tassert model.output_shape == (None, height//2, width//2, 64)\n\tmodel.add(layers.BatchNormalization())\n\tmodel.add(layers.LeakyReLU())\n\n\tmodel.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n\tassert model.output_shape == (None, height, width, 1)\n\n\treturn model\n\ngenerator = make_generator_model()\n\n#noise = tf.random.normal([1, 100])\n#generated_image = generator(noise, training=False)\n\n#plt.imshow(generated_image[0, :, :, 0], cmap='gray')\n\ndef make_discriminator_model():\n\tmodel = tf.keras.Sequential()\n\tmodel.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n\t\t\t\t\t\t\t\t\t input_shape=[height, width, 1]))\n\tmodel.add(layers.LeakyReLU())\n\tmodel.add(layers.Dropout(0.3))\n\n\tmodel.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n\tmodel.add(layers.LeakyReLU())\n\tmodel.add(layers.Dropout(0.3))\n\n\tmodel.add(layers.Flatten())\n\tmodel.add(layers.Dense(1))\n\n\treturn model\n\ndiscriminator = make_discriminator_model()\n#decision = discriminator(generated_image)\n#print (decision)\n\n# This method returns a helper function to compute cross entropy loss\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n\treal_loss = cross_entropy(tf.ones_like(real_output), real_output)\n\tfake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n\ttotal_loss = real_loss + fake_loss\n\treturn total_loss\n\ndef generator_loss(fake_output):\n\treturn cross_entropy(tf.ones_like(fake_output), fake_output)\n\ngenerator_optimizer = tf.keras.optimizers.Adam(1e-4)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = 
tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n\t\t\t\t\t\t\t\t discriminator_optimizer=discriminator_optimizer,\n\t\t\t\t\t\t\t\t generator=generator,\n\t\t\t\t\t\t\t\t discriminator=discriminator)\n\n\nnoise_dim = 100\n\n\n# We will reuse this seed overtime (so it's easier)\n# to visualize progress in the animated GIF)\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n# Notice the use of `tf.function`\n# This annotation causes the function to be \"compiled\".\n@tf.function\ndef train_step(images):\n\tnoise = tf.random.normal([BATCH_SIZE, noise_dim])\n\n\twith tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n\t\tgenerated_images = generator(noise, training=True)\n\n\t\treal_output = discriminator(images, training=True)\n\t\tfake_output = discriminator(generated_images, training=True)\n\n\t\tgen_loss = generator_loss(fake_output)\n\t\tdisc_loss = discriminator_loss(real_output, fake_output)\n\n\tgradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n\tgradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n\tgenerator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n\tdiscriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n\ndef train(dataset, epochs):\n\tprint(\"========= Starting first epoch ============\")\n\tfor epoch in range(epochs):\n\t\tstart = time.time()\n\n\t\tfor image_batch in dataset:\n\t\t\ttrain_step(image_batch)\n\n\t\t# Produce images for the GIF as we go\n\t\tdisplay.clear_output(wait=True)\n\t\tgenerate_and_save_images(generator, epoch + 1, seed)\n\n\t\t# Save the model every 5 epochs\n\t\t#if (epoch + 1) % 5 == 0:\n\t\tcheckpoint.save(file_prefix = checkpoint_prefix)\n\n\t\tprint ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n\n\t# Generate after the final epoch\n\tdisplay.clear_output(wait=True)\n\tgenerate_and_save_images(generator, epochs, seed)\n\ndef generate_and_save_images(model, epoch, test_input):\n # Notice `training` is set to False.\n # This is so all layers run in inference mode (batchnorm).\n predictions = model(test_input, training=False)\n \n fig = plt.figure(figsize=(4,4))\n \n for i in range(predictions.shape[0]):\n #plt.subplot(4, 4, i+1)\n #plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.imshow(predictions[i, :, :, 0], cmap='viridis')\n plt.axis('off')\n \n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n # plt.show()\n\n# scale an array of images to a new size\ndef scale_images(images, new_shape):\n\timages_list = list()\n\tfor image in images:\n\t\t# resize with nearest neighbor interpolation\n\t\tnew_image = resize(image, new_shape, 0)\n\t\t# store\n\t\timages_list.append(new_image)\n\treturn np.asarray(images_list)\n\n\n# train(train_dataset, EPOCHS)\n\n# .restore function requires absolute path for some reason\ncheckpoint_dir = 'C:/Path/To/training_checkpoints' # path to folder with checkpoints\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))\n\n# Change num_examples_to_generate to change number of generated images\ngenerated_imgs = checkpoint.generator(seed, training=False).numpy()\n\n# Upsample images to be at least 75x75 for evaluation (InceptionV3)\ngenerated_imgs_upsampled = scale_images(generated_imgs, 
(3*height,3*width,3))\n","sub_path":"DCGAN/dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":7625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287016802","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n lenA = 0\n headA_c1 = headA\n while headA_c1:\n lenA += 1\n headA_c1 = headA_c1.next\n lenB = 0\n headB_c1 = headB\n while headB_c1:\n lenB += 1\n headB_c1 = headB_c1.next\n headA_c2 = headA\n headB_c2 = headB\n if lenA > lenB:\n for i in range(lenA-lenB):\n headA_c2 = headA_c2.next\n elif lenA < lenB:\n for i in range(lenB-lenA):\n headB_c2 = headB_c2.next\n while headA_c2 and headB_c2:\n if headA_c2 == headB_c2:\n return headA_c2\n headB_c2 = headB_c2.next\n headA_c2 = headA_c2.next\n return None\n\n","sub_path":"160. Intersection of Two Linked Lists/Intersection of Two Linked Lists.py","file_name":"Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630094094","text":"#!/usr/bin/python\n\nimport RPi.GPIO as GPIO\nimport time\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(4,GPIO.OUT) # This pin connected to servo PWM\nGPIO.setup(18,GPIO.OUT) # The rest are connect to LEDs\nGPIO.output(18,GPIO.LOW)\nGPIO.setup(23,GPIO.OUT)\nGPIO.output(23,GPIO.LOW)\nGPIO.setup(25,GPIO.OUT)\nGPIO.output(25,GPIO.LOW)\nGPIO.setup(24,GPIO.OUT)\nGPIO.output(24,GPIO.LOW)\n\np = GPIO.PWM(4,50) # Setting PWM for the servo\np.start(0) # Starting at 0 keeps it from buzzing while waiting for a button push\n\ndef toggle1(r): # RED\n if GPIO.input(18):\n p.ChangeDutyCycle(0)\n GPIO.output(18, GPIO.LOW)\n else:\n p.ChangeDutyCycle(12.0)\n time.sleep(0.5) # you can play around with this timing, but this and the next line quiets the servo after moving\n p.ChangeDutyCycle(0)\n GPIO.output(18, GPIO.HIGH)\n GPIO.output(23, GPIO.LOW) # these next lines turn off all others when one is picked\n GPIO.output(24, GPIO.LOW)\n GPIO.output(25, GPIO.LOW)\n\ndef toggle2(y): # YELLOW\n if GPIO.input(23):\n p.ChangeDutyCycle(0)\n GPIO.output(23, GPIO.LOW)\n else:\n p.ChangeDutyCycle(8.5)\n time.sleep(0.4)\n p.ChangeDutyCycle(0)\n GPIO.output(23, GPIO.HIGH)\n GPIO.output(18, GPIO.LOW)\n GPIO.output(24, GPIO.LOW)\n GPIO.output(25, GPIO.LOW)\n\ndef toggle3(g): # GREEN\n if GPIO.input(24):\n p.ChangeDutyCycle(0)\n GPIO.output(24, GPIO.LOW)\n else:\n p.ChangeDutyCycle(4.5)\n time.sleep(0.4)\n p.ChangeDutyCycle(0)\n GPIO.output(24, GPIO.HIGH)\n GPIO.output(18, GPIO.LOW)\n GPIO.output(23, GPIO.LOW)\n GPIO.output(25, GPIO.LOW)\n\ndef toggle4(b): # BLUE\n if GPIO.input(25):\n p.ChangeDutyCycle(0)\n GPIO.output(25, GPIO.LOW)\n else:\n p.ChangeDutyCycle(1.0)\n time.sleep(0.5)\n p.ChangeDutyCycle(0)\n GPIO.output(25, GPIO.HIGH)\n GPIO.output(18, GPIO.LOW)\n GPIO.output(23, GPIO.LOW)\n GPIO.output(24, GPIO.LOW)\n\ndef quitRoutine(q): #found that I needed to add this to the quit button so the LEDs would turn off on exit\n GPIO.cleanup()\n exit()\n\nroot = Tk()\nroot.geometry(\"620x200+300+300\")\nroot.title(\"Servo & LED Toggler\")\nroot.bind('r',toggle1)\nroot.bind('y',toggle2)\nroot.bind('g',toggle3)\nroot.bind('b',toggle4)\n\na = toggle1\nb = toggle2\nc = toggle3\nd = toggle4\ne = quitRoutine\n\nb1 
= Button(root, text=\"RED\", command=lambda: toggle1(a)) #Just happened to have these colors of LEDs, change to whatever you have\nb1.config(width=12, height=3)\nb1.pack(side=LEFT)\nb2 = Button(root, text=\"YELLOW\", command=lambda: toggle2(b))\nb2.config(width=12, height=3)\nb2.pack(side=LEFT)\nb3 = Button(root, text=\"GREEN\", command=lambda: toggle3(c))\nb3.config(width=12, height=3)\nb3.pack(side=LEFT)\nb4 = Button(root, text=\"BLUE\", command=lambda: toggle4(d))\nb4.config(width=12, height=3)\nb4.pack(side=LEFT)\n\nquitButton = Button(root, text=\"QUIT\", command=lambda: quitRoutine(e))\nquitButton.config(width=12, height=3)\nquitButton.pack(side=LEFT)\nroot.bind('q',quitRoutine)\n\nroot.mainloop()\n","sub_path":"python-servo/Project2/servo-led-console.py","file_name":"servo-led-console.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"245408901","text":"# MIT License\n#\n# Copyright (c) 2019 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\nimport os\n\nimport click\n\nfrom packit.local_project import LocalProject\nfrom packit.utils.repo import git_remote_url_to_https_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalProjectParameter(click.ParamType):\n \"\"\"\n Path or url.\n \"\"\"\n\n name = \"path_or_url\"\n\n def __init__(self, branch_param_name: str = None) -> None:\n \"\"\"\n :param branch_param_name: name of the cli function parameter (not the option name)\n \"\"\"\n super().__init__()\n self.branch_param_name = branch_param_name\n\n def convert(self, value, param, ctx):\n try:\n branch_name = None\n if self.branch_param_name:\n if self.branch_param_name in ctx.params:\n branch_name = ctx.params[self.branch_param_name]\n else: # use the default\n for param in ctx.command.params:\n if param.name == self.branch_param_name:\n branch_name = param.default\n\n if os.path.isdir(value):\n absolute_path = os.path.abspath(value)\n logger.debug(f\"Input is a directory: {absolute_path}\")\n local_project = LocalProject(\n working_dir=absolute_path,\n ref=branch_name,\n remote=ctx.obj.upstream_git_remote,\n )\n elif git_remote_url_to_https_url(value):\n logger.debug(f\"Input is a URL to a git repo: {value}\")\n local_project = LocalProject(\n git_url=value, ref=branch_name, remote=ctx.obj.upstream_git_remote\n )\n else:\n self.fail(\n \"Provided input path_or_url is not a directory nor an URL of a git repo.\"\n )\n\n if not (local_project.working_dir or local_project.git_url):\n self.fail(\n \"Parameter is not an existing directory nor correct git url.\",\n param,\n ctx,\n )\n return local_project\n except Exception as ex:\n self.fail(ex, param, ctx)\n","sub_path":"packit/cli/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491877286","text":"import numpy as np\n# import matplotlib.pyplot as plt\nimport pylab as plt\nimport mpl_toolkits.axes_grid1 as axgrid\nimport os\nfrom ras_amp_pka import model\n\nfigs = os.path.join('..', 'FIGS')\nif not os.path.exists(figs):\n os.makedirs(figs)\n\nproteins_of_interest = []\nfor i in model.initial_conditions:\n proteins_of_interest.append(i[1].name)\n\ncolors = 'RdGy'\ncolors = 'PiYG'\ncolors = 'seismic'\n\n\n\n\nvals = np.logspace(-.1, .1, 20)\nimage1 = np.loadtxt('../sens_ras_matrix.csv')\nall_runs_1 = []\nall_runs_2 = []\nfor i in range(0, len(image1), len(vals)):\n tmp = image1[:, i:i + len(vals)].flatten()\n tmp = tmp[tmp != 0]\n all_runs_1.append(tmp)\n\n\nvmax = max(np.abs(image1.min()), image1.max())\nvmin = -1 * vmax\nfig = plt.figure(figsize=(12, 7))\nax1 = fig.add_subplot(1, 2, 1)\n\n\nn = len(image1)\nim = ax1.imshow(image1, interpolation='nearest',\n origin='lower', cmap=plt.get_cmap(colors),\n vmin=vmin, vmax=vmax,\n extent=[0, n, 0, n]\n )\nshape_label = np.arange(len(vals) / 2, len(image1), len(vals))\nplt.xticks(shape_label, proteins_of_interest, rotation='vertical', fontsize=14)\nplt.yticks(shape_label, proteins_of_interest, fontsize=14)\nxticks = ([i for i in range(0, len(image1), len(vals))])\nax1.set_xticks(xticks, minor=True)\nax1.set_yticks(xticks, minor=True)\nplt.grid(True, which='minor', axis='both', linestyle='--')\ndivider = 
axgrid.make_axes_locatable(ax1)\ncax = divider.append_axes(\"top\", size=\"5%\", pad=0.3)\nplt.colorbar(im, cax=cax, orientation='horizontal')\n\n\nplt.grid(True, which='minor', axis='both', linestyle='--')\n\nax1.annotate('A', xy=(0, 1), xycoords='axes fraction', fontsize=20,\n xytext=(0, 75), textcoords='offset points',\n ha='left', va='top')\n\nplt.tight_layout()\n\n\nax2 = plt.subplot(1, 2, 2)\n\n\nax2.boxplot(all_runs_1, vert=False, labels=None, showfliers=False)\nax2.set_xlabel('Change in maximum accumulation of cAMP (%)', fontsize=14)\nxtickNames = plt.setp(ax2, yticklabels=proteins_of_interest)\nax2.yaxis.tick_left()\n\n\n\nax2.annotate('B', xy=(0, 1), xycoords='axes fraction', fontsize=20,\n xytext=(0, 25), textcoords='offset points',\n ha='left', va='top')\n\nplt.tight_layout()\nplt.subplots_adjust(top=0.9)\nplt.savefig(os.path.join(figs, 'ras_boxplot.eps'), bbox_tight='True')\nplt.savefig(os.path.join(figs, 'ras_boxplot.png'), bbox_tight='True',dpi=300)\nplt.show()\n\n","sub_path":"ANALYSIS/create_sens_boxplot_ras.py","file_name":"create_sens_boxplot_ras.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575728475","text":"# -*- coding: utf-8 -*\n\nfrom common import *\n\nclass EpisodeInfo:\n def __init__(self):\n self.title = None\n self.episodeUrl = None\n self.thumbNailUrl = None\n self.info = None\n self.length = 0\n self.qualities = dict()\n self.flashArgs = dict()\n self.isLive = False\n def GetContentUrl(self):\n return GetContentUrlFromUserQualSettings(self)\n\n def GetMediaItem(self):\n contentUrl = self.GetContentUrl()\n item = VideoClipObject(url=self.episodeUrl,\n title=self.title,\n duration=self.length,\n summary=self.info,\n thumb=Callback(GetThumb, url=self.thumbNailUrl)\n )\n return item\n\n\ndef GetContentUrlFromUserQualSettings(epInfo):\n url = None\n try:\n url = epInfo.qualities[QUAL_T_HIGHEST]\n url = epInfo.qualities[Prefs['quality']]\n except KeyError:\n if(url == None):\n return \"\"\n\n if(string.find(url, \"rtmp\") > -1):\n if (url.endswith('.mp4')):\n #special case rmpte stream with mp4 ending.\n url = URL_PLEX_PLAYER + url.replace(\"_definst_/\",\"_definst_&clip=mp4:\")\n else:\n url = URL_PLEX_PLAYER + url.replace(\"_definst_/\",\"_definst_&clip=\")\n\n if(epInfo.isLive):\n return epInfo.episodeUrl\n\n return url\n\ndef GetEpisodeUrlsFromPage(url):\n epUrls = []\n pageElement = HTML.ElementFromURL(url)\n xpathbase = TAG_DIV_ID % \"sb\"\n episodeElements = pageElement.xpath(xpathbase + \"//a[starts-with(@href,'/v/')]/@href\")\n\n for epElem in episodeElements:\n epUrl = URL_SITE + epElem\n epUrls.append(epUrl)\n\n return epUrls\n\ndef GetEpisodeInfo(episodeUrl, forceRefetch = False, isRecursed = False):\n Log(episodeUrl)\n epInfo = EpisodeInfo()\n\n if(forceRefetch == True):\n pageElement = HTML.ElementFromURL(episodeUrl, cacheTime = 0)\n else:\n pageElement = HTML.ElementFromURL(episodeUrl, cacheTime = CACHE_TIME_EPISODE)\n \n try:\n (contentUrls, flashArgs) = GetContentUrls(pageElement)\n except:\n if(isRecursed == False):\n return GetEpisodeInfo(episodeUrl, True, True)\n else:\n return None\n\n episodeTitle = pageElement.xpath(\"//meta[@property='og:title']/@content\")[0]\n episodeTitle = string.split(episodeTitle, \"|\")[0]\n\n infoElements = pageElement.xpath(\"//div[@id='description-episode']\")\n episodeInfo = TEXT_NO_INFO\n moreInfoUrl = \"\"\n if (len(infoElements) > 0):\n episodeInfo = infoElements[0].text.strip()\n \n moreInfoUrl = 
pageElement.xpath(\"//div[@class='info']//li[@class='episode']/a/@href\")\n if(len(moreInfoUrl) > 0):\n infoUrl = URL_SITE + moreInfoUrl[0]\n Log(\"moreInfoUrl: %s \" % infoUrl)\n infoElement = HTML.ElementFromURL(infoUrl, cacheTime=CACHE_TIME_EPISODE)\n infoText = MoreInfoPopup(infoElement).EpisodeInfo()\n Log(\"episodeInfo: %s\" % infoText)\n if(infoText != None):\n episodeInfo = infoText\n\n try:\n epLength = flashArgs['length']\n if(len(epLength) > 0):\n epLength = int(epLength)\n else:\n epLength = 0\n #Log(\"New Length millis: %d\" % (int(epLength) * 1000))\n epLength = epLength * 1000\n except KeyError:\n Log(\"No length found :(\")\n\n try:\n if(len(flashArgs['liveStart']) > 0):\n epInfo.isLive = True\n try:\n if(int(flashArgs['liveStart']) <= 0):\n episodeTitle = TEXT_LIVE_CURRENT + episodeTitle\n else:\n episodeTitle = TEXT_LIVE + episodeTitle\n except:\n episodeTitle = TEXT_LIVE + episodeTitle\n Log(\"IS LIVE\")\n except KeyError:\n Log('liveStart not found')\n\n hiresImage = GetHiResThumbNail(pageElement)\n if(hiresImage != None):\n episodeImageUrl = hiresImage\n else:\n episodeImageUrl = str(pageElement.xpath(\"//meta[@property='og:image']/@content\")[0])\n HTTP.PreCache(episodeImageUrl, cacheTime = CACHE_TIME_EPISODE)\n\n epInfo.title = episodeTitle\n epInfo.episodeUrl = episodeUrl\n epInfo.thumbNailUrl = episodeImageUrl \n epInfo.info = episodeInfo \n epInfo.length = epLength \n epInfo.qualities = contentUrls\n epInfo.flashArgs = flashArgs\n\n return epInfo \n\ndef GetHiResThumbNail(pageElement):\n imageTag = \"background=\"\n flashvars = pageElement.xpath(\"(//div[@class='video']//param[@name='flashvars'])[1]/@value\") \n\n if(len(flashvars) > 0):\n flashvars = flashvars[0]\n else:\n return None\n\n index = string.find(flashvars, imageTag) \n index = index + len(imageTag)\n indexAnd = string.find(flashvars, \"&\", index)\n #Log(\"INDEXES %d, %d\" % (index, indexAnd))\n if(index > -1 and indexAnd > index):\n bgimage = flashvars[index:indexAnd]\n #Log(\"NEW BG IMAGE: %s\" % bgimage)\n return bgimage\n return None\n\ndef GetContentUrls(pageElement):\n flashvars = pageElement.xpath(\"(//div[@class='video']//param[@name='flashvars'])[1]/@value\") \n d = dict()\n\n #Log(\"Flashvars: %s\" % flashvars)\n if(len(flashvars) > 0):\n flashvars = flashvars[0]\n#We can either get rtmp streams or flv\n if(string.find(flashvars, \"dynamicStreams\") > -1):\n urls = string.split(flashvars, \"url:\")\n for url in urls:\n #Log(\"Content URLS: %s\" % url)\n if(string.find(url, \"rtmp\") > -1):\n SetQuality(url, d)\n #Log(\"QualDict: %s\" % d)\n SetHighestQuality(d)\n else:\n tag = \"pathflv=\"\n index = string.find(flashvars, tag)\n index = index + len(tag)\n indexAnd = string.find(flashvars, \"&\", index)\n if(index > -1 and indexAnd > -1):\n url = flashvars[index:indexAnd]\n Log(\"FLV file: %s\" % url)\n d[QUAL_T_HIGHEST] = url \n d[QUAL_T_HD] = url \n d[QUAL_T_HIGH] = url \n d[QUAL_T_MED] = url \n d[QUAL_T_LOW] = url \n\n args = GetUrlArgs(flashvars) \n return (d, args)\n\ndef SetQuality(contentUrl, d):\n s = string.split(contentUrl, ',')\n Log(\"SetQuality: %s \" % s)\n if(string.find(s[1], '2400') > -1):\n d[QUAL_T_HIGHEST] = s[0]\n d[QUAL_T_HD] = s[0]\n elif(string.find(s[1], '1400') > -1):\n d[QUAL_T_HIGH] = s[0]\n elif(string.find(s[1], '850') > -1):\n d[QUAL_T_MED] = s[0]\n else:\n d[QUAL_T_LOW] = s[0]\n\ndef SetHighestQuality(d):\n try:\n highest = d[QUAL_T_LOW]\n d[QUAL_T_HIGHEST] = highest\n except KeyError:\n Log(\"No low quality\")\n try:\n highest = d[QUAL_T_MED]\n 
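# (note: each successive try like this overwrites 'highest', so the best stream\n        # available among low/med/high/hd ends up stored as QUAL_T_HIGHEST)\n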
d[QUAL_T_HIGHEST] = highest\n except KeyError:\n Log(\"No med quality\")\n \n try:\n highest = d[QUAL_T_HIGH]\n d[QUAL_T_HIGHEST] = highest\n except KeyError:\n Log(\"No high quality\")\n\n try:\n highest = d[QUAL_T_HD]\n d[QUAL_T_HIGHEST] = highest\n except KeyError:\n Log(\"No hd quality\")\n\n\n \n","sub_path":"Contents/Code/episode.py","file_name":"episode.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296501525","text":"import tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.estimator import regression\nimport pandas as pd\nimport numpy as np\n\ndf_train = pd.read_csv('./data/train.csv', header=0)\ndf_test = pd.read_csv('./data/test.csv', header=0)\n\nlabels = pd.DataFrame(pd.get_dummies(df_train['label']))\ndf_train = df_train.drop(['label'], axis=1)\n\n\nmsk = np.random.rand(len(df_train)) < 0.8\n\ntrain_data = df_train[msk].as_matrix().reshape([-1, 28, 28, 1])\ntrain_labels = labels[msk].as_matrix()\n\nvalidation_data = df_train[~msk].as_matrix().reshape([-1, 28, 28, 1])\nvalidation_labels = labels[~msk].as_matrix()\n\ntest = df_test.as_matrix().reshape([-1, 28, 28, 1])\n\ndel df_train\ndel df_test\ndel msk\n\n# Building convolutional network\nnetwork = input_data(shape=[None, 28, 28, 1], name='input')\nnetwork = conv_2d(network, 32, 3, activation='relu')\nnetwork = max_pool_2d(network, 2)\nnetwork = conv_2d(network, 64, 3, activation='relu')\nnetwork = max_pool_2d(network, 2)\nnetwork = fully_connected(network, 1024, activation='relu')\nnetwork = dropout(network, 0.5)\nnetwork = fully_connected(network, 10, activation='softmax')\nnetwork = regression(network, optimizer='adam', learning_rate=1e-5, loss='categorical_crossentropy', name='target')\n\n# Training\nmodel = tflearn.DNN(network, tensorboard_verbose=0)\nmodel.fit({'input': train_data}, {'target': train_labels}, n_epoch=10,\n validation_set=({'input': validation_data}, {'target': validation_labels}), show_metric=True, run_id='convnet_mnist')\n\npredictions = None\nfor chunk in np.split(test, 4):\n if predictions is None:\n predictions = model.predict(chunk)\n else:\n predictions = np.append(predictions, model.predict(chunk), axis=0)\n\npredictions = pd.DataFrame(predictions).idxmax(axis=1)\npredictions.index += 1\npredictions.to_csv('./data/predictions.csv', header=['Label'], index=True, index_label='ImageId')\n","sub_path":"kaggle_mnist/02_mnist_conv.py","file_name":"02_mnist_conv.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252522286","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nimport numpy as np\r\nfrom openpyxl import *\r\n\r\n\r\nent=[0,0,0,0,0,0,0,0,0,0]\r\n#################MENU##############################\r\ndef new(event=\"<'Button-1'>\"):\r\n temp_win = Toplevel(window)\r\n temp_win.title(\"New\")\r\n temp_win.transient(window)\r\n temp_win.focus_set()\r\n temp_win.minsize(250, 100)\r\n temp_win.maxsize(250, 100)\r\n temp_win.geometry(\"250x100+450+250\")\r\n temp_win.wm_iconbitmap('excel_icon.ico')\r\n name_lbl=Label(temp_win,text=\"Enter File name where data to be saved \")\r\n name_lbl.focus_set()\r\n name_lbl.pack()\r\n name_entry=Entry(temp_win)\r\n name_entry.pack()\r\n \r\n def create(event=''):\r\n Excel_file.set(name_entry.get()+\".xlsx\")\r\n temp_win.destroy()\r\n \r\n 
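# (editor's note: the bind below presumably used the '<Return>' event string, so\r\n    #  pressing Enter in the name entry also triggers create())\r\n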
name_entry.bind('',create) \r\n create_btn=Button(temp_win,command=create,text=\"Create\")\r\n create_btn.pack()\r\n create_btn.place(height=25,width=100,x=75,y=50)\r\n\r\n\r\ndef exit_win(event=\"<'Button-1'>\"):\r\n window.destroy()\r\n########################MENU ENDS################\r\n\r\n##############INITIALIZING GUI###################\r\ndef create_gui():\r\n lbl1=Label(window,text=\"Select Variables\",relief=\"sunken\",font='Comic 10 bold',bg=\"white\")\r\n lbl1.pack()\r\n lbl1.place(x=150,y=50,height=40,width=180)\r\n \r\n option=OptionMenu(window,var,1,2,3,4,5,command=start)\r\n option.pack()\r\n option.place(x=350,y=50,height=40)\r\n var.set(2); #default selection\r\n start('');\r\n \r\n lbl4=Label(window,relief=\"groove\")\r\n lbl4.pack()\r\n lbl4.place(x=150,y=355,height=100,width=755)\r\n \r\n lbl_c1=Label(window,text=\"C1\",relief=\"raised\",font='Comic 10 bold')\r\n lbl_c1.pack()\r\n lbl_c1.place(x=205,y=360)\r\n \r\n lbl_c2=Label(window,text=\"C2\",relief=\"raised\",font='Comic 10 bold')\r\n lbl_c2.pack()\r\n lbl_c2.place(x=360,y=360)\r\n \r\n lbl_wmax=Label(window,text=\"Wmax\",relief=\"raised\",font='Comic 10 bold')\r\n lbl_wmax.pack()\r\n lbl_wmax.place(x=505,y=360)\r\n \r\n lbl_wmin=Label(window,text=\"Wmin\",relief=\"raised\",font='Comic 10 bold')\r\n lbl_wmin.pack()\r\n lbl_wmin.place(x=662,y=360)\r\n \r\n lbl_np=Label(window,text=\"Num_Particles\",relief=\"raised\",font='Comic 10 bold')\r\n lbl_np.pack()\r\n lbl_np.place(x=788,y=360)\r\n \r\n lbl_fact=Label(window,text=\"Factors\",relief=\"sunken\",font='Comic 10 bold',bg=\"white\")\r\n lbl_fact.pack()\r\n lbl_fact.place(x=10,y=400,height=20,width=100)\r\n \r\n lbl_eqn=Label(window,text=\"Equation\",relief=\"sunken\",font='Comic 10 bold',bg=\"white\")\r\n lbl_eqn.pack()\r\n lbl_eqn.place(x=10,y=510,height=20,width=100)\r\n global ent_c1\r\n global ent_c2\r\n global ent_wmin\r\n global ent_wmax\r\n global ent_eqn\r\n global ent_np\r\n ent_c1=Entry(window,textvariable=c1)\r\n ent_c1.pack()\r\n ent_c1.place(x=155,y=400)\r\n \r\n ent_c2=Entry(window,textvariable=c2)\r\n ent_c2.pack()\r\n ent_c2.place(x=310,y=400)\r\n \r\n ent_wmax=Entry(window,textvariable=wmax)\r\n ent_wmax.pack()\r\n ent_wmax.place(x=465,y=400)\r\n \r\n ent_wmin=Entry(window,textvariable=wmin)\r\n ent_wmin.pack()\r\n ent_wmin.place(x=620,y=400)\r\n \r\n ent_np=Entry(window,textvariable=num_p)\r\n ent_np.pack()\r\n ent_np.place(x=775,y=400)\r\n\r\n ent_eqn=Entry(window,textvariable=eqn,font='fixedsys 16')\r\n ent_eqn.pack()\r\n ent_eqn.place(x=150,y=500,height=40,width=750)\r\n\r\n menubar=Menu(window)\r\n filemenu=Menu(menubar,tearoff=0)\r\n filemenu.add_command(label=\"New Ctrl + N\",command=new)\r\n filemenu.add_separator()\r\n filemenu.add_command(label=\"Exit Ctrl + Q\",command=exit_win)\r\n menubar.add_cascade(label=\"File\",menu=filemenu)\r\n window.config(menu=menubar)\r\n window.bind('',new)\r\n window.bind('',exit_win)\r\n \r\n global t\r\n t=Text(window,height=41,width=55,yscrollcommand=scrollbar.set,xscrollcommand=scrollbarx.set)\r\n t.pack()\r\n t.place(x=905)\r\n t.insert(END,\"\\t RESULT IN EACH ITERATION\\n\")\r\n t.config(state=DISABLED,wrap=NONE)\r\n scrollbar.config(command=t.yview)\r\n scrollbarx.config(command=t.xview)\r\n \r\ndef start(event):\r\n\r\n lbl2=Label(window,relief=\"groove\")\r\n lbl2.pack()\r\n lbl2.place(x=150,y=150,height=200,width=755)\r\n lbl4=Label(window,text=\"Minimum\",relief=\"sunken\",font='Comic 10 bold',bg=\"white\")\r\n lbl4.pack()\r\n lbl4.place(x=10,y=200,height=20,width=100)\r\n 
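    # NB: the empty event strings in this GUI's bind() calls (name_entry.bind('', create)
    # above, window.bind('', new) and window.bind('', exit_win) in create_gui) read like
    # Tk event sequences that were stripped as markup when this file was captured.
    # Judging from the menu labels ("New Ctrl + N", "Exit Ctrl + Q") and the
    # Enter-to-create dialog, they were presumably:
    #
    #   name_entry.bind('<Return>', create)
    #   window.bind('<Control-n>', new)
    #   window.bind('<Control-q>', exit_win)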
lbl5=Label(window,text=\"Maximum\",relief=\"sunken\",font='Comic 10 bold',bg=\"white\")\r\n lbl5.pack()\r\n lbl5.place(x=10,y=300,height=20,width=100)\r\n for i in range(0,var.get()):\r\n lbl3=Label(window,text=str(Vars[i]),font='Comic 12 bold',relief='raised')\r\n lbl3.pack()\r\n lbl3.place(x=210+(i*155),y=155)\r\n for j in range(2):\r\n ent[i*2+j]=Entry(window)\r\n ent[i*2+j].pack()\r\n ent[i*2+j].place(x=(i+1)*155,y=(j+1)*100+100)\r\n\r\n#####################GUI ENDS############################\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n####################FINISH MSG############################\r\n \r\ndef showmsg():\r\n temp_win = Toplevel(window)\r\n temp_win.title(\"SUCCESS!\")\r\n temp_win.transient(window)\r\n temp_win.focus_set()\r\n temp_win.minsize(250, 50)\r\n temp_win.maxsize(250, 50)\r\n temp_win.geometry(\"250x100+450+250\")\r\n temp_win.wm_iconbitmap('success.ico')\r\n name_lbl=Label(temp_win,text=str(\"DATA SAVED IN \"+Excel_file.get()),font='fixedsys 16')\r\n name_lbl.focus_set()\r\n name_lbl.pack(fill=X)\r\n name_lbl.place(y=20)\r\n \r\n########################################################### \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n###########################################################\r\n\r\n\r\n\r\n\r\n\r\n \r\nfrom numpy import *\r\nfrom random import *\r\nfrom py_expression_eval import Parser\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n########################PSO##############################\r\ndef pso():\r\n\r\n\r\n \r\n ##################Loading Excel File#####################\r\n global wb\r\n global ws\r\n wb=Workbook()\r\n wb.save(str(\".\\\\\"+Excel_file.get()));\r\n wb.close()\r\n wb=load_workbook(Excel_file.get())\r\n ws=wb.active\r\n ##################Loading Excel File Done################\r\n\r\n\r\n\r\n \r\n t.delete(1.0,END)\r\n t.insert(END,\"\\t RESULT AT EACH ITERATION\\n\\n\")\r\n window.update_idletasks()\r\n parser=Parser()\r\n n=var.get()\r\n itermax=500\r\n Pmin= 0\r\n Pmax= 1000\r\n c1=float(ent_c1.get())\r\n c2=float(ent_c2.get())\r\n pop=int(ent_np.get())\r\n error=zeros((itermax,1),dtype=float)\r\n global P\r\n global pbest\r\n global gbest\r\n global fit\r\n global v\r\n global w\r\n global Lp\r\n global Lg\r\n global fmin\r\n global fmax\r\n global Vmax_n\r\n global gbt\r\n global L\r\n global vmax\r\n P=zeros((pop,1),dtype=float)\r\n fmax=zeros((n,1),dtype=int)\r\n fmin=zeros((n,1),dtype=int)\r\n pbest=zeros((pop,1),dtype=float)\r\n gbest=0\r\n fit = zeros((pop,1),dtype=float)\r\n v = zeros((pop,n),dtype=float)\r\n vmax = zeros((n,1),dtype=float)\r\n wmax=float(ent_wmax.get())\r\n wmin=float(ent_wmin.get())\r\n Lp = zeros((pop,n),dtype=float)\r\n err = zeros((pop,1),dtype=float)\r\n Lg = zeros((n,1),dtype=float)\r\n for i in range (n):\r\n fmax[i]=ent[2*i+1].get()\r\n\r\n \r\n for i in range (n):\r\n fmin[i]=ent[2*i].get()\r\n \r\n Vmax_n = [100,100,100,100,100]\r\n gbt=0\r\n L = zeros((pop,n),dtype=float)\r\n parser.ops1['sin'] = sin\r\n parser.ops1['cos'] = cos\r\n parser.ops1['exp'] = exp\r\n parser.ops1['sqrt'] = sqrt\r\n parser.ops2['^'] = power\r\n pi=3.14159\r\n t.config(state=NORMAL)\r\n\r\n\r\n for j in range(pop):\r\n for k in range(n):\r\n L[j,k] = (randint(fmin[k],fmax[k]))+randint(1,100)/100;\r\n print(L)\r\n for i in range(n):\r\n vmax[i] = (fmax[i]-fmin[i]/Vmax_n[i]);\r\n if(n==1):\r\n print('x fcn value ')\r\n t.insert(END,'x fcn value\\n')\r\n ws.cell(row=1,column=1).value=\"X\"\r\n ws.cell(row=1,column=2).value=\"F(x)\"\r\n ws.cell(row=1,column=3).value=\"ITERATION\"\r\n elif(n==2):\r\n print('x y fcn value ')\r\n 
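    # The velocity-clamp initialisation above, vmax[i] = (fmax[i]-fmin[i]/Vmax_n[i]),
    # divides only fmin by Vmax_n because of where the parenthesis closes; the
    # conventional PSO clamp scales the whole search interval. Assuming that was the
    # intent, the line would read:
    #
    #   vmax[i] = (fmax[i] - fmin[i]) / Vmax_n[i]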
t.insert(END,'x y fcn value\\n')\r\n ws.cell(row=1,column=1).value=\"X\"\r\n ws.cell(row=1,column=2).value=\"Y\"\r\n ws.cell(row=1,column=3).value=\"F(x,y)\"\r\n ws.cell(row=1,column=4).value=\"ITERATION\"\r\n elif(n==3):\r\n print('x y z fcn value ')\r\n t.insert(END,'x y z fcn value\\n')\r\n ws.cell(row=1,column=1).value=\"X\"\r\n ws.cell(row=1,column=2).value=\"Y\"\r\n ws.cell(row=1,column=3).value=\"Z\"\r\n ws.cell(row=1,column=4).value=\"F(x,y,z)\"\r\n ws.cell(row=1,column=5).value=\"ITERATION\"\r\n elif(n==4):\r\n print('x y z r fcn value \\n')\r\n t.insert(END,'x y z r fcn value')\r\n ws.cell(row=1,column=1).value=\"X\"\r\n ws.cell(row=1,column=2).value=\"Y\"\r\n ws.cell(row=1,column=3).value=\"Z\"\r\n ws.cell(row=1,column=4).value=\"R\"\r\n ws.cell(row=1,column=5).value=\"F(x,y,z,r)\"\r\n ws.cell(row=1,column=6).value=\"ITERATION\"\r\n elif(n==5):\r\n print('x y z r s fcn value \\n')\r\n t.insert(END,'x y z r s fcn value')\r\n ws.cell(row=1,column=1).value=\"X\"\r\n ws.cell(row=1,column=2).value=\"Y\"\r\n ws.cell(row=1,column=3).value=\"Z\"\r\n ws.cell(row=1,column=4).value=\"R\"\r\n ws.cell(row=1,column=5).value=\"S\"\r\n ws.cell(row=1,column=6).value=\"F(x,y,z,r,s)\"\r\n ws.cell(row=1,column=7).value=\"ITERATION\"\r\n window.update_idletasks()\r\n\r\n for iteration in range(1,itermax+1):\r\n for i in range (pop):\r\n for j in range (n):\r\n if L[i][j]<=fmin[j]:\r\n L[i][j]=float(fmin[j]);\r\n if L[i][j]>=fmax[j]:\r\n L[i][j]=float(fmax[j]);\r\n if(n==1):\r\n X=array(L[:,0])\r\n P=parser.parse(ent_eqn.get()).evaluate({'x':X,'pi':pi})\r\n elif(n==2):\r\n X=array(L[:,0])\r\n Y=array(L[:,1])\r\n P=parser.parse(ent_eqn.get()).evaluate({'x':X,'y':Y,'pi':pi})\r\n elif(n==3):\r\n X=array(L[:,0])\r\n Y=array(L[:,1])\r\n Z=array(L[:,2])\r\n P=parser.parse(ent_eqn.get()).evaluate({'x':X,'y':Y,'z':Z,'pi':pi})\r\n elif(n==4):\r\n X=array(L[:,0])\r\n Y=array(L[:,1])\r\n Z=array(L[:,2])\r\n R=array(L[:,3])\r\n P=parser.parse(ent_eqn.get()).evaluate({'x':X,'y':Y,'z':Z,'r':R,'pi':pi})\r\n elif(n==5):\r\n X=array(L[:,0])\r\n Y=array(L[:,1])\r\n Z=array(L[:,2])\r\n R=array(L[:,3])\r\n S=array(L[:,4])\r\n P=parser.parse(ent_eqn.get()).evaluate({'x':X,'y':Y,'z':Z,'r':R,'s':S,'pi':pi})\r\n \r\n \r\n for i in range (pop):\r\n if P[i]<=Pmin:\r\n P[i]=Pmin\r\n elif P[i]>=Pmax:\r\n P[i]=Pmax\r\n\r\n for i in range(pop):\r\n fit[i]=1/(1+abs(P[i]))\r\n \r\n if fit[i]>=pbest[i]:\r\n pbest[i]=fit[i]\r\n Lp[i][0]=L[i][0]\r\n Lp[i][1]=L[i][1]\r\n\r\n if max(pbest)>=gbest:\r\n gbest=max(pbest)\r\n for i in range(pop):\r\n \r\n if fit[i]==gbest:\r\n for j in range(n):\r\n Lg[j]=L[i][j]\r\n \r\n w=wmax-(wmax-wmin)*iteration/itermax; \r\n\r\n for i in range(pop):\r\n for j in range(n):\r\n v[i][j]=(w*v[i][j]+c1*(randint(0,1000)/1000)*(Lp[i][j]-L[i][j])+c2*(randint(0,1000)/1000)*(Lg[j]-L[i][j]))\r\n if v[i][j]>vmax[j]:\r\n v[i][j]=vmax[j]\r\n elif v[i][j]<=-vmax[j]:\r\n v[i][j]=-vmax[j]\r\n\r\n for i in range(pop):\r\n for j in range(n):\r\n L[i][j]=L[i][j]+v[i][j]\r\n global e1\r\n e1 = (1/gbest)-1;\r\n if(n==1):\r\n t.insert(END,str(str(Lg[0])+' '+str(e1)+' '+str(iteration)+'\\n'))\r\n ws.cell(row=iteration+1,column=1).value=str(Lg[0]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=2).value=str(e1).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=3).value=iteration\r\n elif(n==2):\r\n t.insert(END,str(str(Lg[0])+' '+str(Lg[1])+' '+str(e1)+' '+str(iteration)+'\\n'))\r\n ws.cell(row=iteration+1,column=1).value=str(str(Lg[0])).replace('[','').replace(']','')\r\n 
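            # The five n-specific branches that build the evaluate() environment above
            # reduce to one dict comprehension over the module-level Vars list
            # (py_expression_eval's evaluate() takes a plain dict of bindings, and it
            # already receives numpy arrays here, so behaviour is unchanged):
            #
            #   env = {Vars[j]: array(L[:, j]) for j in range(n)}
            #   env['pi'] = pi
            #   P = parser.parse(ent_eqn.get()).evaluate(env)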
ws.cell(row=iteration+1,column=2).value=str(Lg[1]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=3).value=str(e1).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=4).value=iteration\r\n elif(n==3):\r\n t.insert(END,str(str(Lg[0])+' '+str(Lg[1])+' '+str(Lg[2])+' '+str(e1)+' '+str(iteration)+'\\n'))\r\n ws.cell(row=iteration+1,column=1).value=str(Lg[0]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=2).value=str(Lg[1]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=3).value=str(Lg[2]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=4).value=str(e1).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=5).value=iteration\r\n elif(n==4):\r\n t.insert(END,str(str(Lg[0])+' '+str(Lg[1])+' '+str(Lg[2])+' '+str(Lg[3])+' '+str(e1)+' '+str(iteration)+'\\n'))\r\n ws.cell(row=iteration+1,column=1).value=str(Lg[0]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=2).value=str(Lg[1]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=3).value=str(Lg[2]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=4).value=str(Lg[3]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=5).value=str(e1).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=6).value=iteration\r\n elif(n==5):\r\n t.insert(END,str(str(Lg[0])+' '+str(Lg[1])+' '+str(Lg[2])+' '+str(Lg[3])+' '+str(Lg[4])+' '+str(e1)+' '+str(iteration)+'\\n'))\r\n ws.cell(row=iteration+1,column=1).value=str(Lg[0]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=2).value=str(Lg[1]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=3).value=str(Lg[2]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=4).value=str(Lg[3]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=5).value=str(Lg[4]).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=6).value=str(e1).replace('[','').replace(']','')\r\n ws.cell(row=iteration+1,column=7).value=iteration\r\n \r\n \r\n if e1<=0:\r\n print('Converged in iterations ', (iteration))\r\n savefile()\r\n showmsg()\r\n break\r\n elif iteration==itermax-1:\r\n print(\"Not converged \")\r\n \r\n window.update_idletasks()\r\n t.see(\"end\")\r\n scrollbar.config(command=t.yview)\r\n scrollbarx.config(command=t.xview)\r\n\r\n \r\n###################################pso###################################\r\n\r\n\r\n\r\n\r\nVars=['x','y','z','r','s']\r\nwindow =Tk()\r\nwindow.geometry('500x600')\r\nwindow.state('zoomed')\r\nwindow.title(\"Particle Swarm Optimizer\");\r\nwindow.wm_iconbitmap('icon_ZAw_icon.ico')\r\nvar=IntVar()\r\nc1=StringVar()\r\nc2=StringVar()\r\nwmax=StringVar()\r\nwmin=StringVar()\r\nnum_p=StringVar()\r\nExcel_file=StringVar()\r\nExcel_file.set(\"\")\r\nlim=[[0,0],[0,0],[0,0],[0,0],[0,0]]\r\neqn=StringVar()\r\nglobal scrollbar\r\nscrollbar = Scrollbar(window)\r\nscrollbar.pack(side=RIGHT,fill=Y)\r\nglobal scrollbarx\r\nscrollbarx = Scrollbar(window)\r\nscrollbarx.pack()\r\nscrollbarx.place(x=905,y=661,width=448,height=40)\r\nscrollbarx.config(orient=HORIZONTAL)\r\ncreate_gui()\r\nglobal ws\r\nglobal wb\r\n\r\ndef savefile():\r\n wb.save(str(\".\\\\\"+Excel_file.get()));\r\n\r\ndef get_file_and_opt():\r\n if(Excel_file.get()==\"\"):\r\n new('')\r\n pso()\r\n \r\nstart=Button(window,text=\"OPTIMIZE\",command=get_file_and_opt,font='fixedsys 16')\r\nstart.pack();\r\nstart.place(x=500,y=600)\r\nt.config(state=DISABLED)\r\nmainloop()\r\n\r\n\r\n 
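# Two follow-up sketches against the PSO loop above, hedged as readings of intent:
#
# 1) The personal-best update copies only the first two coordinates
#    (Lp[i][0] = L[i][0]; Lp[i][1] = L[i][1]), so for var.get() > 2 the remaining
#    dimensions of a particle's best position are never refreshed. Copying the whole
#    row would fix that:
#
#       if fit[i] >= pbest[i]:
#           pbest[i] = fit[i]
#           Lp[i, :] = L[i, :]
#
# 2) The five n-specific worksheet writers collapse to one loop over coordinates:
#
#       row = iteration + 1
#       for j in range(n):
#           ws.cell(row=row, column=j + 1).value = str(Lg[j]).strip('[]')
#       ws.cell(row=row, column=n + 1).value = str(e1).strip('[]')
#       ws.cell(row=row, column=n + 2).value = iteration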
\r\n","sub_path":"gui+pso.py","file_name":"gui+pso.py","file_ext":"py","file_size_in_byte":16264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4034394","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport json\nimport logging\n\nfrom tornado.ioloop import IOLoop\nfrom tornado import gen\nfrom tornado.iostream import StreamClosedError\nfrom tornado.tcpserver import TCPServer\nfrom tornado.options import options, define\n\nfrom litepipeline.manager import logger\n\nLOG = logging.getLogger(__name__)\n\ncwd = os.path.split(os.path.realpath(__file__))[0]\n\ndefine(\"port\", default=6001, help=\"TCP port to listen on\")\n\n\nclass EchoServer(TCPServer):\n @gen.coroutine\n def handle_stream(self, stream, address):\n while True:\n try:\n data = yield stream.read_until(b\"\\n\")\n LOG.info(\"Received bytes: %s\", data)\n if not data.endswith(b\"\\n\"):\n data = data + b\"\\n\"\n yield stream.write(data)\n except StreamClosedError:\n LOG.warning(\"Lost client at host %s\", address[0])\n break\n except Exception as e:\n print(e)\n\n\n\nif __name__ == \"__main__\":\n logger.config_logging(file_name = \"test_tornado_tcp.log\",\n log_level = \"DEBUG\",\n dir_name = os.path.join(cwd, \"logs\"),\n day_rotate = False,\n when = \"D\",\n interval = 1,\n max_size = 20,\n backup_count = 5,\n console = True)\n\n LOG.debug(\"test start\")\n \n try:\n options.parse_command_line()\n server = EchoServer()\n server.listen(options.port)\n LOG.info(\"Listening on TCP port %d\", options.port)\n IOLoop.current().start()\n except Exception as e:\n LOG.exception(e)\n\n LOG.debug(\"test end\")\n","sub_path":"test/test_tornado_tcp_server.py","file_name":"test_tornado_tcp_server.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"86311977","text":"\n\nfrom xai.brain.wordbase.nouns._sidestroke import _SIDESTROKE\n\n#calss header\nclass _SIDESTROKES(_SIDESTROKE, ):\n\tdef __init__(self,): \n\t\t_SIDESTROKE.__init__(self)\n\t\tself.name = \"SIDESTROKES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sidestroke\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sidestrokes.py","file_name":"_sidestrokes.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"469218637","text":"from .Common import *\nclass GradeDownloader(OucJw):\n\t\"\"\"docstring for GradeDownloader\"\"\"\n\tdef __init__(self,opener = None):\n\t\tsuper(GradeDownloader, self).__init__(opener)\n\n\tdef getPersonGrade(self,username,xn = None,xq = None):\n\t\t\turl = self.HOST + \"student/xscj.stuckcj_data.jsp\"\n\t\t\trefer = self.HOST + \"student/xscj.stuckcj.jsp?menucode=JW130705\"\n\t\t\tif xn == None or xq == None:\n\t\t\t\tparam = \"xn=2017&xn1=2016&xq=1&ysyx=yscj&sjxz=sjxz1&userCode=%s&ysyxS=on&sjxzS=on\"%(username)\n\t\t\telse:\n\t\t\t\tparam = \"xn=%s&xq=%s&ysyx=yscj&sjxz=sjxz3&userCode=%s&ysyxS=on&sjxzS=on\"%(xn,xq,username)\n\t\t\ttoken = self.getKeytoken()\n\t\t\tdata = self.encodeParam(param,token)\n\t\t\tdata = self.opener.get(url, data, {'Referer':refer})\n\t\t\tif(data != -1):\n\t\t\t\tdata = deal_table(data)\n\t\t\treturn data\n\n\tdef getClassGrade(self,Year,Term,kcdm,skbjdm,flag='2'):\n\t\turl = self.HOST + 'wjstgdfw/cjlr.ckxscj.fkcaskbjckcj_rptOrigina_data_exp.jsp'\n\t\turl2 = self.HOST + 'wjstgdfw/cjlr.ckxscj.fkcaskbjckcj_rptEffecy_data_exp.jsp'\n\t\trefer = 
self.HOST + 'wjstgdfw/cjlr.ckxscj.fkcaskbjckcj_rpt.jsp'\n\t\tdata = {\n\t\t\t'xn':Year,\n\t\t\t'xq':Term,\n\t\t\t'kcdm':kcdm,\n\t\t\t'skbjdm':skbjdm,\n\t\t\t'pxfs':'xhsx',\n\t\t}\n\t\tdata = self.opener.post(url, data, {'Referer':refer})\n\t\tif data != -1:\n\t\t\tdata = deal_table(data)\n\t\treturn data\n\n\tdef personGrade(self,username,xn = None,xq = None):\n\t\tdata = self.getPersonGrade(username,xn,xq)\n\t\tif(data == -1 or len(data) == 0):\n\t\t\treturn []\n\t\tdataset = []\n\t\tfor x in data:\n\t\t\tif(len(x) <= 2):\n\t\t\t\ttry:\n\t\t\t\t\txn = re.findall(r'\\d+',x[0])[0]\n\t\t\t\t\tcode = {'秋': 1, '春': 2, '夏': 0}\n\t\t\t\t\tfor k,v in code.items():\n\t\t\t\t\t\tif(k in x[0]):\n\t\t\t\t\t\t\txq = v\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif(xq == 2):\n\t\t\t\t\t\txn = int(xn) - 1\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass \n\t\t\t\tcontinue\n\t\t\tif x[0] == '序号':\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tkcdm = re.findall(r'(\\w*[0-9]+)\\w*',x[1])[0]\n\t\t\texcept:\n\t\t\t\tkcdm = '0'\n\t\t\ttemp = {\n\t\t\t'username':username,\n\t\t\t'xn':str(xn),\n\t\t\t'xq':str(xq),\n\t\t\t'kcdm':kcdm,\n\t\t\t'course':re.sub(r'\\[\\d+\\]',\"\",x[1]),\n\t\t\t'kc':x[1],\n\t\t\t'xf':float(x[2]),\n\t\t\t'jd':cal_gpa(x[6]),\n\t\t\t'score':x[6],\n\t\t\t'type':x[3],\n\t\t\t'qdfs':x[-2]\n\t\t\t}\n\t\t\tdataset.append(temp)\n\t\treturn dataset\n\n\tdef classGrade(self,xn,xq,kcdm,skbjdm):\n\t\tdata = self.getClassGrade(xn,xq,kcdm,skbjdm)\n\t\tif(data == -1 or len(data) == 0):\n\t\t\tlog(\"getPersonGrade error\")\n\t\t\treturn -1\n\t\treturn data\n","sub_path":"Plugins/OucJw/GradeDownloader.py","file_name":"GradeDownloader.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228744590","text":"#Bike _1\r\n\r\n#Importing libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nwarnings.filterwarnings('ignore', category = DeprecationWarning)\r\nfrom scipy import stats\r\nimport math\r\n\r\n\r\n#First view into data\r\nBike = pd.read_csv('train.csv')\r\nBike_test = pd.read_csv('test.csv')\r\n\r\nBike.describe()\r\nBike_test.describe()\r\n#we may see that there is no missing data :D\r\n\r\nBike.dtypes\r\nBike_test.dtypes\r\n\r\nBike.skew()\r\nBike_test.skew()\r\n\r\nBike.hist(bins = 50)\r\nBike.select_dtypes([float, int]).apply(stats.normaltest)\r\n\r\n\r\n#Visualising the data\r\n#Scatter plot of atemp/temp ratio\r\nplt.scatter(x = Bike['temp'], y = Bike['atemp'])\r\nplt.scatter(x = Bike_test['temp'], y = Bike_test['atemp'])\r\n#It is quite linear connesction\r\n\r\n#Corelation of season/weather in case of weather\r\nsns.stripplot(x = 'season', y = 'count', hue = 'weather', data = Bike)\r\n#Truly? 
I think that weather is not connected with number of rents at all :(\r\n\r\n#Comparing number of casual rentiers to registered in case of season\r\nBike.groupby('season').agg('sum')[['registered', 'casual']].plot(kind = 'bar', stacked = True, colors = ['g', 'r'])\r\n#There is much moore registered riders than casuals, and the biggest number of rentiers is for fall season (3)\r\n\r\n#Comparing number of casual rentiers to registered in case of weather\r\nBike.groupby('weather').agg('sum')[['registered', 'casual']].plot(kind = 'bar', stacked = True, colors = ['g', 'r'])\r\n#There is a big diffrence beetwen riders in case of weather\r\n\r\n#Split datatime into year, month and hour column\r\nyear = set()\r\nfor datetime in Bike['datetime']:\r\n year.add(datetime.split('-')[0].strip())\r\nprint (year)\r\nYearDictionary = {'2011':2011, '2012':2012}\r\n\r\nmonth = set()\r\nfor datetime in Bike['datetime']:\r\n month.add(datetime.split('-')[1].split('-')[0].strip())\r\nprint (month)\r\nMonthDictionary = {'01':1, '02':2, '03':3, '04':4, '05':5, '06':6, '07':7, '08':8, '09':9, '10':10, '11':11, '12':12}\r\n\r\nhour = set()\r\nfor datetime in Bike['datetime']:\r\n hour.add(datetime.split(' ')[1].split(':')[0].strip())\r\nprint (hour)\r\nHourDictionary = {'01':1, '02':2, '03':3, '04':4, '05':5, '06':6, '07':7, '08':8, '09':9, '10':10, '11':11, '12':12,\r\n '13':13, '14':14, '15':15, '16':16, '17':17, '18':18, '19':19, '20':20, '21':21, '22':22, '23':23, '00':00}\r\n\r\n\r\n#Making new columns\r\ndef Bike_Year():\r\n global Bike\r\n Bike['year'] = Bike['datetime'].map(lambda datetime:datetime.split('-')[0].strip())\r\n Bike['year'] = Bike.year.map(YearDictionary)\r\n return Bike\r\nBike = Bike_Year()\r\n\r\ndef Bike_Month():\r\n global Bike\r\n Bike['month'] = Bike['datetime'].map(lambda datetime:datetime.split('-')[1].split('-')[0].strip())\r\n Bike['month'] = Bike.month.map(MonthDictionary)\r\n return Bike\r\nBike = Bike_Month()\r\n\r\ndef Bike_Hour():\r\n global Bike\r\n Bike['hour'] = Bike['datetime'].map(lambda datetime:datetime.split(' ')[1].split(':')[0].strip())\r\n Bike['hour'] = Bike.hour.map(HourDictionary)\r\n return Bike\r\nBike = Bike_Hour()\r\n\r\nBike.drop('datetime', axis = 1, inplace = True)\r\n\r\n#Comming back to visualisation :3\r\n#Number of rentings in each -> hour, month, year\r\nsns.stripplot(data = Bike, x = 'hour', y = 'count', hue = 'season')\r\nsns.stripplot(data = Bike, x = 'hour', y = 'count', hue = 'weather')\r\nsns.stripplot(data = Bike, x = 'hour', y = 'count', hue = 'holiday')\r\n#What about new dictionary? 
The number for each hour will be replaced by the dominant of rentings in this hour\r\n\r\nsns.stripplot(data = Bike, x = 'month', y = 'count', hue = 'season')\r\nsns.stripplot(data = Bike, x = 'month', y = 'count', hue = 'weather')\r\n\r\nsns.stripplot(data = Bike, x = 'year', y = 'count', hue = 'season')\r\nsns.stripplot(data = Bike, x = 'year', y = 'count', hue = 'weather')\r\n\r\nsns.stripplot(data = Bike, x = 'atemp', y = 'count')\r\nsns.stripplot(data = Bike, x = 'temp', y = 'count')\r\n\r\nsns.stripplot(data = Bike, x = 'humidity', y = 'count')\r\nsns.stripplot(data = Bike, x = 'windspeed', y = 'count')\r\n\r\nsns.boxplot(data = Bike, x = 'windspeed', y = 'count')\r\nsns.boxplot(data = Bike, x = 'atemp', y = 'count')\r\nsns.boxplot(data = Bike, x = 'humidity', y = 'count')\r\n\r\nsns.stripplot(data = Bike, x = 'humidity', y = 'humidity')\r\nsns.stripplot(data = Bike, x = 'atemp', y = 'atemp')\r\nsns.stripplot(data = Bike, x = 'windspeed', y = 'windspeed')\r\n\r\n\r\nX = Bike.iloc[:, [0,2,3,5,6,7,11,12,13]]\r\nY = Bike.iloc[:, 10].values\r\n\r\n\r\n\r\n#getting dummies\r\ndef WeatherDummies():\r\n global X\r\n Weather_Dummies = pd.get_dummies(X['weather'], prefix = 'weather')\r\n X = pd.concat([X, Weather_Dummies], axis = 1)\r\n X.drop('weather', axis = 1, inplace = True)\r\n return X\r\nX = WeatherDummies()\r\nX.drop('weather_4', axis = 1, inplace = True)\r\n\r\ndef SeasonDummies():\r\n global X\r\n Season_Dummies = pd.get_dummies(X['season'], prefix = 'season')\r\n X = pd.concat([X, Season_Dummies], axis = 1)\r\n X.drop('season', axis = 1, inplace = True)\r\n return X\r\nX = SeasonDummies()\r\nX.drop('season_4', axis = 1, inplace = True)\r\n\r\n#Scalling the variables\r\nfrom sklearn.preprocessing import StandardScaler\r\nX_sc = StandardScaler()\r\nX = X_sc.fit_transform(X)\r\n\r\n#Spliting dataset into train and validation set\r\nfrom sklearn.cross_validation import train_test_split\r\nX_train, X_val, Y_train, Y_val = train_test_split(X, Y, train_size = 0.2, random_state = 0)\r\n\r\n#Random Forest Regression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nregressor = RandomForestRegressor(n_estimators = 200)\r\nregressor.fit(X_val, Y_val)\r\n\r\n\r\n#Choosing the features to train\r\nfeatures = pd.DataFrame()\r\nfeatures['feature'] = X.columns\r\nfeatures['importance'] = regressor.feature_importances_\r\nfeatures.sort_values(by = 'importance', ascending = True, inplace = True)\r\nfeatures.set_index('feature', inplace = True)\r\nfeatures.plot(kind = 'barh')\r\n\r\nimport xgboost as xg\r\nXGB = xg.XGBRegressor()\r\nXGB.fit(X_val, Y_val)\r\n\r\n#Grid search -> you have to finished that\r\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\r\nparameters = {'n_estimators':[10, 50, 100],\r\n 'max_features':['sqrt', 'auto', 'log2'],\r\n 'max_depth':[2, 4, 6, 8, 10],\r\n 'min_samples_split':[2,3,5,8],\r\n 'min_samples_leaf':[1,3,5,8]}\r\ncross_validation = StratifiedKFold(n_splits = 5)\r\ngrid_search = GridSearchCV(regressor, scoring = 'neg_mean_squared_log_error', param_grid = parameters, cv = cross_validation, verbose = 1)\r\ngrid_search.fit(X_val, Y_val)\r\nbests = grid_search.best_params_\r\nprint('Best score: {}'.format(grid_search.best_score_))\r\nprint('Best parameters: {}'.format(grid_search.best_params_))\r\n\r\n\r\n#Checking the prediction\r\nBike_pred = XGB.predict(X_train)\r\n\r\nassert len(Y_train) == len(Bike_pred)\r\nterms_to_sum = [(math.log(Bike_pred[i] + 1) - math.log(Y_train[i] + 1)) ** 2.0 for i,pred in enumerate(Bike_pred)]\r\nscore = 
(sum(terms_to_sum) * (1.0/len(Y_train))) ** 0.5\r\nprint(score)\r\n\r\n\r\n#Making new columns\r\ndef Bike_test_Year():\r\n global Bike_test\r\n Bike_test['year'] = Bike_test['datetime'].map(lambda datetime:datetime.split('-')[0].strip())\r\n Bike_test['year'] = Bike_test.year.map(YearDictionary)\r\n return Bike_test\r\nBike_test = Bike_test_Year()\r\n\r\ndef Bike_test_Month():\r\n global Bike_test\r\n Bike_test['month'] = Bike_test['datetime'].map(lambda datetime:datetime.split('-')[1].split('-')[0].strip())\r\n Bike_test['month'] = Bike_test.month.map(MonthDictionary)\r\n return Bike_test\r\nBike_test = Bike_test_Month()\r\n\r\ndef Bike_test_Hour():\r\n global Bike_test\r\n Bike_test['hour'] = Bike_test['datetime'].map(lambda datetime:datetime.split(' ')[1].split(':')[0].strip())\r\n Bike_test['hour'] = Bike_test.hour.map(HourDictionary)\r\n return Bike_test\r\nBike_test = Bike_test_Hour()\r\n\r\nBike_test.drop(['datetime', 'temp'], axis = 1, inplace = True)\r\n\r\ndef WeatherDummies_test():\r\n global Bike_test\r\n Weather_Dummies_test = pd.get_dummies(Bike_test['weather'], prefix = 'weather')\r\n Bike_test = pd.concat([Bike_test, Weather_Dummies_test], axis = 1)\r\n Bike_test.drop('weather', axis = 1, inplace = True)\r\n return Bike_test\r\nBike_test = WeatherDummies_test()\r\nBike_test.drop('weather_4', axis = 1, inplace = True)\r\n\r\ndef SeasonDummies_test():\r\n global Bike_test\r\n Season_Dummies_test = pd.get_dummies(Bike_test['season'], prefix = 'season')\r\n Bike_test = pd.concat([Bike_test, Season_Dummies_test], axis = 1)\r\n Bike_test.drop('season', axis = 1, inplace = True)\r\n return Bike_test\r\nBike_test = SeasonDummies_test()\r\nBike_test.drop('season_4', axis = 1, inplace = True)\r\n\r\nBike_test.drop(['holiday'], axis = 1, inplace = True)\r\n\r\nBike_test['windspeed'] = abs(Bike_test['windspeed'].max() - Bike_test['windspeed'])\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nBike_test_sc = StandardScaler()\r\nBike_test = Bike_test_sc.fit_transform(X)\r\n\r\n#Random Forest Regression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nRF = RandomForestRegressor(n_estimators = 200)\r\nRF.fit(X, Y)\r\n\r\nimport xgboost as xg\r\nXGB = xg.XGBRegressor()\r\nXGB.fit(X, Y)\r\n\r\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\r\nparameters = {'n_estimators':[ 50, 100, 200],\r\n 'max_features':['sqrt', 'auto', 'log2'],\r\n 'max_depth':[2, 4, 6, 10],\r\n 'min_samples_split':[2,3,5],\r\n 'min_samples_leaf':[1,3,5]}\r\ncross_validation = StratifiedKFold(n_splits = 5)\r\nEvaluation = GridSearchCV(RF, scoring = 'neg_mean_squared_log_error', param_grid = parameters, cv = cross_validation, verbose = 1)\r\nEvaluation.fit(X, Y)\r\nbests = Evaluation.best_params_\r\nprint('Best score: {}'.format(Evaluation.best_score_))\r\nprint('Best parameters: {}'.format(Evaluation.best_params_))\r\n\r\n#Checking the prediction\r\nRF_pred = RF.predict(Bike_test)\r\nXGB_pred = XGB.predict(Bike_test)\r\n\r\n#preparing the final folder\r\ndf_output = pd.DataFrame()\r\naux = pd.read_csv('test.csv')\r\ndf_output['datetime'] = aux['datetime']\r\ndf_output['count'] = XGB.predict(Bike_test)\r\ndf_output[['datetime','count']].to_csv('XGB.csv', index=False)","sub_path":"BikeAllInOne.py","file_name":"BikeAllInOne.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"24203178","text":"class SemanticKernelTagger:\n def __init__(self,\n com_gen=None,\n cat_gens=None,\n top_m=50,\n 
top_n=1000):\n self.top_m = top_m\n self.top_n = top_n\n self.categories = {}\n self.common = []\n if com_gen:\n self.load_common(com_gen)\n if cat_gens:\n self.load_categories(cat_gens)\n\n def load_common(self, gen):\n self.common = [i for i, _ in gen]\n\n def get_sk(self, gen):\n unigrams = [i for i, _ in gen]\n sk_n_ord = set(unigrams) - set(self.common)\n return [el for el in unigrams if el in sk_n_ord]\n\n def load_categories(self, gens_dict):\n for key in gens_dict.keys():\n self.categories[key] = self.get_sk(gens_dict[key])\n\n def _metric(self, top_input, top_supportive):\n if len(top_input):\n return (1 - (len(set(top_input) - set(top_supportive)) / float(len(top_input))))\n else:\n return -1\n\n def tag(self, gen):\n res = {}\n sk = self.get_sk(gen)\n for cat in self.categories.keys():\n res[cat] = self._metric(sk[:self.top_m], self.categories[cat][:self.top_n])\n return res\n","sub_path":"nltools/ml/classify/sk_tagger.py","file_name":"sk_tagger.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379727299","text":"##################################################################\n# BrundleFuzzClient.py\n# The core (Python) reads the feedback information from\n# the PinTool (C++) from the shared memory\n##################################################################\n\n\nimport sys\nimport os\nimport mmap\nimport subprocess\nfrom array import array\nimport logging\nimport logging.handlers\nfrom datetime import datetime\nfrom ConfigParser import SafeConfigParser\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\n# This is necessary because a MutationObject will\n# be unserialized from the server.\n# It needs to exist in this namespace\nfrom helpers.common import MutationObject\n\nfrom helpers.utils import Utils\nfrom helpers.crash_analysis import CrashAnalysis\nfrom helpers.rpc_client import BrundleFuzzRpcClient\nfrom helpers.fileops import FileOperations\nfrom helpers.aesthetics import Aesthetics\n\n# Some nice named constants\nCAUSED_CRASH = 3\n\n\nclass BrundleFuzzClient(object):\n def __init__(self):\n\n self.debug = False\n self.root_dir = os.path.dirname(os.path.abspath(__file__))\n self.mutations_dir = os.path.join(self.root_dir, 'mutations')\n self.hangs_dir = os.path.join(self.root_dir, 'hangs')\n self.cfg = self._initialize_config()\n self.ml = self._initialize_logging()\n self.mo = None\n\n # Shared memory\n self.shm = None\n self.shm_size = 0\n self.bitmap_size = 65536\n\n # PIN command line\n self.cmd_l = []\n\n # Setup helpers\n self.ae = Aesthetics(self)\n self.utils = Utils(self)\n self.fileops = FileOperations(self)\n self.crash_analysis = CrashAnalysis(self)\n self.rpc_client = BrundleFuzzRpcClient(self)\n\n self._initialize_shared_memory()\n self._initialize_pin_cmd()\n\n def _initialize_config(self):\n \"\"\"\n This config will be shared with helper\n modules via the parent attribute\n \"\"\"\n cfg = SafeConfigParser()\n cfg.read('config.ini')\n\n return cfg\n\n def _initialize_logging(self):\n \"\"\"\n Printing to console is dirty\n \"\"\"\n main_logger = logging.getLogger('main')\n\n log_filename = os.path.join('logs', 'log.txt')\n main_logger.setLevel(logging.DEBUG)\n\n # 5 rotating logs of 1 MB each\n handler = logging.handlers.RotatingFileHandler(\n log_filename,\n maxBytes = 1024 * 1024,\n backupCount = 1\n )\n\n main_logger.addHandler(handler)\n\n return main_logger\n\n def _initialize_shared_memory(self):\n \"\"\"\n This is the IPC channel 
between us (Python)\n and the PinTool (C/C++)\n \"\"\"\n s_uint32 = self.utils.get_size_uint32()\n shm_name = \"Local\\\\NaFlSharedMemory\"\n\n self.shm_size = self.bitmap_size * s_uint32 # architecture dependent :)\n self.shm = mmap.mmap(0,\n self.shm_size,\n shm_name,\n access = mmap.ACCESS_WRITE)\n\n if not self.shm:\n # Oops!\n self.ml.info('[!] Could not create the shared memory region')\n self.ml.info('[!] Aborting...')\n sys.exit(1)\n\n def _initialize_pin_cmd(self):\n \"\"\"\n Initializes fuzzing parameters with\n information stored in a config file\n \"\"\"\n self.cmd_l.append(self.cfg.get('pin_info', 'pin_bat'))\n self.cmd_l.append('-t')\n self.cmd_l.append(self.cfg.get('pin_info', 'pintool'))\n self.cmd_l.append('-timer')\n self.cmd_l.append(self.cfg.get('pin_info', 'timeout'))\n self.cmd_l.append('-module')\n self.cmd_l.append(self.cfg.get('target_info', 'module').lower())\n self.cmd_l.append('--')\n self.cmd_l.append(self.cfg.get('target_info', 'filename'))\n\n # Parse the cmd options\n try:\n _options = self.cfg.get('target_info', 'cmd_options')\n for _cmd in _options.split():\n self.cmd_l.append(_options.split())\n except NoOptionError:\n self.ml.info('[.] No command line options found.')\n\n self.debug = self.cfg.getboolean('runtime', 'debug')\n\n def _run_under_pin(self, input_filename):\n \"\"\"\n Runs the given file under PIN and\n gets the bitmap representing execution\n @returns: current execution bitmap\n \"\"\"\n self.cmd_l.append(input_filename)\n subprocess.call(self.cmd_l, shell = False)\n self.cmd_l.pop() # remove the filename from cmd :)\n\n # The PinTool has written its feedback into\n # the shared memory. Time to read it.\n self.shm.seek(0) # file-like interface\n\n # This coerces somehow the bitmap to an array of ulong's\n curr_bitmap = array('L', self.shm.read(self.shm_size)) # C ulong (4 bytes)\n\n return curr_bitmap\n\n def _fuzzing_loop(self):\n \"\"\"\n Fuzzing Loop.\n This loops (maybe indefinitely) creating several\n fuzzing processes\n \"\"\"\n iteration_nr = 0\n\n while True:\n # subprocess.call() is blocking, exactly what I need :)\n # Execution continues when this subprocess returns, either:\n # * instrumented process exits\n # * instrumented process crashes\n # * timeout expires (implemented in PinTool)\n\n if iteration_nr % 10 == 0:\n self.ae.m_info(\"* Iteration #%d\" % iteration_nr)\n self.ae.m_info(\"* PLACEHOLDER. 
PERIODIC MAINTENANCE PROCESSES\")\n\n iteration_nr += 1\n continue\n\n # Mutation objects are read from the queue\n smo = self.rpc_client.poll_mutation_queue()\n self.mo = pickle.loads(smo)\n\n if self.mo:\n input_filename = self.mo.filename\n\n data = self.mo.data\n input_path_filename = os.path.join(self.mutations_dir, input_filename)\n with open(input_path_filename, 'wb') as f:\n f.write(data)\n\n # Run with the newly created file unde PIN\n curr_bitmap = self._run_under_pin(input_path_filename)\n\n else:\n self.ae.m_alert(\"Problem getting MutationObject from server\")\n self.ae.m_alert(\"Continuing...\")\n continue\n\n #####################################################\n # Check if this was a crash on client side\n # This way I can analyze it inmediately\n #####################################################\n if curr_bitmap[0] == 0x41414141 \\\n and curr_bitmap[1] == 0x42424242:\n # Restore these first bytes to more appropriate values\n curr_bitmap[0] = 0\n curr_bitmap[1] = 0\n\n self.ml.info('**** CRASH ****' * 4)\n self.ml.info(input_filename)\n\n self.mo.priority == CAUSED_CRASH\n\n # Analyzes the crash (and saves it, if determined interesting)\n # This sets the MutationObject crash_data attribute\n cmd = [self.cfg.get('target_info', 'filename'), input_filename]\n self.crash_analysis.analyze_crash(cmd)\n\n # The bitmap regarding the current execution\n self.mo.arr = curr_bitmap\n\n # Delete the temporary file from disk\n if os.path.exists(input_path_filename):\n os.remove(input_path_filename)\n\n # Information is sent back to the server\n self.rpc_client.send_evaluation(self.mo)\n\n iteration_nr += 1\n\n def run(self):\n \"\"\"\n This prepares the run and starts the fuzzing loop\n \"\"\"\n\n victim_filename = self.cfg.get('target_info', 'filename')\n self.ml.info(\"\")\n self.ml.info(\"=\" * 80)\n self.ml.info(\"Fuzzing initiated from the command line.\")\n self.ml.info(\"Started fuzzing: %s\" % victim_filename)\n self.ml.info(\"Timestamp: %s\" % str(datetime.now()))\n\n try:\n self._fuzzing_loop() # never returns\n\n except KeyboardInterrupt:\n self.ae.m_alert(\"\"\"\n ============================================\n === ===\n === Fuzzing cancelled by user (Ctrl + C) ===\n === Exiting... ===\n === ===\n ============================================\n \"\"\")\n\n self.rpc_client.connection.close()\n sys.exit(1)\n\n\ndef main():\n \"\"\"\n This must be kept to the bare minimum\n \"\"\"\n bf = BrundleFuzzClient()\n bf.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"client_windows/BrundleFuzzClient.py","file_name":"BrundleFuzzClient.py","file_ext":"py","file_size_in_byte":8808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241866686","text":"# FIX THE MANUAL INITIALIZER TO INCLUDE DIFFERENT FAT TYPES\n\n\n\n\nclass Food(object):\n # Not thread-safe, race conditions etc.\n food_id = 0\n def __init__(self, food_name, amount,\n fat, carbs, protein,\n cholesterol, sodium, potassium,\n vitamin_a, vitamin_c, calcium,\n iron, vitamin_d, vitamin_b6,\n vitamin_b12, magnesium,\n calories=None, saturated_fat=None,\n trans_fat=None, other_fat=None,\n fiber=None, sugar=None, other_carb=None):\n ''' Macro nutrients are just their weight.\n Amount is a string, work it out manually for now.\n Micro nutrients (cholesterol to magnesium) are in\n percent of daily amount (for 2000 kcal diet). 
This\n will be modified by the actual diet thing for non\n 2k kcal diets.\n If calories is not specified, will work it out\n based on math (fats=9cal/g, prot/carb=4cal/g).\n Remaining values are subsets of macro nutrients,\n and do not need to be specified (but help, because\n fuck sugar and saturated fat).\n '''\n Food.food_id += 1\n self.id = Food.food_id\n self.name = food_name\n self.amount = amount\n self.fat, self.carbs = fat, carbs\n self.protein, self.cholesterol = protein, cholesterol\n self.sodium, self.potassium = sodium, potassium\n self.vitamin_a, self.calcium = vitamin_a, calcium\n self.iron, self.vitamin_d = iron, vitamin_d\n self.vitamin_b6, self.vitamin_b12 = vitamin_b6, vitamin_b12\n self.magnesium, self.vitamin_c = magnesium, vitamin_c\n self.calories, self.sat_fat = calories, saturated_fat\n self.trans_fat, self.other_fat = trans_fat, other_fat\n self.fiber, self.sugar = fiber, sugar\n self.other_carb = other_carb\n\n if self.calories == None:\n self.calories = (self.fat * 9) + (self.carbs * 4) +\\\n (self.protein * 4)\n\n\n def __init__(self):\n # For numeric amounts, -1 implies no information provided.\n # Also: What's tidiness. Or planning ahead?\n print(\"Welcome to the food initializer!\")\n print(\"Remember, don't include units unless asked.\")\n Food.food_id += 1\n self.id = Food.food_id\n self.name = raw_input(\"What is the name of this food item?\\n\")\n self.amount = raw_input(\"How much of this food do you have information for? Include units.\\n\")\n self.calories = raw_input(\"How many full Calories does this food have?\\n\" +\\\n \"Enter no value if you would like us to work this out for you.\\n\")\n if len(self.calories) > 0:\n self.calories = eval(self.calories)\n self.fat = eval(raw_input(\"How many grams of fat does this food contain?\\n\"))\n while True:\n self.sat_fat = raw_input(\"Of this fat, how many grams are saturated fat?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.sat_fat) > 0:\n self.sat_fat = eval(self.sat_fat)\n else:\n self.sat_fat = -1\n break\n if self.sat_fat > self.fat:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total fat).\")\n else:\n break\n while True:\n self.polyunsat_fat = raw_input(\"Of this fat, how many grams are polyunsaturated fat?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.polyunsat_fat) > 0:\n self.polyunsat_fat = eval(self.polyunsat_fat)\n else:\n self.polyunsat_fat = -1\n break\n if self.polyunsat_fat > self.fat:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total fat).\\n\")\n else:\n break\n while True:\n self.monounsat_fat = raw_input(\"Of this fat, how many grams are monounsaturated fat?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.monounsat_fat) > 0:\n self.monounsat_fat = eval(self.monounsat_fat)\n else:\n self.monounsat_fat = -1\n break\n if self.monounsat_fat > self.fat:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total fat).\")\n else:\n break\n while True:\n self.trans_fat = raw_input(\"Of this fat, how many grams are trans fat?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.trans_fat) > 0:\n self.trans_fat = eval(self.trans_fat)\n else:\n self.trans_fat = -1\n break\n if self.trans_fat > self.fat:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total 
fat).\")\n else:\n break\n self.cholesterol = raw_input(\"How much cholesterol does this food have?\\nAnswer either in mg or %DV for an average 2000Cal diet (include units!).\\n\" +\\\n \"You may skip this information.\\n\")\n if self.cholesterol.endswith(\"mg\"):\n self.cholesterol = eval(self.cholesterol.split(\"mg\")[0])\n self.cholesterol_percent = self.cholesterol / 300.0\n elif \"%\" in self.cholesterol:\n self.cholesterol_percent = eval(self.cholesterol.split(\"%\")[0]) / 100.0\n self.cholesterol = self.cholesterol_percent * 300.0\n else: # no value entered; skipped.\n self.cholesterol = -1 \n self.sodium = raw_input(\"How much sodium does this food have?\\nAnswer either in mg or %DV for an average 2000Cal diet (include units!).\\n\" +\\\n \"You may skip this information.\\n\")\n if self.sodium.endswith(\"mg\"):\n self.sodium = eval(self.sodium.split(\"mg\")[0])\n self.sodium_percent = self.sodium / 2400.0\n elif \"%\" in self.sodium:\n self.sodium_percent = eval(self.sodium.split(\"%\")[0]) / 100.0\n self.sodium = self.sodium_percent * 2400.0\n else:\n self.sodium = -1\n self.potassium = raw_input(\"How much potassium does this food have?\\nAnswer either in mg or %DV for an average 2000Cal diet (include units!).\\n\" +\\\n \"You may skip this information.\\n\")\n if self.potassium.endswith(\"mg\"):\n self.potassium = eval(self.potassium.split(\"mg\")[0])\n self.potassium_percent = self.potassium / 3500.0\n elif \"%\" in self.potassium:\n self.potassium_percent = eval(self.potassium.split(\"%\")[0]) / 100.0\n self.potassium = self.potassium_percent * 300.0\n else:\n self.potassium = -1\n self.carbs = eval(raw_input(\"How many grams of carbohydrates does this food contain?\\n\"))\n while True:\n self.fiber = raw_input(\"Of these carbs, how many grams are dietary fiber?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.fiber) > 0:\n self.fiber = eval(self.fiber)\n else:\n self.fiber = -1\n break\n if self.fiber > self.carbs:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total carbs).\")\n else:\n break\n while True:\n self.sugar = raw_input(\"Of these carbs, how many grams are sugar?\\n\" +\\\n \"You may skip this information.\\n\")\n if len(self.sugar) > 0:\n self.sugar = eval(self.sugar)\n else:\n self.sugar = -1\n break\n if self.sugar > self.carbs:\n print(\"I think you got your numbers mixed up.'n\" +\\\n \"Restart the program or reenter the amount (cannot exceed total carbs).\\n\")\n else:\n break\n self.protein = eval(raw_input(\"How many grams of protein are in this food?\\n\"))\n self.vitA = eval(raw_input(\"What %DV of vitamin A is present?\\n\"))\n self.vitC = eval(raw_input(\"What %DV of vitamin C is present?\\n\"))\n self.calcium = eval(raw_input(\"What %DV of calcium is present?\\n\"))\n self.iron = eval(raw_input(\"What %DV of iron is present?\\n\"))\n self.vitD = eval(raw_input(\"What %DV of vitamin D is present?\\n\"))\n self.vitB6 = eval(raw_input(\"What %DV of vitamin B-6 is present?\\n\"))\n self.vitB12 = eval(raw_input(\"What %DV of vitamin B-12 is present?\\n\"))\n self.magnesium = eval(raw_input(\"What %DV of magnesium is present?\\n\"))\n \n\n \n \n\n\n\n\n\n\n\n\n\n \n","sub_path":"food_class.py","file_name":"food_class.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"554367577","text":"import logging\nfrom bs4 import BeautifulSoup\n\n\ndef response_code_check(code): # 
identifies if the response code is a Request redirect or error and displays it\n if 300 < code < 399:\n print(\"Request was redirected. Error code: \" + code)\n if 400 < code < 599:\n print(\"Request had an error. Error code: \" + code)\n\n\nasync def chapter_updates(session, series_title, chapter_str, url):\n async with session.get(url) as resp:\n print(resp.status)\n page_soup = BeautifulSoup(await resp.read(), 'html.parser')\n chapter = page_soup.find('div', class_='element')\n total_chapters = len(page_soup.find_all('div', class_='element'))\n print(\"Total Chapters: \" + str(total_chapters))\n chapter_exists: bool = False\n chapters = []\n field_values = []\n for count in range(0, total_chapters): # if the read chapter is count = 6, then it won't be added to the list\n if chapter.find('div', class_='title').text == chapter_str:\n chapter_exists = True\n if count > 6:\n for cnt in range(5, 2, -1):\n chapter = chapter.previous_sibling\n chapters[cnt] = chapter.find('div', class_='title').text\n field_values[cnt] = f\"[Read here]({chapter.find('a')['href']}) | \" \\\n f\"{chapter.find('div', class_='meta_r').text.split(',')[1].replace(' ', '')}\"\n print(chapters)\n break\n if count < 6: # stop appending after the 6th element\n chapters.append(chapter.find('div', class_='title').text)\n field_values.append(f\"[Read here]({chapter.find('a')['href']}) | \"\n f\"{chapter.find('div', class_='meta_r').text.split(',')[1].replace(' ', '')}\")\n earliest_chapter = chapter.find('div', class_='title').text\n chapter = chapter.next_sibling\n \"\"\"\n 1. Iterate down list until item is found, when found and count < 6, break loop\n 2. There should be no more than 6 elements in the array, after the 6th element is added, stop appending \n values into the array until the chapter is found but keep iterating through elements and tracking the \n earliest_chapter value\n 3. 
If found and count > 6, roll back the chapter to the previous 3 before the current one and put those \n values into the array\n Example: Count of 14 items, array elements 1 to 3: 1st 2nd & 3rd, array elements 4 to 6: 12th, 13th & 14th \n \"\"\"\n if chapter_exists:\n print(\"Chapter exists\")\n if count > 0:\n if count == 1:\n description = f\"1 update since your last read chapter, {chapter_str}.\"\n elif count <= 6:\n description = f\"{count} updates since your last read chapter, {chapter_str}.\"\n else:\n description = f\"{count} updates since your last read chapter, {chapter_str}.\\nDisplaying the 3 \" \\\n f\"chapters closest to your last read chapter and the 3 newest chapters found.\"\n print(description + \" returned status: Update Found\")\n manga = {'title': series_title, 'source_name': \"Kirei Cake\", 'source_link': url, 'chapters': chapters,\n 'value': field_values, 'description': description,\n 'status': \"Update found\"}\n try:\n manga['thumbnail'] = page_soup.find('div', class_='thumbnail').find('img')['src']\n except AttributeError:\n print(\"Manga thumbnail cannot be found.\")\n else:\n print(\"User is up to date with the latest released chapter of \" + series_title + \", \"\n + chapter_str + \" returned status: Up to date\")\n manga = {'status': \"Up to date\"}\n else:\n print(f\"{series_title} {chapter_str} cannot be found on the website, returned status: Failure.\")\n manga = {'title': series_title, 'source_name': \"Kirei Cake\", 'source_link': url,\n 'chapters': [], 'value': [], 'status': \"Failure\",\n 'description': f\"The chapter listed on your mangalist, {chapter_str}, cannot be found on the \"\n f\"website.\\nChapters found: {earliest_chapter} - \"\n f\"{page_soup.find('div', class_='element').find('div', class_='title').text}. 
\"\n f\"\\nTotal chapters: {str(total_chapters)}\"}\n try:\n manga['thumbnail'] = page_soup.find('div', class_='thumbnail').find('img')['src']\n except AttributeError:\n print(\"Manga thumbnail cannot be found.\")\n return manga\n # All conditions result in a return of the manga dict with a status variable:\n # \"Update found\", \"Up to date\" or \"Failure\"\n\n\nasync def manga_details(session, url):\n # return an object that contains: Manga Description, Author Name, Number of Chapters, Latest Chapter, Status:ongoing\n async with session.get(url) as resp:\n print(resp.status)\n if resp.status != 200:\n return {'request_status': \"Failure\"}\n page_soup = BeautifulSoup(await resp.read(), 'html.parser')\n try:\n title = page_soup.find('div', class_='info').find('li').text.replace(\"Title: \", \"\")\n except AttributeError:\n return {\"request_status\": \"Unable to gather information from link\"}\n else:\n description = page_soup.find('div', class_='info').find('li').find_next_sibling('li').text\n if description == \"\":\n description = \"Not found.\"\n manga = {\n 'title': title,\n 'description': description,\n 'chapters': len(page_soup.find_all('div', class_='element')),\n 'request_status': \"Success\"\n }\n try:\n manga['cover'] = page_soup.find('div', class_='thumbnail').find('img')['src']\n except AttributeError:\n logging.info(\"Manga thumbnail cannot be found.\")\n return manga\n","sub_path":"modules/KireiCakeModule.py","file_name":"KireiCakeModule.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"382004167","text":"import random\n\n\nclass Markov:\n def __init__(self, file):\n self.cache = {}\n self.START = []\n self.END = []\n self.words = self.file_to_word(file)\n self.word_size = len(self.words)\n self.database()\n \n def file_to_word(self, file):\n file.seek(0)\n data = file.read()\n return data.split()\n\n def triples(self):\n if len(self.words) < 3: return\n for i in range(len(self.words)-2):\n yield (self.words[i], self.words[i+1], self.words[i+2])\n\n def database(self):\n for w1, w2, w3 in self.triples():\n if w1[0].lower() != w1[0]: # 简单判断首字母大写即为START\n self.START.append((w1, w2))\n key = (w1, w2)\n if key in self.cache:\n self.cache[key].append(w3)\n else:\n self.cache[key] = [w3]\n\n def generate_text(self, sentence_len):\n # 使用START和END使句子更加优秀\n w1, w2 = random.choice(self.START)\n gen_words = []\n for i in range(sentence_len):\n gen_words.append(w1)\n w1, w2 = w2, random.choice(self.cache[(w1, w2)])\n gen_words.append(w2)\n return ' '.join(gen_words)\n\n\nif __name__ == \"__main__\":\n fileName = input(\"File name > \")\n with open(fileName) as f:\n markov = Markov(f)\n while True:\n action = input(\"The length of sentence > \")\n if action.isdigit():\n sentence = int(action)\n print(markov.generate_text(sentence_len=sentence))\n else:\n break\n","sub_path":"Data_Analysis/Markov/markovgen.py","file_name":"markovgen.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212331000","text":"import pprint\r\nimport requests\r\nimport lxml.html\r\nimport lxml.cssselect\r\nimport re\r\nimport my_utils\r\n \r\n\r\ndef get_pagination_type(doc_html, doc_obj):\r\n paginationType = 0\r\n \r\n paginationExist = doc_obj.xpath('//div[contains(concat(\" \", normalize-space(@class), \" \"), \" pagination \")]')\r\n if len(paginationExist):\r\n print('_pexist')\r\n paginationMoreExist = 
paginationExist[0].xpath('ul[contains(concat(\" \", normalize-space(@class), \" \"), \" forward \")]/li[@class=\"last\"]')\r\n print('_pmexist')\r\n if len(paginationMoreExist):\r\n paginationType = 'full_pagination'\r\n else:\r\n paginationType = 'little_pagination'\r\n else:\r\n paginationType = 'no_pagination'\r\n\r\n return paginationType\r\n \r\n\r\ndef get_pages_links_little_pagination(doc_html, url, doc_obj, first_page_url):\r\n #print('little')\r\n search_links_list = [first_page_url]\r\n\r\n pagination = doc_obj.xpath('//div[contains(concat(\" \", normalize-space(@class), \" \"), \" pagination \")]')\r\n links = pagination[0].xpath('ul[@class=\"ipsList_inline left pages\"]/li[@class=\"page\"]/a')\r\n \r\n for link in links:\r\n href = link.get('href')\r\n search_links_list.append(href)\r\n \r\n return search_links_list\r\n\r\n\r\ndef get_pages_links_full_pagination(doc_html, url, doc_obj, first_page_url):\r\n #print('full')\r\n search_links_list = [first_page_url]\r\n\r\n pagination = doc_obj.xpath('//div[contains(concat(\" \", normalize-space(@class), \" \"), \" pagination \")]')\r\n links = pagination[0].xpath('ul[@class=\"ipsList_inline forward left\"]/li[@class=\"last\"]/a')\r\n last_link = links[0].get('href')\r\n search = re.search('(.*?st=)(\\d+)', str(last_link))\r\n base_path = search.group(1)\r\n #print(base_path)\r\n last_page_index = search.group(2)\r\n last_page_index = int(last_page_index)\r\n #print(last_page_index)\r\n\r\n for index in range(25, last_page_index + 25, 25):\r\n index = str(index)\r\n link = base_path + index\r\n search_links_list.append(link)\r\n \r\n return search_links_list\r\n\r\n\r\ndef get_dict_data(search_links_list):\r\n data = {}\r\n data_dict = {}\r\n \r\n for url in search_links_list:\r\n req = my_utils.get_doc(url)\r\n html = req.text\r\n doc = lxml.html.document_fromstring(html)\r\n #print(html)\r\n table_tr = doc.xpath('//table[@id=\"forum_table\"]/tr')\r\n \r\n for item in table_tr:\r\n td = item.xpath('td')\r\n if len(td):\r\n td = td[1]\r\n \r\n title = td.xpath('h4/a/text()')\r\n if len(title):\r\n title = title[0].strip()\r\n\r\n section = td.xpath('span[contains(concat(\" \", normalize-space(@class), \" \"), \" blend_links \")]/a/text()')\r\n if len(section):\r\n section = section[0].strip()\r\n \r\n date = td.xpath('span[contains(concat(\" \", normalize-space(@class), \" \"), \" toggle_notify_off \")]/node()')\r\n if len(date):\r\n date = date[4].replace(',', '').strip()\r\n \r\n #print(title, section, date)\r\n data[title] = [section, date]\r\n\r\n data_dict.update(data)\r\n\r\n return data_dict\r\n\r\n\r\ndef construct_xml(data_dict):\r\n itemsQuantity = 0\r\n parent = lxml.etree.Element('data')\r\n for key in data_dict:\r\n childItem = lxml.etree.Element('item')\r\n childMessage = lxml.etree.Element('message')\r\n childMessage.text = key \r\n childSection = lxml.etree.Element('section')\r\n childSection.text = data_dict[key][0]\r\n childDate = lxml.etree.Element('date')\r\n childDate.text = data_dict[key][1]\r\n childItem.append(childMessage)\r\n childItem.append(childSection)\r\n childItem.append(childDate)\r\n parent.append(childItem)\r\n itemsQuantity =itemsQuantity + 1\r\n\r\n return parent, itemsQuantity\r\n\r\n\r\ndef record_xml_to_file(xml, fileName='xml.xml'):\r\n xmlPretty = lxml.etree.tounicode(xml, pretty_print=True)\r\n print(xmlPretty, 'quantity items: ', itemsQuantity)\r\n \r\n try:\r\n with open(fileName, \"w\") as file:\r\n file.write(xmlPretty)\r\n except OSError as exc:\r\n print('Error record. 
', exc)\r\n else:\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n url = 'http://forum.saransk.ru/index.php?app=core&module=search&section=search&do=search&fromsearch=1'\r\n \r\n post = {\r\n 'search_app':'forums',\r\n 'search_term':'',\r\n 'search_content':'titles',\r\n 'search_tags':'',\r\n 'search_author':'sergey kalinin',\r\n 'search_date_start':'',\r\n 'search_date_end':'',\r\n 'search_app_filters[core][sortKey]':'date',\r\n 'search_app_filters[core][sortDir]':'0',\r\n 'search_app_filters[members][searchInKey]':'members',\r\n 'search_app_filters[members][members][sortKey]':'date',\r\n 'search_app_filters[members][members][sortDir]':'0',\r\n 'search_app_filters[members][comments][sortKey]':'date',\r\n 'search_app_filters[members][comments][sortDir]':'0',\r\n 'search_app_filters[forums][noPreview]':'1',\r\n 'search_app_filters[forums][pCount]':'',\r\n 'search_app_filters[forums][pViews]':'',\r\n 'search_app_filters[forums][sortKey]':'date',\r\n 'search_app_filters[forums][sortDir]':'0',\r\n 'search_app_filters[calendar][sortKey]':'date',\r\n 'search_app_filters[calendar][sortDir]':'0',\r\n 'search_app_filters[blog][searchInKey]':'entries',\r\n 'search_app_filters[blog][entries][sortKey]':'date',\r\n 'search_app_filters[blog][entries][sortDir]':'0',\r\n 'search_app_filters[blog][comments][sortKey]':'date',\r\n 'search_app_filters[blog][comments][sortDir]':'0',\r\n 'search_app_filters[gallery][searchInKey]':'images',\r\n 'search_app_filters[gallery][images][sortKey]':'date',\r\n 'search_app_filters[gallery][images][sortDir]':'0',\r\n 'search_app_filters[gallery][comments][sortKey]':'date',\r\n 'search_app_filters[gallery][comments][sortDir]':'0',\r\n 'search_app_filters[gallery][albums][sortKey]':'date',\r\n 'search_app_filters[gallery][albums][sortDir]':'0',\r\n 'submit':'Найти'\r\n }\r\n\r\n headers = {\r\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n 'Accept-Encoding':'gzip,deflate,sdch',\r\n 'Accept-Language':'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4',\r\n 'Cache-Control':'max-age=0',\r\n 'Connection':'keep-alive',\r\n 'Content-Length':'1623',\r\n 'Content-Type':'application/x-www-form-urlencoded',\r\n 'Cookie':'invbf_emoticon_sidebar=1; invbf_mobileApp=false; invbf_mobileBrowser=0; invbf_coppa=0; invbf_toggleCats=%2C77%2C; invbf_blog_view_type=all; invbf_mqtids=%2C; invbf_member_id=2018; invbf_pass_hash=7cd80bef9e60de34ba7b128330755ff6; invbf_sfc=1394124264; invbf_modtids=%2C; invbf_session_id=84cef0d43096c955fe8a612ea095cf59; invbf_uagent_bypass=1; invbf_rteStatus=rte',\r\n 'Host':'forum.saransk.ru',\r\n 'Origin':'http://forum.saransk.ru',\r\n 'Referer':'http://forum.saransk.ru/index.php?app=core&module=search&search_in=forums',\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'\r\n }\r\n\r\n first_page_url = 'http://forum.saransk.ru/index.php?app=core&module=search&do=search&andor_type=&sid=fef8dd7aa3cc1759da2e28ea3100281d&search_author=' + post[\"search_author\"] + '&search_app_filters[forums][sortKey]=date&search_content=' + post[\"search_content\"] + '&search_app_filters[forums][sortDir]=0&search_app_filters[forums][pCount]=&search_app_filters[forums][pViews]=&search_app_filters[forums][noPreview]=1&search_app_filters[forums][sortKey]=date&search_term=&search_app=forums&st=0'\r\n \r\n # fetch the main page of search results\r\n req = my_utils.get_doc(url, post=post, headers=headers)\r\n doc_html = req.text\r\n 
#print(doc_html)\r\n doc_obj = lxml.html.document_fromstring(doc_html)\r\n\r\n # determine the pagination type\r\n pagination_type = get_pagination_type(doc_html, doc_obj)\r\n #print(pagination_type)\r\n\r\n if pagination_type == 'no_pagination':\r\n search_links_list = [first_page_url]\r\n elif pagination_type == 'little_pagination':\r\n search_links_list = get_pages_links_little_pagination(doc_html=doc_html, url=url, doc_obj=doc_obj, first_page_url=first_page_url)\r\n elif pagination_type == 'full_pagination':\r\n search_links_list = get_pages_links_full_pagination(doc_html=doc_html, url=url, doc_obj=doc_obj, first_page_url=first_page_url)\r\n\r\n #pprint.pprint(search_links_list)\r\n\r\n # parse the data\r\n data_dict = get_dict_data(search_links_list)\r\n #pprint.pprint(data_dict)\r\n\r\n # build the XML tree\r\n xml, itemsQuantity = construct_xml(data_dict)\r\n #print(itemsQuantity, ' items') \r\n\r\n # write the XML to a file\r\n if record_xml_to_file(xml):\r\n print('record ok')\r\n \r\n print('--------------------full end-------------------')\r\n","sub_path":"python/EXAMPLES/projects/parse_html/10_forum_themes_new_approach/index - копия.py","file_name":"index - копия.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"191598581","text":"import os\nimport time\n\nfrom pycompss.api.api import compss_barrier, compss_wait_on\nfrom pycompss.api.constraint import constraint\nfrom pycompss.api.task import task\n\n\ndef measure(name, dataset_name, func, *args, **kwargs):\n print(\"==== STARTING ====\", name, dataset_name)\n compss_barrier()\n s_time = time.time()\n func(*args, **kwargs)\n compss_barrier()\n print(\"==== TIME ==== \", name, dataset_name, time.time() - s_time)\n print(\"In worker_working_dir: \", compss_wait_on(get_worker_working_dir()))\n\n\n@constraint(computing_units=\"${ComputingUnits}\")\n@task(returns=1)\ndef get_worker_working_dir():\n return os.getcwd()\n","sub_path":"tests/performance/mn4/scripts/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"77348013","text":"from flask import Flask, render_template\nimport requests\nimport json\nfrom secrets import api_key\n\nparams={'api-key': api_key}\n\n\napp = Flask(__name__) # the app is named after this module\n\n@app.route('/')\ndef index():\n return '<h1>Welcome!</h1>'
\n\n\n\n#nm = input(\"what is the name \")\n@app.route('/user/<nm>') #decorating function\ndef user_name(nm):\n baseurl ='https://api.nytimes.com/svc/topstories/v2/'\n global params\n url = baseurl + 'technology' +'.json'\n data = requests.get(url, params).json()\n count = 0\n data_list = []\n\n for i in data[\"results\"]:\n count += 1\n data_list.append(str(count) + \". \" + str(i[\"title\"]) + \" (\" + str(i[\"url\"]) + \") \")\n #print(count)\n if (count == 5):\n break\n return render_template('user.html', name=nm, my_list=data_list)\n\n\nif __name__ == '__main__':\n print('starting Flask app', app.name)\n app.run(debug=True)\n","sub_path":"hwk_11_ve/top_headlines.py","file_name":"top_headlines.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"54317270","text":"from django.template.loader import render_to_string\nfrom django.views.decorators.csrf import csrf_exempt\nfrom news.models import *\nfrom django.http import HttpResponse\nimport json as simplejson\nfrom html import unescape\nfrom knowledge.models import *\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n@csrf_exempt\ndef filter_resources(request):\n\tbody = simplejson.loads(request.body)\n\tresources = get_resources(body)\n\n\tmimetype = 'application/json'\n\t\n\thtml = render_to_string(\"knowledge/resource_list.html\", {'resources' : resources})\n\tres = {'html' : html}\n\treturn HttpResponse( simplejson.dumps(res), mimetype)\n\n@csrf_exempt\ndef update_pagination(request):\n\tbody = simplejson.loads(request.body)\n\tpages = get_pagination(body)\n\tmimetype = 'application/json'\n\thtml = render_to_string(\"includes/pagination.html\", {'subpages' : pages})\n\tres = {'html' : html}\n\treturn HttpResponse( simplejson.dumps(res), mimetype)\n\n\ndef get_pagination(body):\n\tdecoded_services = get_services(body)\n\n\turl = \"/home\"+body[\"page_url\"]\n\n\tpage = KnowledgePage.objects.all().filter(url_path=url).all()[0]\n\tresources = Resource.objects.all().filter(service__title__in=decoded_services).all().order_by('-date_published')\n\tpaginator = Paginator(resources, 12)\n\n\ttry:\n\t\tpage_number = body[\"page_number\"]\n\texcept:\n\t\tpage_number = 1\n\n\n\ttry:\n\t\tpages = paginator.page(page_number)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\treturn pages\n\n\ndef get_resources(body):\n\tdecoded_services = get_services(body)\n\ttry:\n\t\tpage_number = int(body[\"page_number\"])\n\texcept:\n\t\tpage_number = 1\n\n\tresources = Resource.objects.all().filter(service__title__in=decoded_services).all().order_by('-date_published')\n\tmin_len_resources = min(12*(page_number),len(resources))\n\tprint(12*(page_number-1))\n\tprint(min_len_resources)\n\tprint(resources[12:17])\n\tresources_live = resources[12*(page_number-1):min_len_resources]\n\tprint(resources_live)\n\treturn resources_live\n\n\ndef get_services(body):\n\tservices = body[\"services\"]\n\tdecoded_services = []\n\tfor service in services:\n\t\tdecoded_services.append(unescape(service))\n\n\treturn decoded_services","sub_path":"except/knowledge/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"191453619","text":"import os\nimport tornado.ioloop\nimport tornado.websocket\nimport tornado.web\nimport tornado.template\nimport json\nimport 
jsonpickle\n\nhostDir = os.path.dirname(__file__)\n\nusers = [] # Array for all of our users, this is a small project, but to scale it should probably just send this data to a database\n\nclass status(object):\n\tintent = \"\"\n\tstatus = \"\"\n\nclass chatter(tornado.websocket.WebSocketHandler):\n\tdef open(self):\n\t\t# User Connects\n\t\tmessage = status()\n\t\tmessage.intent = \"new_user\"\n\t\tmessage.status = self\n\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\tfor user in users:\n\t\t\tuser.write_message(outbound)\n\t\tusers.append(self)\n\n\tdef on_message(self, message):\n\t\t# A user sends a message to the server\n\t\treceived = jsonpickle.decode(message)\n\t\tmessage = status()\n\t\tif received[u'intent'] == \"user_name\":\n\t\t\tname = received[u'status']\n\t\t\tself.user_name = name\n\t\t\tmessage.intent = \"name_change\"\n\t\t\tmessage.status = name\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\tfor user in users:\n\t\t\t\tif user is not self:\n\t\t\t\t\tuser.write_message(outbound)\n\n\t\tif received[u'intent'] == \"status_change\":\n\t\t\tpresent_state = received[u'status']\n\t\t\tmessage.intent = \"status_change\"\n\t\t\tmessage.status = {'user_name': self.user_name, 'state': present_state}\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\tfor user in users:\n\t\t\t\tif user is not self:\n\t\t\t\t\tuser.write_message(outbound)\n\t\t\n\t\tif received[u'intent'] == \"public_message\":\n\t\t\tcontent = received[u'status']\n\t\t\tmessage.intent = \"public_message\"\n\t\t\tmessage.status = content\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\tfor user in users:\n\t\t\t\tif user is not self:\n\t\t\t\t\tuser.write_message(outbound)\n\n\t\tif received[u'intent'] == \"private_message\":\n\t\t\tcontent = received[u'status']\n\t\t\ttarget = content.target\n\t\t\tmsg = content.msg\n\t\t\tmessage.intent = \"private_message\"\n\t\t\tmessage.status = {'sender': self.user_name, 'msg': msg}\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\ttarget.write_message(outbound)\n\n\t\tif received[u'intent'] == \"bump\":\n\t\t\ttarget = received[u'status']\n\t\t\tmessage.intent = \"bump\"\n\t\t\tmessage.status = self\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\ttarget.write_message(outbound)\n\n\t\tif received[u'intent'] == \"admin-all\":\n\t\t\tmessage.intent = \"admin-all\"\n\t\t\tmessage.status = received[u'status']\n\t\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\t\tfor user in users:\n\t\t\t\tif user is not self:\n\t\t\t\t\tuser.write_message(outbound)\n\n\tdef on_close(self):\n\t\t# A user loses connection to the server\n\t\tusers.remove(self)\n\t\tmessage = status()\n\t\tmessage.intent = \"user_disconnect\"\n\t\tmessage.status = self\n\t\toutbound = jsonpickle.encode(message, unpicklable=False)\n\t\tfor user in users:\n\t\t\tuser.write_message(outbound)\n\nclass index(tornado.web.RequestHandler):\n\tdef get(self):\n\t\tloader = tornado.template.Loader(hostDir+'/res/')\n\t\tself.write(loader.load('index.html').generate())\n\t\t# Load and render the index page from a template file\n\napplication = tornado.web.Application([\n\t(r\"/\", index),\n])\n\nif __name__ == 
\"__main__\":\n\tapplication.listen(8080)\n\ttornado.ioloop.IOLoop.instance().start()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"635128435","text":"#!/usr/bin/env python2\n# Copyright 2018 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n'''Control file for the following tests\n\n rb_protection.py\n'''\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\n\ndef main(argv):\n if len(argv) > 0:\n sys.exit('Test takes no args!')\n iterations = 1\n output_to_stdout = ' 2>&1 | tee '\n python_prefix = 'python '\n test_list = ['rb_protection']\n\n for test in test_list:\n logs_dir = 'logs/' + test\n if os.path.exists(logs_dir):\n shutil.rmtree(logs_dir)\n os.makedirs(logs_dir)\n for i in range(iterations):\n iteration_num = i + 1\n print('==========================================================')\n print('TEST NAME: ' + test)\n print('ITERATION ' + str(iteration_num) + ' OF ' +\n str(iterations))\n print('==========================================================')\n cmd = '{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(python_prefix, test,\n '.py', output_to_stdout,\n logs_dir, '/', test,\n iteration_num, '.log')\n os.system(cmd)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"hammerd/hammertests/hammertests_control_rb.py","file_name":"hammertests_control_rb.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"371716846","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport calendar\nimport plotly.graph_objects as go\n# from jupyter_dash import JupyterDash\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport pandas as pd\nimport numpy as np \nimport dash_table\ndegree_sign = u\"\\N{DEGREE SIGN}\"\n\n\nmapbox_access_token = 'pk.eyJ1IjoidXJ2aTk3IiwiYSI6ImNrbmUzMG1hcDBhNjIydnFxdXZjbzNhZGoifQ.zKEd5FBU5ajxGXdLYe0erw'\n#Reading the csv file\ndata = pd.read_csv(\"withmonth.csv\")\ndata['Date'] = pd.to_datetime(data['Date'])\n\ndf = data.copy()\nactvy_options = data[\"Activity\"].unique()\nday_options = data[\"Day\"].unique()\ndf['text'] = \"Location: \" + df['Location'] + \", \" + df['Province'] + \" \\nActivity: \" + df[\"Activity\"] + \",\" +\" \\n Weather: \" + df[\"Temp\"].astype(str) + \" \"+ degree_sign + \"C\"\n\nday_df = data.groupby([\"Day\"],as_index=False).sum().sort_values(by=\"Day\")\nyear_df = data.groupby([\"Year\"],as_index=False).sum()\nmonth_df = data.groupby([\"Month\"],as_index=False).sum()\n\nbar_df = data.groupby([\"Activity\"],as_index=False).sum()\n\nday_act_df = data.groupby([\"Activity\",\"Day\"],as_index=False).sum()\nyr_act_df = data.groupby([\"Activity\",\"Year\"],as_index=False).sum()\nmonth_act_df = data.groupby([\"Activity\",\"Month\"],as_index=False).sum()\n\n\n# the style arguments for the sidebar.\nSIDEBAR_STYLE = {\n 'position': 'fixed',\n 'top': 0,\n 'left': 0,\n 'bottom': 0,\n 'width': '20%',\n 'padding': '20px 10px',\n 'background-color': '#f8f9fa'\n}\n\n# 
the style arguments for the main content page.\nCONTENT_STYLE = {\n 'margin-left': '20%',\n 'margin-right': '5%',\n 'padding': '20px 10p'\n}\n\nTEXT_STYLE = {\n 'textAlign': 'center',\n 'color': '#191970'\n}\n\n\ncontrols = dbc.FormGroup(\n [\n html.P('Activities', style={\n 'textAlign': 'center'\n }),\n \n dcc.Checklist(id='the_activity',\n options=[{'label':str(b),'value':b} for b in sorted(df['Activity'].unique())],\n value=[b for b in sorted(df['Activity'].unique())], labelStyle = {'display': 'block', 'cursor': 'pointer', 'margin-left':'20px'}),\n \n html.Br(),\n html.P('Year Slider', style={\n 'textAlign': 'center'\n }),\n dcc.Slider(\n id=\"the_year\",\n min = 2009,\n max =2020,\n value = 2014,\n marks = {\n 2009: '2009',\n 2010: '2010',\n 2011: '2011',\n 2012: '2012',\n 2013: '2013',\n 2014: '2014',\n 2015: '2015',\n 2016: '2016',\n 2017: '2017',\n 2018: '2018',\n 2019: '2019',\n 2020: '2020',} \n )\n ]\n)\n\nsidebar = html.Div(\n [\n html.H2('Parameters', style=TEXT_STYLE),\n html.Hr(),\n controls\n ],\n style=SIDEBAR_STYLE,\n)\n\n\n\ncontent_second_row = dbc.Row(\n [\n dbc.Col(\n dcc.Graph(id='the_map', config = {\"displayModeBar\": False}), md=11\n )\n ]\n)\n\ncontent_third_row = dbc.Row(\n [\n dbc.Col(\n dcc.Graph(id='incidents-bargraph', config = {\"displayModeBar\": False}), width=5,\n ),\n dbc.Col([\n dcc.Dropdown(\n id=\"DayorYear\",\n options=[{'label': 'Day', 'value': 'Day'}, {'label': 'Year','value': 'Year'}],\n value='Year'),\n dcc.Graph(id='incidents-bargraph-days', config = {\"displayModeBar\": False})\n ], width = 5)\n ]\n)\n\n\nfig_wea = go.Figure(data=go.Scatter(x=data[\"Activity\"], y=data[\"Temp\"], mode='markers',marker_color = \"#118ab2\"))\n\nfig_wea.update_layout(title= {\"text\": 'Weather during incidents for each activity',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Activity',\n yaxis_title='Temperature in Celius')\n\nfig_date = go.Figure()\n\nfig_date.add_trace(go.Scatter(x=data[\"Date\"], y=data[\"Fatality\"], name=\"Fatality\",\n line=dict(color='#ef476f', width=4)))\nfig_date.add_trace(go.Scatter(x=data[\"Date\"], y=data[\"Injury\"], name =\"Injury\",\n line=dict(color='#073b4c', width=4)))\nfig_date.update_layout(title= {\"text\": 'ALL counts of Fatalities and Injury over the years (Hover for Individual Date)',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Dates',\n yaxis_title='Count',)\nfig_date.update_xaxes(dtick = \"M5\", tickformat=\"%d %b %Y\")\n\n\n\n\n\n\ncontent_fourth_row = dbc.Row(\n [\n dbc.Col(\n dcc.Graph(id='incidents-line-graph', config = {\"displayModeBar\": False}), md=6\n ),\n dbc.Col(\n dcc.Graph(id=\"scatter-plot\",figure = fig_wea, config = {\"displayModeBar\": False}), md=6\n ),\n ]\n)\n\n\ncontent_fifth_row = dbc.Row(\n [\n dbc.Col(\n dcc.Graph(id='incidents-dateline-graph', figure = fig_date, config = {\"displayModeBar\": False}), md=12\n )\n ]\n)\n\ncontent = html.Div(\n [\n html.H2('Incidents Due To Avalanche', style=TEXT_STYLE),\n html.Hr(),\n\n content_second_row,\n html.Hr(),\n\n\n # dcc.Dropdown(\n # id=\"Activity\",\n # options=[{'label': 'All Activities', 'value': 'All Activities'}, \n # {'label': 'Backcountry Skiing','value': 'Backcountry Skiing'},\n # {'label': 'Heliskiing','value': 'Heliskiing'},\n # {'label': 'Snowboarding','value': 'Snowboarding'},\n # {'label': 'Snowmobiling','value': 'Snowmobiling'},\n # {'label': 'Snow Biking','value': 'Snow Biking'},\n # {'label': 'Ski touring\t','value': 'Ski touring\t'},\n # {'label': 'Mechanized Skiing','value': 'Mechanized Skiing'},\n # 
{'label': 'Skiing','value': 'Skiing'},\n # {'label': 'Lift Skiing Closed','value': 'Lift Skiing Closed'},\n # {'label': 'Snowshoeing & Hiking','value': 'Snowshoeing & Hiking'},\n # {'label': 'Snowshoeing','value': 'Snowshoeing'},\n # {'label': 'Out-of-bounds Skiing','value': 'Out-of-bounds Skiing'},\n # {'label': 'Ice Climbing','value': 'Ice Climbing'}],\n # value='All Activities', style={'width': '49%', 'display': 'inline-block'}),\n\n # dcc.Dropdown(\n # id=\"DayorYear\",\n # options=[{'label': 'Day', 'value': 'Day'}, {'label': 'Year','value': 'Year'}],\n # value='Day',style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),\n content_third_row,\n\n html.Hr(),\n\n html.H3('Compare Between Two Actvitites'),\n\n html.P('Activity1', style={\n 'textAlign': 'left'}),\n dcc.Dropdown(\n id=\"Activity1\",\n options=[{\"label\": x, \"value\": x} for x in actvy_options],\n value='All Activities',style={'width': '49%', 'display': 'inline-block'}),\n \n html.P('Activity2', style={\n 'textAlign': 'left'}),\n dcc.Dropdown(\n id=\"Activity2\",\n options=[{\"label\": x, \"value\": x} for x in actvy_options],\n value='All Activities',style={'width': '49%', 'display': 'inline-block'}),\n \n dcc.RadioItems(\n id= \"TimelineOptions\",\n options=[\n {'label': 'Year', 'value': 'Year'},\n {'label': 'Month', 'value': 'Month'},\n {'label': 'Day', 'value': 'Day'}\n ],value='Year'),\n \n dcc.RadioItems(\n id= \"IncidentOptions\",\n options=[\n {'label': 'Fatality', 'value': 'Fatality'},\n {'label': 'Injury', 'value': 'Injury'}\n ],value='Fatality'),\n \n content_fourth_row,\n html.Hr(),\n content_fifth_row\n ],\n style=CONTENT_STYLE\n)\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\nserver = app.server\napp.layout = html.Div([sidebar, content])\n\n\n\n\n@app.callback(\n Output('the_map','figure'),\n [Input('the_year','value'),\n Input('the_activity', 'value')]\n)\ndef update_figure(year_chosen,activity_chosen):\n # if click_data is None:\n dff=df[(df['Year']==year_chosen) & df['Activity'].isin(activity_chosen)]\n dff.head()\n fig = px.scatter_mapbox(dff, lat=\"Latitude\", lon=\"Longitude\", color=\"Involvement\", size=\"Fatality\",\n color_continuous_scale=px.colors.sequential.Sunset, size_max=10, zoom=5, \n mapbox_style=\"carto-positron\", text= dff[\"text\"], center = dict(lat = 50.25789, lon = -123.228))\n # print(f\"clicked data: {click_data}\")\n # temp_lat = click_data[\"points\"][0][\"lat\"]\n # temp_lon = click_data[\"points\"][0][\"lon\"]\n # temp_df = data[bar_df['Activity'] == Activity]\n\n return (fig)\n\n\n\n@app.callback(\n dash.dependencies.Output('incidents-bargraph', 'figure'),\n [dash.dependencies.Input('the_map', 'clickData')])\ndef update_bar_graph(clickData):\n df_plot_bar = bar_df.copy()\n\n trace1_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n if clickData is not None:\n # if Activity == \"All Activities\":\n # df_plot_bar = bar_df.copy()\n # # print(\"inside all act\")\n # else:\n # df_plot_bar = bar_df[bar_df['Activity'] == Activity]\n # # print(\"inside specific act\")\n print(f\"cliced date: {clickData}\")\n temp_lat = clickData[\"points\"][0][\"lat\"]\n temp_lon = clickData[\"points\"][0][\"lon\"]\n temp_df = data[(data['Latitude']==temp_lat) & (data['Longitude']==temp_lon)]\n trace1_bar = go.Bar(x=temp_df[\"Activity\"], y=temp_df['Fatality'], name='Fatality', 
marker_color = \"#ef476f\")\n trace2_bar = go.Bar(x=temp_df[\"Activity\"], y=temp_df['Injury'], name='Injury', marker_color = \"#073b4c\")\n for act in temp_df[\"Activity\"]:\n Activity = act\n return {\n 'data': [trace1_bar, trace2_bar],\n 'layout':\n go.Layout(\n title='Fatalities and Injuries for {}'.format(Activity),\n barmode='group',clickmode=\"event+select\", yaxis_title=\"Fatalities and Injuries Count\")\n }\n\n # if Activity == \"All Activities\":\n # df_plot_bar = bar_df.copy()\n # else:\n # df_plot_bar = bar_df[bar_df['Activity'] == Activity]\n\n # trace1_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Fatality'], name='Fatality')\n # trace2_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Injury'], name='Injury')\n # if flag == 0:\n # if Activity == \"All Activities\":\n # df_plot_bar = bar_df.copy()\n # else:\n # df_plot_bar = bar_df[bar_df['Activity'] == Activity]\n\n # trace1_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Fatality'], name='Fatality')\n # trace2_bar = go.Bar(x=df_plot_bar[\"Activity\"], y=df_plot_bar['Injury'], name='Injury')\n\n\n # print(\"not none\")\n return {\n 'data': [trace1_bar, trace2_bar],\n 'layout':\n go.Layout(\n title='Fatalities and Injuries for All Activities',xaxis=dict(tickangle = 30),\n barmode='group',clickmode=\"event+select\", yaxis_title=\"Fatalities and Injuries Count\")\n }\n\n\n\n@app.callback(\n dash.dependencies.Output('incidents-bargraph-days', 'figure'),\n [dash.dependencies.Input('DayorYear', 'value'),\n dash.dependencies.Input('incidents-bargraph', 'hoverData'),\n dash.dependencies.Input('the_map', 'clickData')])\ndef update_date_graph(DayorYear,hoverData,clickDataMap):\n # if clickDataMap is None:\n # print(\"inside none\")\n # if Activity == \"All Activities\" and DayorYear == \"Day\":\n # df_plot_DY = day_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n # trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n # elif Activity == \"All Activities\" and DayorYear == \"Year\":\n # df_plot_DY = year_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n # trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n\n # if Activity != \"All Activities\" and DayorYear == \"Year\":\n # df_plot_DY = yr_act_df[yr_act_df['Activity'] == Activity]\n # trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n # trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n # elif Activity != \"All Activities\" and DayorYear == \"Day\":\n # df_plot_DY = day_act_df[day_act_df['Activity'] == Activity]\n # trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n # trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n if DayorYear == \"Day\":\n df_plot_DY = day_df.copy()\n trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n elif DayorYear == \"Year\":\n df_plot_DY = year_df.copy()\n trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], 
name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n \n if hoverData is not None:\n # print(f'Hover: {hoverData}')\n Activity = hoverData[\"points\"][0][\"x\"]\n if DayorYear == \"Day\":\n df_plot_DY = day_act_df[day_act_df['Activity'] == Activity]\n trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n elif DayorYear == \"Year\":\n df_plot_DY = yr_act_df[yr_act_df['Activity'] == Activity]\n trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n # if clickData is None:\n # if DayorYear == \"Day\":\n # df_plot_DY = day_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury')\n\n # elif DayorYear == \"Year\":\n # df_plot_DY = year_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury')\n \n if clickDataMap is not None:\n # print(clickData[\"points\"][0][\"x\"])\n temp_lat = clickDataMap[\"points\"][0][\"lat\"]\n temp_lon = clickDataMap[\"points\"][0][\"lon\"]\n temp_df = data[(data['Latitude']==temp_lat) & (data['Longitude']==temp_lon)]\n \n # if clickData is not None:\n # Activity = clickData[\"points\"][0][\"x\"]\n # else:\n for act in temp_df[\"Activity\"]:\n Activity = act\n if DayorYear == \"Day\":\n df_plot_DY = day_act_df[day_act_df['Activity'] == Activity]\n trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n\n elif DayorYear == \"Year\":\n df_plot_DY = yr_act_df[yr_act_df['Activity'] == Activity]\n trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality', marker_color = \"#ef476f\")\n trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury', marker_color = \"#073b4c\")\n \n# individual activty variable for checkbox and click ... 
see if it works\n # if Activity == \"All Activities\" and DayorYear == \"Day\":\n # df_plot_DY = day_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury')\n\n # elif Activity == \"All Activities\" and DayorYear == \"Year\":\n # df_plot_DY = year_df.copy()\n # trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury')\n\n\n # if Activity != \"All Activities\" and DayorYear == \"Year\":\n # df_plot_DY = yr_act_df[yr_act_df['Activity'] == Activity]\n # trace1_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Year\"], y=df_plot_DY['Injury'], name='Injury')\n\n # elif Activity != \"All Activities\" and DayorYear == \"Day\":\n # df_plot_DY = day_act_df[day_act_df['Activity'] == Activity]\n # trace1_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Fatality'], name='Fatality')\n # trace2_DY = go.Bar(x=df_plot_DY[\"Day\"], y=df_plot_DY['Injury'], name='Injury')\n # ctx = dash.callback_context\n\n # if not ctx.triggered:\n # button_id = 'No clicks yet'\n # else:\n # button_id = ctx.triggered\n # print(\"value:\", button_id)\n # clickData = None\n # print(clickData)\n \n return {\n 'data': [trace1_DY, trace2_DY],\n 'layout':\n go.Layout(\n title='Fatalities and Injuries for each day or year',xaxis={\"dtick\": 1},\n barmode='group', yaxis_title=\"Fatalities and Injuries Count\", xaxis_title = DayorYear)\n }\n\n\n@app.callback(\n dash.dependencies.Output('incidents-line-graph', 'figure'),\n [dash.dependencies.Input('Activity1', 'value'),\n dash.dependencies.Input('Activity2', 'value'),\n dash.dependencies.Input('IncidentOptions', 'value'),\n dash.dependencies.Input('TimelineOptions', 'value')])\ndef update_comparison_graph(Activity1, Activity2, IncidentOptions, TimelineOptions):\n\n fig_compare = go.Figure()\n # df_plot_compare = month_act_df.copy()\n if IncidentOptions == \"Fatality\" and TimelineOptions == \"Year\":\n df_plot_compare_act1 = yr_act_df[yr_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = yr_act_df[yr_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Year\"], y=df_plot_compare_act1[\"Fatality\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Year\"], y=df_plot_compare_act2[\"Fatality\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title={\"text\":'Fatalities for each Year',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Year',\n yaxis_title='Fatalities Count')\n \n elif IncidentOptions == \"Fatality\" and TimelineOptions == \"Month\":\n df_plot_compare_act1 = month_act_df[month_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = month_act_df[month_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Month\"], y=df_plot_compare_act1[\"Fatality\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Month\"], y=df_plot_compare_act2[\"Fatality\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title={\"text\": 'Fatalities for each Month',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Month',\n xaxis_tickformat= \"%B\",\n yaxis_title='Fatalities 
Count')\n \n elif IncidentOptions == \"Fatality\" and TimelineOptions == \"Day\":\n df_plot_compare_act1 = day_act_df[day_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = day_act_df[day_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Day\"], y=df_plot_compare_act1[\"Fatality\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Day\"], y=df_plot_compare_act2[\"Fatality\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title={\"text\": 'Fatalities for each Day',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Day',\n yaxis_title='Fatalities Count')\n\n\n \n elif IncidentOptions == \"Injury\" and TimelineOptions == \"Year\":\n df_plot_compare_act1 = yr_act_df[yr_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = yr_act_df[yr_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Year\"], y=df_plot_compare_act1[\"Injury\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Year\"], y=df_plot_compare_act2[\"Injury\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title={\"text\":'Injuries for each Year',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Year',\n yaxis_title='Injuries Count')\n \n elif IncidentOptions == \"Injury\" and TimelineOptions == \"Month\":\n df_plot_compare_act1 = month_act_df[month_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = month_act_df[month_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Month\"], y=df_plot_compare_act1[\"Injury\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Month\"], y=df_plot_compare_act2[\"Injury\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title={\"text\":'Injuries for each Month',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Month',\n yaxis_title='Injuries Count')\n \n elif IncidentOptions == \"Injury\" and TimelineOptions == \"Day\":\n df_plot_compare_act1 = day_act_df[day_act_df['Activity'] == Activity1]\n df_plot_compare_act2 = day_act_df[day_act_df['Activity'] == Activity2]\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act1[\"Day\"], y=df_plot_compare_act1[\"Injury\"], name=Activity1,\n line=dict(color='#06d6a0', width=4)))\n fig_compare.add_trace(go.Scatter(x=df_plot_compare_act2[\"Day\"], y=df_plot_compare_act2[\"Injury\"], name =Activity2,\n line=dict(color='#E85D04', width=4)))\n fig_compare.update_layout(title= {\"text\": 'Injuries for each Day',\"xanchor\":\"center\", \"x\": 0.5, \"y\": 0.9},\n xaxis_title='Day',\n yaxis_title='Injuries Count')\n return fig_compare\n # return {\n # 'data': [fig_compare],\n # # 'layout':\n # # go.Layout(\n # # title='Fatalities and Injuries for each month')\n # }\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":24221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"132954026","text":"from __future__ import absolute_import, unicode_literals\n\nfrom celery import Celery\nfrom decouple import config\n\nREDIS_URI = 'redis://{host}:{port}/{db}'.format(\n host=config('REDIS_HOST', default='localhost'),\n 
port=config('REDIS_PORT', cast=int, default=6379),\n db=config('REDIS_DB', default=0),\n)\n\napp = Celery('requester', broker=REDIS_URI, include=['requester.tasks'])\n\napp.conf.beat_schedule = {\n 'planner': {\n 'task': 'requester.tasks.failed_queue',\n 'schedule': 5.0,\n },\n 'periodical': {\n 'task': 'requester.tasks.periodical_queue',\n 'schedule': 10.0,\n }\n}\n\nif __name__ == '__main__':\n app.start()\n","sub_path":"requester/celery_worker.py","file_name":"celery_worker.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"214211917","text":"#!/opt/libreoffice5.4/program/python\n# -*- coding: utf-8 -*-\n# Created by modifying urlimport.py in 10.11.loading_modules_from_a_remote_machine_using_import_hooks of Python Cookbook 3rd Edition.\n# import uses its cache until LibreOffice is restarted, so always quit LibreOffice before re-testing when debugging!!!\n# A package that is to be imported must contain an __init__.py.\nimport sys\nimport importlib.abc\nfrom types import ModuleType\ndef _get_links(simplefileaccess, url): # Function returning the list of file and folder names under url.\n\tfoldercontents = simplefileaccess.getFolderContents(url, True) # Get all files and folders under url; returned as full paths.\n\ttdocpath = \"\".join((url, \"/\")) # Build the path prefix to strip off.\n\treturn [content.replace(tdocpath, \"\") for content in foldercontents] # Return only the bare file or folder names.\nclass UrlMetaFinder(importlib.abc.MetaPathFinder): # Implementation of a meta path finder.\n\tdef __init__(self, simplefileaccess, baseurl):\n\t\tself._simplefileaccess = simplefileaccess # simplefileaccess used to access files inside the LibreOffice document\n\t\tself._baseurl = baseurl # path to search for modules\n\t\tself._links = {} # dict caching the lists of file and folder names under baseurl.\n\t\tself._loaders = {baseurl: UrlModuleLoader(simplefileaccess, baseurl)} # dict caching the loaders.\n\tdef find_module(self, fullname, path=None): # find_module is deprecated since Python 3.4, but switching to find_spec() does not work here as-is.\n\t\tif path is None:\n\t\t\tbaseurl = self._baseurl\n\t\telse:\n\t\t\tif not path[0].startswith(self._baseurl):\n\t\t\t\treturn None\n\t\t\tbaseurl = path[0]\n\t\tparts = fullname.split('.')\n\t\tbasename = parts[-1]\n\t\tif basename not in self._links: # Check link cache\n\t\t\tself._links[baseurl] = _get_links(self._simplefileaccess, baseurl)\n\t\tif basename in self._links[baseurl]: # Check if it's a package\n\t\t\tfullurl = \"/\".join((self._baseurl, basename))\n\t\t\tloader = UrlPackageLoader(self._simplefileaccess, fullurl)\n\t\t\ttry: # Attempt to load the package (which accesses __init__.py)\n\t\t\t\tloader.load_module(fullname)\n\t\t\t\tself._links[fullurl] = _get_links(self._simplefileaccess, fullurl)\n\t\t\t\tself._loaders[fullurl] = UrlModuleLoader(self._simplefileaccess, fullurl)\n\t\t\texcept ImportError:\n\t\t\t\tloader = None\n\t\t\treturn loader\n\t\tfilename = \"\".join((basename, '.py'))\n\t\tif filename in self._links[baseurl]: # A normal module\n\t\t\treturn self._loaders[baseurl]\n\t\telse:\n\t\t\treturn None\n\tdef invalidate_caches(self):\n\t\tself._links.clear()\nclass UrlModuleLoader(importlib.abc.SourceLoader): # Module Loader for a URL\n\tdef __init__(self, simplefileaccess, baseurl):\n\t\tself._simplefileaccess = simplefileaccess # simplefileaccess used to access files inside the LibreOffice document\n\t\tself._baseurl = baseurl # path to search for modules\n\t\tself._source_cache = {} # dict caching module sources.\n\tdef module_repr(self, module): # Return the string representing the module.\n\t\treturn '<urlmodule {} from {}>'.format(module.__name__, module.__file__)\n\tdef load_module(self, fullname): # Required method. The argument is the full name used in the import statement.\n\t\tcode = self.get_code(fullname) # Get the module's code object.\n\t\tmod = 
sys.modules.setdefault(fullname, ModuleType(fullname)) # Get the entry in sys.modules, inserting a fresh module object if fullname is not yet a key.\n\t\tmod.__file__ = self.get_filename(fullname) # Full path to the source file.\n\t\tmod.__loader__ = self # The loader itself.\n\t\tmod.__package__ = fullname.rpartition('.')[0] # Package name; empty string when there is no '.' separator.\n\t\texec(code, mod.__dict__) # Execute the code object.\n\t\treturn mod # Return the module object.\n\tdef get_code(self, fullname): # Return the module's code object. Optional extension. The argument is the full name used in the import statement.\n\t\tsrc = self.get_source(fullname)\n\t\treturn compile(src, self.get_filename(fullname), 'exec')\n\tdef get_data(self, path): # Should return the data as a bytes string.\n\t\tpass\n\tdef get_filename(self, fullname): # Return the full path to the source file. The argument is the full name used in the import statement.\n\t\treturn \"\".join((self._baseurl, '/', fullname.split('.')[-1], '.py'))\n\tdef get_source(self, fullname): # Return the module source as text.\n\t\tfilename = self.get_filename(fullname) # Full path to the source file.\n\t\tif filename in self._source_cache: # Return the cached source if it is already there.\n\t\t\treturn self._source_cache[filename]\n\t\ttry:\n\t\t\tinputstream = self._simplefileaccess.openFileRead(filename) # Get an input stream for the source file.\n\t\t\tdummy, b = inputstream.readBytes([], inputstream.available()) # simplefileaccess.getSize(module_tdocurl) returns 0.\n\t\t\tsource = bytes(b).decode(\"utf-8\") # Decode the module source file as utf-8 text.\n\t\t\tself._source_cache[filename] = source # Cache the source.\n\t\t\treturn source # Return the source text.\n\t\texcept:\n\t\t\traise ImportError(\"Can't load {}\".format(filename))\n\tdef is_package(self, fullname): # Return True for packages.\n\t\treturn False\nclass UrlPackageLoader(UrlModuleLoader): # Package loader for a URL\n\tdef load_module(self, fullname): # For a package, fullname corresponds to the folder name.\n\t\tmod = super().load_module(fullname) # Executes __init__.py.\n\t\tmod.__path__ = [self._baseurl] # List of strings specifying the search path inside the package\n\t\tmod.__package__ = fullname # Store the folder name.\n\t\treturn mod # Return the package module object.\n\tdef get_filename(self, fullname): # Return the package's __init__.py.\n\t\treturn \"/\".join((self._baseurl, '__init__.py'))\n\tdef is_package(self, fullname): # Return True for packages.\n\t\treturn True\n_installed_meta_cache = {} # Global dict holding installed meta path finders, to avoid duplicates.\ndef install_meta(simplefileaccess, address): # Utility functions for installing the loader\n\tif address not in _installed_meta_cache: # Only when the path is not yet in the global dict.\n\t\tfinder = UrlMetaFinder(simplefileaccess, address) # Instantiate the meta path finder class that locates modules.\n\t\t_installed_meta_cache[address] = finder # Register the meta path finder in the global dict.\n\t\tsys.meta_path.append(finder) # Register the meta path finder on sys.meta_path.\ndef remove_meta(address): # Utility functions for uninstalling the loader\n\tif address in _installed_meta_cache:\n\t\tfinder = _installed_meta_cache.pop(address)\n\t\tsys.meta_path.remove(finder)\n","sub_path":"Designr2/src/Scripts/python/pythonpath/tdocimport.py","file_name":"tdocimport.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"550361337","text":"from Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom tqdm import tqdm\n\nfh = open('no_overlap.bed','r') # opens the file containing intergenic intervals\nn=1 \nseqs={} # creates an empty dictionary \nlines=fh.readlines() # reads the list of intervals\nfor line in tqdm(lines): # iterates over the list of intervals\n line=line.split('\\t') # splits them by tab \n seq_record = None # sets seq_record to None each time\n with open(line[0]+\".gb\") as handle: # open the corresponding 
GenBank (.gb) file, i.e. the record for chromosome 1, 2, 3, 4, or 5.\n seq_record = SeqIO.read(handle, 'gb') # tells SeqIO the format used\n start=int(line[1]) # sets start position to column 1.\n end=int(line[2]) # sets end position to column 2.\n newseq=None\n if line[5] =='-': # checks the strand; if it is -, it takes the reverse complement of the slice from the corresponding GenBank record (chromosome).\n newseq = seq_record[start:end].reverse_complement() \n else:\n newseq = seq_record[start:end] # if the strand is \"+\" it takes the sequence as it is.\n newseq.id = ('#'+str(n)) # set the fasta sequence id, i.e. #1, #2, #3 ... corresponding to the first interval, second interval, etc. \n newseq.description = (line[0]+':'+line[5]+':'+line[1]+':'+line[2]) # description of the extracted sequence: chromosome, strand, start, end\n seqs[newseq.id]=newseq\n n = n+1\n\noutfile = open('sequences.fasta','w')\nfor k in seqs:\n print(seqs[k].format('fasta') ,file = outfile)\noutfile.close()\nfh.close()\n","sub_path":"2.5. Extract fasta_seqs.py","file_name":"2.5. Extract fasta_seqs.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"648696944","text":"import sqlite3\nimport pandas as pd\nfrom scraper import JobPostingScraper, JobPostingParser\nfrom database import JobPostingDatabase\nfrom pymongo import errors\n\n\nDATABASE_COLUMNS = [\"job_title\", \"start_date\", \"end_date\", \"contact_name\", \"contact_email\",\n \"description\", \"hours\", \"location\", \"work_study\", \"pay_rate\", \"positions_available\"]\n\n# Populate the database with default values for the first time\n\n\ndef seed_database(database=\"test\"):\n scraper = JobPostingScraper()\n job_postings = scraper.getRawJobPostings()\n\n jobs = [JobPostingParser(x).getJob() for x in job_postings]\n\n db = JobPostingDatabase(database=database) \n for job in jobs:\n db.add_job_posting(job)\n\n\"\"\"\nIs there a new job?\n\"\"\"\n\ndef populate_new_jobs(database=\"test\"):\n db = JobPostingDatabase(database=database)\n\n # scrape for current data\n scraper = JobPostingScraper()\n job_postings = scraper.getRawJobPostings()\n\n jobs = [JobPostingParser(x).getJob() for x in job_postings]\n new_jobs = []\n\n for job in jobs:\n try:\n db.add_job_posting(job)\n new_jobs.append(job)\n # Because of unique index, duplicate jobs will throw DuplicateKeyError\n except errors.DuplicateKeyError as e:\n # Found a duplicate job, ignore this job\n continue\n except Exception as e:\n print(e)\n\n return new_jobs\n\n\ndef main():\n seed_database(database=\"prod\") # runs only once. 
NEVER RUN THIS LINE!\n # new_jobs = populate_new_jobs()\n # print(new_jobs)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"jobs_list.py","file_name":"jobs_list.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"565378513","text":"\"\"\"\nMake sure we have archived N0R so that things do not freak out!\n\"\"\"\nfrom __future__ import print_function\nimport datetime\nimport sys\nimport os\n\n\ndef main(argv):\n \"\"\"Do Great Things!\"\"\"\n prod = argv[1]\n now = datetime.datetime.utcnow()\n now = now - datetime.timedelta(minutes=now.minute % 5)\n base = now\n\n miss = []\n for _ in range(12):\n fn = now.strftime((\"/mesonet/ARCHIVE/data/%Y/%m/%d/GIS/uscomp/\" +\n prod + \"_%Y%m%d%H%M.png\"))\n if not os.path.isfile(fn):\n miss.append(now.strftime(\"%Y%m%d_%H%M\"))\n now -= datetime.timedelta(minutes=5)\n\n if not miss:\n print('OK')\n return 0\n print('CRITICAL - %s archive miss N0R %s' % (base.strftime(\"%d_%H%M\"),\n ', '.join(miss),))\n return 2\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"nagios/check_archive_nexrad.py","file_name":"check_archive_nexrad.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"94523342","text":"from __future__ import print_function\n\nimport argparse\n\nimport numpy as np\nimport os\nimport random\nimport re\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\nfrom torchvision import datasets, transforms\n\nimport models\nfrom common import LossType\nfrom models.resnet_expand import BasicBlock\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch CIFAR training with Polarization')\nparser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],\n help='training dataset (default: cifar10)')\nparser.add_argument(\"--loss-type\", \"-loss\", dest=\"loss\",\n choices=list(LossType.loss_name().keys()), help=\"the type of loss\")\nparser.add_argument('--lbd', type=float, default=0.0001,\n help='scale sparse rate (i.e. lambda in eq.2) (default: 0.0001)')\nparser.add_argument('--alpha', type=float, default=1.,\n help='coefficient of mean term in polarization regularizer. 
deprecated (default: 1)')\nparser.add_argument('--t', type=float, default=1.,\n help='coefficient of L1 term in polarization regularizer (default: 1)')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=256, metavar='N',\n help='input batch size for testing (default: 256)')\nparser.add_argument('--epochs', type=int, default=160, metavar='N',\n help='number of epochs to train (default: 160)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--decay-epoch', type=float, nargs='*', default=[0.5, 0.75],\n help=\"the epoch to decay the learning rate (default 0.5, 0.75)\")\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, metavar='S', default=None,\n help='random seed (default: a random int)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--save', default='./logs', type=str, metavar='PATH',\n help='path to save prune model (default: current directory)')\nparser.add_argument('--arch', default='vgg', type=str,\n help='architecture to use')\nparser.add_argument('--log', default='./log', type=str, metavar='PATH',\n help='path to tensorboard log (default: ./log)')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1],\n help='LR is multiplied by gamma on decay-epoch, number of gammas should be equal to decay-epoch')\nparser.add_argument('--bn-init-value', default=0.5, type=float,\n help='initial value of bn weight (default: 0.5, following NetworkSlimming)')\nparser.add_argument('--retrain', type=str, default=None, metavar=\"PATH\",\n help=\"Pruned checkpoint for RETRAIN model.\")\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nargs.loss = LossType.from_string(args.loss)\nargs.decay_epoch = sorted([int(args.epochs * i if i < 1 else i) for i in args.decay_epoch])\nif not args.seed:\n args.seed = random.randint(500, 1000)\n\nif args.retrain:\n if not os.path.exists(args.retrain) or not os.path.isfile(args.retrain):\n raise ValueError(f\"Path error: {args.retrain}\")\n\nprint(args)\n\n# reproducibility\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\nif args.dataset == 'cifar10':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data.cifar10', train=True, download=True,\n transform=transforms.Compose([\n transforms.Pad(4),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), 
(0.2023, 0.1994, 0.2010))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\nelse:\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data.cifar100', download=True, train=True,\n transform=transforms.Compose([\n transforms.Pad(4),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data.cifar100', download=True, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\nnum_classes = 10 if args.dataset == 'cifar10' else 100\n\nif not args.retrain:\n if re.match(\"resnet[0-9]+\", args.arch):\n model = models.__dict__[args.arch](num_classes=num_classes,\n bn_init_value=args.bn_init_value, aux_fc=False)\n elif re.match(\"vgg[0-9]+\", args.arch):\n model = models.__dict__[args.arch](num_classes=num_classes,\n bn_init_value=args.bn_init_value)\n pass\n else:\n raise NotImplementedError(\"Do not support {}\".format(args.arch))\n\nelse: # initialize model for retraining with configs\n checkpoint = torch.load(args.retrain)\n if args.arch == \"resnet56\":\n model = models.resnet_expand.resnet56(cfg=checkpoint['cfg'], num_classes=num_classes,\n aux_fc=False)\n # initialize corresponding masks\n if \"bn3_masks\" in checkpoint:\n bn3_masks = checkpoint[\"bn3_masks\"]\n bottleneck_modules = list(filter(lambda m: isinstance(m[1], BasicBlock), model.named_modules()))\n assert len(bn3_masks) == len(bottleneck_modules)\n for i, (name, m) in enumerate(bottleneck_modules):\n if isinstance(m, BasicBlock):\n if isinstance(m.expand_layer, nn.Identity):\n continue\n mask = bn3_masks[i]\n assert mask[1].shape[0] == m.expand_layer.idx.shape[0]\n m.expand_layer.idx = np.argwhere(mask[1].clone().cpu().numpy()).squeeze().reshape(-1)\n else:\n raise NotImplementedError(\"Key bn3_masks expected in checkpoint.\")\n\n elif args.arch == \"vgg16_linear\":\n model = models.__dict__[args.arch](num_classes=num_classes, cfg=checkpoint['cfg'])\n else:\n raise NotImplementedError(f\"Do not support {args.arch} for retrain.\")\n\nif args.cuda:\n model.cuda()\n\n# build optim\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n\nif args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n\n # reinitialize model with resumed config\n if \"vgg\" in args.arch and 'cfg' in checkpoint:\n model = models.__dict__[args.arch](num_classes=num_classes,\n bn_init_value=args.bn_init_value)\n if args.cuda:\n model.cuda()\n\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print(\"=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}\"\n .format(args.resume, checkpoint['epoch'], best_prec1))\n else:\n raise ValueError(\"=> no checkpoint found at 
'{}'\".format(args.resume))\nelse:\n checkpoint = None\n\nhistory_score = np.zeros((args.epochs - args.start_epoch + 1, 6))\n\n\ndef bn_weights(model):\n weights = []\n bias = []\n for name, m in model.named_modules():\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n weights.append((name, m.weight.data))\n bias.append((name, m.bias.data))\n\n return weights, bias\n pass\n\n\ndef adjust_learning_rate(optimizer, epoch, gammas, schedule):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr\n assert len(gammas) == len(schedule), \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(gammas, schedule):\n if epoch >= step:\n lr = lr * gamma\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\n# additional subgradient descent on the sparsity-induced penalty term\ndef updateBN():\n if args.loss == LossType.L1_SPARSITY_REGULARIZATION:\n sparsity = args.lbd\n bn_modules = list(filter(lambda m: (isinstance(m[1], nn.BatchNorm2d) or isinstance(m[1], nn.BatchNorm1d)),\n model.named_modules()))\n bn_modules = list(map(lambda m: m[1], bn_modules)) # remove module name\n for m in bn_modules:\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n m.weight.grad.data.add_(sparsity * torch.sign(m.weight.data))\n else:\n raise NotImplementedError(f\"Do not support loss: {args.loss}\")\n\n\ndef clamp_bn(model):\n bn_modules = list(filter(lambda m: isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d), model.modules()))\n for m in bn_modules:\n m.weight.data.clamp_(0, 1)\n\n\ndef set_bn_zero(model: nn.Module, threshold=0.0) -> (nn.Module, int):\n \"\"\"\n Set bn bias to zero\n Note: The operation is inplace. 
Parameters of the model will be changed!\n :param model: to set\n :param threshold: set bn bias to zero if corresponding lambda <= threshold\n :return modified model, the number of zero bn channels\n \"\"\"\n with torch.no_grad():\n mask_length = 0\n for name, sub_module in model.named_modules():\n # only process bn modules\n if not (isinstance(sub_module, nn.BatchNorm1d) or isinstance(sub_module, nn.BatchNorm2d)):\n continue\n\n mask = sub_module.weight.detach() <= threshold\n sub_module.weight[mask] = 0.\n sub_module.bias[mask] = 0.\n\n mask_length += torch.sum(mask).item()\n\n return model, mask_length\n\n\ndef bn_sparsity(model, loss_type, sparsity, t, alpha):\n \"\"\"\n\n :type model: torch.nn.Module\n :type alpha: float\n :type t: float\n :type sparsity: float\n :type loss_type: LossType\n \"\"\"\n bn_modules = list(\n filter(lambda m: (isinstance(m[1], nn.BatchNorm2d) or isinstance(m[1], nn.BatchNorm1d)), model.named_modules()))\n bn_modules = list(map(lambda m: m[1], bn_modules)) # remove module name\n\n if loss_type == LossType.POLARIZATION:\n n_ = sum(map(lambda m: m.weight.data.shape[0], bn_modules))\n bn_weights_mean = torch.sum(torch.stack(list(map(lambda m: torch.sum(m.weight), bn_modules)))) / n_\n\n sparsity_loss = 0\n for m in bn_modules:\n sparsity_term = t * torch.sum(torch.abs(m.weight)) - torch.sum(\n torch.abs(m.weight - alpha * bn_weights_mean))\n sparsity_loss += sparsity * sparsity_term\n\n return sparsity_loss\n else:\n raise ValueError()\n\n pass\n\n\ndef train(epoch):\n model.train()\n global history_score, global_step\n avg_loss = 0.\n avg_sparsity_loss = 0.\n train_acc = 0.\n total_data = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n output = model(data)\n if isinstance(output, tuple):\n output, output_aux = output\n loss = F.cross_entropy(output, target)\n\n # logging\n avg_loss += loss.data.item()\n pred = output.data.max(1, keepdim=True)[1]\n train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()\n total_data += target.data.shape[0]\n\n if args.loss in {LossType.POLARIZATION}:\n sparsity_loss = bn_sparsity(model, args.loss, args.lbd,\n t=args.t, alpha=args.alpha)\n loss += sparsity_loss\n avg_sparsity_loss += sparsity_loss.data.item()\n loss.backward()\n if args.loss in {LossType.L1_SPARSITY_REGULARIZATION}:\n updateBN()\n optimizer.step()\n if args.loss in {LossType.POLARIZATION, }:\n clamp_bn(model)\n global_step += 1\n if batch_idx % args.log_interval == 0:\n print('Step: {} Train Epoch: {} [{}/{} ({:.1f}%)]\\tLoss: {:.6f}'.format(\n global_step, epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.data.item()))\n\n history_score[epoch][0] = avg_loss / len(train_loader)\n history_score[epoch][1] = float(train_acc) / float(total_data)\n history_score[epoch][3] = avg_sparsity_loss / len(train_loader)\n pass\n\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n if isinstance(output, tuple):\n output, output_aux = output\n test_loss += F.cross_entropy(output, target, size_average=False).data.item() # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * float(correct) / len(test_loader.dataset)))\n return float(correct) / float(len(test_loader.dataset))\n\n\ndef save_checkpoint(state, is_best, filepath):\n state['args'] = args\n\n torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))\n if is_best:\n shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))\n\n\nbest_prec1 = 0.\nglobal_step = 0\n\nwriter = SummaryWriter(logdir=args.log)\nfor epoch in range(args.start_epoch, args.epochs):\n current_learning_rate = adjust_learning_rate(optimizer, epoch, args.gammas, args.decay_epoch)\n print(\"Start epoch {}/{} with learning rate {}...\".format(epoch, args.epochs, current_learning_rate))\n\n weights, bias = bn_weights(model)\n for bn_name, bn_weight in weights:\n writer.add_histogram(\"bn/\" + bn_name, bn_weight, global_step=epoch)\n for bn_name, bn_bias in bias:\n writer.add_histogram(\"bn_bias/\" + bn_name, bn_bias, global_step=epoch)\n\n train(epoch)\n\n prec1 = test()\n history_score[epoch][2] = prec1\n np.savetxt(os.path.join(args.save, 'record.txt'), history_score, fmt='%10.5f', delimiter=',')\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, filepath=args.save)\n\n # write the tensorboard\n writer.add_scalar(\"train/average_loss\", history_score[epoch][0], epoch)\n writer.add_scalar(\"train/sparsity_loss\", history_score[epoch][3], epoch)\n writer.add_scalar(\"train/train_acc\", history_score[epoch][1], epoch)\n writer.add_scalar(\"train/lr\", optimizer.param_groups[0]['lr'], epoch)\n writer.add_scalar(\"val/acc\", prec1, epoch)\n writer.add_scalar(\"val/best_acc\", best_prec1, epoch)\n\nprint(\"Best accuracy: \" + str(best_prec1))\nhistory_score[-1][0] = best_prec1\nnp.savetxt(os.path.join(args.save, 'record.txt'), history_score, fmt='%10.5f', delimiter=',')\n\n\n# output pruning report\ndef __search_threshold(weight, alg: str):\n if alg not in [\"fixed\", \"grad\", \"search\"]:\n raise NotImplementedError()\n\n hist_y, hist_x = np.histogram(weight.data.cpu().numpy(), bins=100, range=(0, 1))\n if alg == \"search\":\n for i in range(len(hist_x) - 1):\n if hist_y[i] == hist_y[i + 1]:\n return hist_x[i]\n elif alg == \"grad\":\n hist_y_diff = np.diff(hist_y)\n for i in range(len(hist_y_diff) - 1):\n if hist_y_diff[i] <= 0 <= hist_y_diff[i + 1]:\n return hist_x[i + 1]\n elif alg == \"fixed\":\n return hist_x[1]\n return 0\n\n\nwriter.close()\n\nprint(\"Best accuracy: \" + 
str(best_prec1))\n","sub_path":"cifar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"189511337","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 17 00:44:45 2019\n\n@author: hagar\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport re\n\ndef Encode (nbits,integ):\n st=\"\"\n p=bin(integ)\n for l in range (nbits-(len(p)-2)): \n st+='0'\n for k in range(2,len(p)): # 0b100 so discard 0b\n st+=p[k]\n return st\n\nimg = cv2.imread('Input_Sample.bmp')\n#img = cv2.imread('img_1.jpg')\nx,y,z=(img.shape)\nimage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ns=\"\"\nfor i in range(x):\n for j in range(y):\n if image[i][j] <128:\n s+='0'\n else:\n s+='1'\n \nprint(s)\n# print(len(s)) # (28*28)=784\n# m=max(max(len(i) for i in re.findall(r'1+', s)),max(len(i) for i in re.findall(r'0+', s)))\n# bits =int(np.ceil(np.log2(m)))\nbits=7\nprint(bits)\ncompressed =\"\"\nnow=s[0] \ncount=1\nfor i in range (1,len(s)):\n if s[i] != now or count == 2**bits-1:\n compressed+=now+Encode(bits,count)\n now=s[i]\n count=1\n else :\n count+=1\ncompressed+=now+Encode(bits,count)\n# print(compressed)\nprint(len(compressed))\nwith open (\"compimg.txt\",'w') as f:\n f.write(compressed)\nprint(28*28*8)\n","sub_path":"Scripts/binaryimage.py","file_name":"binaryimage.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"28124324","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\n\n\ndef mkdir_(path):\n if not os.path.isdir(path):\n mkdir_(os.path.split(path)[0])\n else:\n return\n if not os.path.exists(path):\n os.mkdir(path)\n\n\nclass BdwmPipeline(object):\n def process_item(self, item, spider):\n path = item['path']\n text = item['text']\n filename = item['filename']\n if not os.path.exists(path):\n mkdir_(path)\n with open(path + filename, 'w') as f:\n f.write(text)\n return item\n","sub_path":"pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"400810808","text":"from flask import Flask, render_template, request\nimport mysql.connector\n\napp = Flask(__name__)\n\nconfig = {\n 'user': 'root',\n 'password': 'root',\n 'host': 'db',\n 'port': '3306',\n 'database': 'project'\n }\nmydb = mysql.connector.connect(**config)\n\n@app.route('/')\ndef hello_world():\n\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef getvalue():\n sql = 'SQL'\n name = 'name'\n phone = 'phone'\n search_name = 'search_name'\n\n block = 0\n try:\n block = 1\n search_name = request.form['search_name']\n sql = \"SELECT id_phone, phone, name FROM phone WHERE name like '%\" + str(search_name) + \"%'\"\n except:\n try:\n block = 2\n name = request.form['name']\n phone = request.form['phone']\n except:\n pass\n\n if block == 1: # Search \n mycursor = mydb.cursor()\n mycursor.execute(sql)\n myresult = mycursor.fetchall()\n\n name, phone, index = [], [], []\n i = 0\n for x in myresult:\n i += 1\n index.append(i)\n name.append(x[1])\n phone.append(x[2])\n\n index.reverse()\n name.reverse()\n phone.reverse()\n\n data = []\n s = []\n count = len(index)\n\n for i in range(count):\n s = []\n s.append( index.pop() 
)\n s.append( name.pop() )\n s.append( phone.pop() )\n data.append( s )\n\n if block == 2: # insert\n sum = []\n sum.append(phone)\n sum.append(name)\n val = tuple(sum)\n mycursor = mydb.cursor()\n sql = \"INSERT INTO phone (phone, name) VALUES (%s, %s)\"\n mycursor.execute(sql, val)\n\n # show all\n sql = \"SELECT id_phone, phone, name FROM phone\"\n mycursor.execute(sql)\n myresult = mycursor.fetchall()\n\n name, phone, index = [], [], []\n i = 0\n for x in myresult:\n i += 1\n index.append(i)\n name.append(x[1])\n phone.append(x[2])\n\n index.reverse()\n name.reverse()\n phone.reverse()\n\n data = []\n s = []\n count = len(index)\n\n for i in range(count):\n s = []\n s.append( index.pop() )\n s.append( name.pop() )\n s.append( phone.pop() )\n data.append( s )\n\n return render_template('index.html', data=data)\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"570409340","text":"\"\"\"\nThis file contains important utility functions used during training, validation and testing.\n\n@author: Aditya Vora\n\n\"\"\"\n\nimport glob\nimport os\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport cv2\n\n\n\ndef get_density_map_gaussian(points, d_map_h, d_map_w):\t\t# the last two parameters are presumably compared against the final feed-forward output; judging from the algorithm they should not be needed\n\t\"\"\"\n\tCreates density maps from ground truth point locations\n\t:param points: [x,y] x: along width, y: along height\n\t:param d_map_h: height of the density map\n\t:param d_map_w: width of the density map\n\t:return: density map\n\t\"\"\"\n\t'''\n\tif d_map_h%4 == 0:\n\t\td_map_h = int(0.25*d_map_h)\n\telse: d_map_h = int(0.25*d_map_h)+1\t# compute height/width according to 'same' pooling.\n\tif d_map_w%4 == 0:\n\t\td_map_w = int(0.25*d_map_w)\n\telse: d_map_w = int(0.25*d_map_w)+1# used to scale the size down before the image is fetched.\n\t'''\n\td_map_h = 0.5*d_map_h\n\tif d_map_h!=int(d_map_h):\n\t\td_map_h = int(d_map_h)+1\n\td_map_h = 0.5*d_map_h\n\tif d_map_h!=int(d_map_h):\n\t\td_map_h = int(d_map_h)+1# finished shrinking to 1/4\n\td_map_w = 0.5*d_map_w\n\tif d_map_w!=int(d_map_w):\n\t\td_map_w = int(d_map_w)+1\n\td_map_w = 0.5*d_map_w\n\tif d_map_w!=int(d_map_w):\n\t\td_map_w = int(d_map_w)+1# finished shrinking to 1/4\n\td_map_h = int(d_map_h)\n\td_map_w = int(d_map_w)\n\n\tim_density = np.zeros(shape=(d_map_h,d_map_w), dtype=np.float32)\n\n\tif np.shape(points)[0] == 0: # if the input data is empty\n\t\tsys.exit()\n\n\tfor i in range(np.shape(points)[0]): # iterate over every record\n\n\t\tf_sz = 3.5# originally 15; since the whole image is being shrunk, scale this down roughly as well.\n\t\tsigma = 4\n\n\t\tgaussian_kernel = get_gaussian_kernel(f_sz, f_sz, sigma)\t\t # get the Gaussian kernel with width/height f_sz\n\t\t\t\t\t\n\t\tx = min(d_map_w, max(1, np.abs(np.int32(np.floor(points[i, 0]/4)))))\n\t\ty = min(d_map_h, max(1, np.abs(np.int32(np.floor(points[i, 1]/4)))))\n\n\t\tif(int(x/4) > d_map_w or int(y/4) > d_map_h):\n\t\t\tcontinue\n\n\t\tx1 = x - np.int32(np.floor(f_sz / 2))\n\t\ty1 = y - np.int32(np.floor(f_sz / 2))\n\t\tx2 = x + np.int32(np.floor(f_sz / 2))\n\t\ty2 = y + np.int32(np.floor(f_sz / 2))\n\n\t\tdfx1 = 0\n\t\tdfy1 = 0\n\t\tdfx2 = 0\n\t\tdfy2 = 0\n\n\t\tchange_H = False\n\n\t\tif(x1 < 1):\n\t\t\tdfx1 = np.abs(x1)+1\n\t\t\tx1 = 1\n\t\t\tchange_H = True\n\n\t\tif(y1 < 1):\n\t\t\tdfy1 = np.abs(y1)+1\n\t\t\ty1 = 1\n\t\t\tchange_H = True\n\n\t\tif(x2 > d_map_w):\n\t\t\tdfx2 = x2 - d_map_w\n\t\t\tx2 = d_map_w\n\t\t\tchange_H = True\n\n\t\tif(y2 > d_map_h):\n\t\t\tdfy2 = y2 - d_map_h\n\t\t\ty2 = d_map_h\n\t\t\tchange_H = True\n\n\t\tx1h = 1+dfx1\n\t\ty1h = 1+dfy1\n\t\tx2h = f_sz - dfx2\n\t\ty2h = f_sz - dfy2\n\n\t\tif (change_H == True):\n\t\t\tf_sz_y = np.double(y2h - y1h + 1)\n\t\t\tf_sz_x = np.double(x2h - x1h + 1)\t# compute the size of the region to be convolved (the region used for convolution)\n\n\t\t\tgaussian_kernel = get_gaussian_kernel(f_sz_x, f_sz_y, sigma)\n\n\t\tim_density[y1-1:y2,x1-1:x2] = im_density[y1-1:y2,x1-1:x2] + gaussian_kernel\n\treturn im_density\n\ndef get_gaussian_kernel(fs_x, fs_y, sigma):\n\t\"\"\"\n\tCreate a 2D gaussian kernel\n\t:param fs_x: filter width along x axis\n\t:param fs_y: filter width along y axis\n\t:param sigma: gaussian width\n\t:return: 2D Gaussian filter of [fs_y x fs_x] dimension\n\t\"\"\"\n\tgaussian_kernel_x = cv2.getGaussianKernel(ksize=int(fs_x), sigma=sigma)\n\tgaussian_kernel_y = cv2.getGaussianKernel(ksize=int(fs_y), sigma=sigma)\n\tgaussian_kernel = gaussian_kernel_y * gaussian_kernel_x.T\n\treturn gaussian_kernel\n\ndef compute_abs_err(pred, gt):\n\t\"\"\"\n\tComputes mean absolute error between the predicted density map and ground truth\n\t:param pred: predicted density map\n\t:param gt: ground truth density map\n\t:return: abs |pred - gt|\n\t\"\"\"\n\treturn np.abs(np.sum(pred[:]) - np.sum(gt[:]))\n\ndef create_session(log_dir, session_id):\n\t\"\"\"\n\tModule to create a session folder. It will create a folder with a proper session\n\tid and return the session path.\n\t:param log_dir: root log directory\n\t:param session_id: ID of the session\n\t:return: path of the session id folder\n\t\"\"\"\n\tfolder_path = os.path.join(log_dir, 'session-'+str(session_id))\n\tif os.path.exists(folder_path):\n\t\tprint ('Session already taken. It will create a different session id.')# so it is best to delete the logs folder every time\n\t\t\n\t\t#sys.exit()\n\telse:\n\t\tos.makedirs(folder_path)\n\treturn folder_path\n\ndef get_file_id(filepath):\n\treturn os.path.splitext(os.path.basename(filepath))[0]\n\ndef get_data_list(data_root, mode='train'):\n\n\t\"\"\"\n\tReturns a list of images that are to be used during training, validation and testing.\n\tIt looks into various folders depending on the mode and prepares the list.\n\t:param mode: selection of appropriate mode from train, validation and test.\n\t:return: a list of filenames of images and corresponding ground truths after random shuffling.\n\t\"\"\"\n\n\tif mode == 'train':\n\t\timagepath = os.path.join(data_root, 'train_data', 'images')\n\t\tgtpath = os.path.join(data_root, 'train_data', 'ground_truth')\n\n\telif mode == 'valid':\n\t\timagepath = os.path.join(data_root, 'valid_data', 'images')\n\t\tgtpath = os.path.join(data_root, 'valid_data', 'ground_truth')\n\n\telse:\n\t\timagepath = os.path.join(data_root, 'test_data', 'images')\n\t\tgtpath = os.path.join(data_root, 'test_data', 'ground_truth')\n\n\timage_list = [file for file in glob.glob(os.path.join(imagepath,'*.jpg'))]\n\tgt_list = []\n\n\tfor filepath in image_list:\n\t\tfile_id = get_file_id(filepath)\n\t\tgt_file_path = os.path.join(gtpath, 'GT_'+ file_id + '.mat')\n\t\tgt_list.append(gt_file_path)\n\n\txy = list(zip(image_list, gt_list))\t\t# the list holds tuples\n\trandom.shuffle(xy)\t\t # updates the data in place\n\ts_image_list, s_gt_list = zip(*xy)\t# zip(*) can be understood as unzipping\n\n\treturn s_image_list, s_gt_list\n\ndef reshape_tensor(tensor,channel):\n\t\"\"\"\n\tReshapes the input tensor appropriately for the network input\n\ti.e. [1, tensor.shape[0], tensor.shape[1], channel]\n\t:param tensor: input tensor\n\t:return: reshaped tensor\n\t\"\"\"\n\tr_tensor = np.reshape(tensor, newshape=(1, tensor.shape[0], tensor.shape[1], channel))\n\treturn r_tensor\n\ndef save_weights(graph, fpath):\n\t\"\"\"\n\tModule to save the weights of the network into a numpy array.\n\tSaves the weights in .npz file format\n\t:param graph: Graph whose weights needs to be saved.\n\t:param fpath: filepath where the weights needs to be saved.\n\t:return:\n\t\"\"\"\n\tsess = tf.get_default_session()\n\tvariables = graph.get_collection(\"variables\")\n\tvariable_names = [v.name for v in variables]\n\tkwargs = dict(zip(variable_names, sess.run(variables)))\n\tnp.savez(fpath, **kwargs)\n\ndef load_weights(graph, fpath):\n\t\"\"\"\n\tLoad the weights to the network. Used during transfer learning and for making predictions.\n\t:param graph: Computation graph on which weights needs to be loaded\n\t:param fpath: Path where the model weights are stored.\n\t:return:\n\t\"\"\"\n\tsess = tf.get_default_session()\n\tvariables = graph.get_collection(\"variables\")\n\tdata = np.load(fpath)\n\tfor v in variables:\n\t\tif v.name not in data:\n\t\t\tprint(\"could not load data for variable='%s'\" % v.name)\n\t\t\tcontinue\n\t\tprint(\"assigning %s\" % v.name)\n\t\tsess.run(v.assign(data[v.name]))\n\ndef labelmap(img,loc):\n\timg[loc.astype('int')] = 255\n\treturn img\n\ndef get_after_epoch(sess_path,model_final):\n\tmodel_list = glob.glob(sess_path+f'/weights.{model_final}.*.npz')\n\treturn model_list\n\ndef img_padding(img,shape):# 'shape' here takes the form returned by img.shape as its argument; this function is meant to be used before the extra dimension is added in reshape\n\tif len(img.shape)==3:\n\t\timg_t = 255.0*np.ones((shape[0],shape[1],3))\n\telse:img_t = np.zeros((shape[0],shape[1]))# 3 channels for color images, the other case for the density map; img_t below needs the third dimension\n\traw_shape = img.shape\n\traw_h = img.shape[0]\n\traw_w = img.shape[1]\n\tif len(img.shape)==3:\n\t\timg_t[:raw_h,:raw_w,:] = img\n\telse:img_t[:raw_h,:raw_w] = img\n\treturn img_t\n\ndef get_size_final(d_map_h,d_map_w):\n\td_map_h = 0.5*d_map_h\n\tif d_map_h!=int(d_map_h):\n\t\td_map_h = int(d_map_h)+1\n\td_map_h = 0.5*d_map_h\n\tif d_map_h!=int(d_map_h):\n\t\td_map_h = int(d_map_h)+1# finished shrinking to 1/4\n\td_map_w = 0.5*d_map_w\n\tif d_map_w!=int(d_map_w):\n\t\td_map_w = int(d_map_w)+1\n\td_map_w = 0.5*d_map_w\n\tif d_map_w!=int(d_map_w):\n\t\td_map_w = int(d_map_w)+1# finished shrinking to 1/4\n\treturn int(d_map_h),int(d_map_w)\n\ndef rm_weights(path):\n\tmpath = os.path.split(path)[0]\n\tfilels = os.listdir(mpath)\n\tfilels = [os.path.join(mpath,i) for i in filels if '.npz' in i]\n\tmodel_now = os.path.split(path)[-1]\n\tmodel_now = model_now.split('.')[1]\n\tmodel_now = eval(model_now)\n\tresls = [os.path.join(mpath,f'weights.{i}.20.npz') for i in range(1,model_now)]\n\tresls_1 = [os.path.join(mpath,f'weights.{i}.100.npz') for i in range(1,model_now)]\n\tresls = resls+resls_1\n\tdel resls_1\n\tresls.append(path)\n\tresls.append(os.path.join(mpath,f'weights.{model_now}.20.npz'))\n\tresls_now = [os.path.join(mpath,f'weights.{model_now}.{i}.npz') for i in range(100,model_now,100)]\n\tresls = resls+resls_now\n\tdel resls_now\n\tfor i in filels:\n\t\tif i not in resls:\n\t\t\tos.remove(i)# delete the weight files","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"482463121","text":"\"\"\"\nhttps://practice.geeksforgeeks.org/problems/smallest-positive-missing-number/0\n\nYou are given an unsorted array with both positive 
and negative elements. You have to find the smallest positive number missing from the array in O(n) time using constant extra space.\n\nInput:\nFirst line consists of T test cases. First line of every test case consists of N, denoting the number of elements in array. Second line of every test case consists of elements in array.\n\nOutput:\nSingle line output, print the smallest positive number missing.\n\nConstraints:\n1<=T<=100\n1<=N<=100\n\nExample:\nInput:\n2\n5\n1 2 3 4 5\n5\n0 -10 1 -20\nOutput:\n6\n2\n\n\"\"\"\n\ndef parent(i):\n    return int((i-1)/2)\ndef left(i):\n    return 2*i + 1\ndef right(i):\n    return 2*i + 2\n\n\ndef heapify(arr,n,i):\n    small = i\n\n    if left(i) < n and arr[left(i)] < arr[i]:\n        small = left(i)\n    \n    if right(i) < n and arr[right(i)] < arr[small]:\n        small = right(i)\n\n    # swap two elements\n    if small != i:\n        arr[small],arr[i] = arr[i],arr[small]\n        heapify(arr,n,small)\n\n\ndef extract_min(arr):\n    n = len(arr)\n    item = arr[0]\n    arr[0],arr[n-1] = arr[n-1],arr[0]\n    arr.pop()\n    heapify(arr,n-1,0)\n    return item\n\ndef smallest_positive(arr,n):\n    \n    # for i in range(n):\n    #     if arr[i] < 0:\n    #         arr[i] = 100000000\n    \n    for i in range(int(n/2),-1,-1):\n        heapify(arr,n,i)\n    prev = None\n    while(len(arr)>0):\n        item = extract_min(arr)\n        if item <= 0:\n            continue\n        if prev is None and item != 1:\n            return 1\n        if prev is None or prev == item or prev+1 == item:\n            prev = item\n            continue\n        else:\n            return prev+1\n    return 1 if prev is None else prev + 1\n\ndef main():\n    t = int(input())\n    for i in range(t):\n        n = int(input())\n        arr = list(map(int,input().split()))\n        print(smallest_positive(arr,n))\n\n\nif __name__ == '__main__':\n    main()\n    ","sub_path":"Arrays/smallest_positive_number.py","file_name":"smallest_positive_number.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428391166","text":"# Write a program that first asks for two \n# integers (minimum and maximum) and then asks for \n# integers that lie between them. The program ends \n# when a number outside the two \n# initial values is entered. The program finishes \n# by printing how many numbers were entered.\nnumero1=int(input(\"Enter the first number: \"))\nnumero2=int(input(\"Enter the second number: \"))\ncontador=0\nif numero1>numero2:\n    mayor=numero1\n    menor=numero2\nelse:\n    mayor=numero2\n    menor=numero1\nprint(\"Enter a number between\",menor,\"and\",mayor)\notronumero=int(input())\nwhile otronumero>=menor and otronumero<=mayor:\n    contador+=1\n    print(\"Enter a number between\",menor,\"and\",mayor)\n    otronumero=int(input())\nif contador!=1:\n    cadena=\"numbers\"\nelse:\n    cadena=\"number\"\nprint(\"You have entered\",contador,cadena)","sub_path":"movaiva/Python/Boletin_5/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203474835","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n#from rest_framework import routers\nfrom api import views\n\n'''\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\n'''\n\nurlpatterns = [\n    #url(r'^', include(router.urls)),\n    url(r'^admin/', admin.site.urls),\n    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n    url(r'^', include('ui.urls')),\n    url(r'^api/', include('api.urls')),\n    url(r'^region/', include('region.urls')),\n]\n","sub_path":"eigencities/eigencities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"101285394","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport spacy\nimport pickle\nfrom spacy.lang.en import STOP_WORDS\n\nnlp = spacy.load('en_core_web_md')\nengine = create_engine('mysql+pymysql://root:root@localhost:3306/personality_1', encoding=\"utf-8\", echo=True)\ndf_all = pd.read_sql_table(\"essays_youtube\", engine)\n\npunctuations = [',', '.', ':', ';', '?', '!', '(', ')', '[', ']',\n                '@', '&', '#', '%', '$', '{', '}', '--', '---', '-', '']\n\nextra_stop_words = []\nfor item in df_all[\"TEXT\"]:\n    text = nlp(item)\n    for token in text:\n        str_token = str(token)\n        if str_token not in punctuations:\n            for char in str_token:\n                if char not in \"1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n                    extra_stop_words.append(str_token)\n                    break\n\nfor word in extra_stop_words:\n    STOP_WORDS.add(word)\n\nfor pun in punctuations:\n    STOP_WORDS.add(pun)\n\nprint(STOP_WORDS)\nwith open('./stop_word_youtube.set', 'wb+') as f:\n    pickle.dump(STOP_WORDS,f)\n\n\nwith open('./stop_word_youtube.set','rb') as f:\n    HH=pickle.load(f)\n    print(HH,type(HH))","sub_path":"similarity/construct_stop_word_youtube.py","file_name":"construct_stop_word_youtube.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"451754","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Kmeans\n\n@author: Peter Pigler\n\"\"\"\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom random import randint\n\n\ndef kmeans(Data, Param):\n\n    if hasattr(Param, 'c'): c = Param.c\n    else: c = 6\n    if hasattr(Param, 'm'): m = Param.m\n    else: m = 2\n    if hasattr(Param, 'e'): e = Param.e\n    else: e = 0.001\n\n    X = Data.X\n    [N, n] = map(int, 
X.shape)\n\n    f = np.zeros_like(X.T[0], dtype=int)\n    f_new = np.zeros_like(f)  # f partition matrix in next iteration\n    d = np.zeros((N, c))\n    v = np.zeros((c, n))\n    # Initialize Cluster matrix randomly\n    for i in range(c):\n        v[i] = X[randint(0, N - 1)]\n        while not v[np.where(v == v[i])].all():\n            v[i] = X[randint(0, N - 1)]\n\n    # Iterate\n    run = 0\n    while True and run != 20:\n        for i in range(c):\n            d[:, i] = np.apply_along_axis(np.linalg.norm, 1, X - v[i])\n        f_new = np.argmin(d, axis=1)\n        for i in range(c):\n            if np.size(d[np.where(f_new == i)]):\n                v[i] = np.mean(X[np.where(f_new == i)], axis=0)\n            else:\n                v[i] = np.mean(X, axis=0)\n        if np.linalg.norm(f - f_new) < e:\n            break  # If the distance between current partition vector (f_new) and previously iterated p. vector (f) is under terminate tolerance, halt\n        f = np.copy(f_new)\n        run += 1\n\n    result = {\"Data\": {\"d\" : d, \"f\": f}, \"Cluster\": {\"v\" : v}, \"iter\": run, \"cost\": 0}\n\n    if Param.vis:\n        colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 0), (1, 1, 1)]\n        fig = plt.figure(\"Kmeans - \" + str(c) + \" clusters\")\n        adat = fig.add_subplot(111, projection='3d')\n        adat.set_xlabel(\"x axis\")\n        adat.set_ylabel(\"y axis\")\n        adat.set_zlabel(\"z axis\")\n        for i in range(N):\n            adat.scatter(X[i][0], X[i][1], X[i][2], c=colors[f[i]])\n        for i in range(c):\n            adat.scatter(v[i][0], v[i][1], v[i][2], c=colors[i], s=400)\n        plt.show()\n\n    return result\n","sub_path":"src/clustering_algorithms/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"631009506","text":"#!/usr/bin/env python\n# -- coding: utf-8 --\n\"\"\"\nPrincipal Component Analysis (PCA) is a technique for exploring high-dimensional data.\nPCA is commonly used for exploring and visualizing high-dimensional datasets; it can also be used for data compression, data preprocessing, and so on.\nPCA combines high-dimensional variables that may be linearly correlated into linearly independent low-dimensional variables called principal components.\nThe new low-dimensional dataset preserves as much of the original data's variation as possible, so a high-dimensional dataset can be mapped\ninto a low-dimensional space while retaining as much variance as possible.\n\nNote: dimensionality reduction always means a loss of information; this must be kept clear. If the raw data gives no results on a model,\nit is unrealistic to expect dimensionality reduction to improve things. However, since real data usually carries correlations,\nwe can try to keep the information loss as small as possible while reducing dimensionality. Only adopt PCA when you already get\na reasonably good result on the raw data but find the model too slow or too complex.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\n\nX = iris.data\nY = iris.target\nX_bar = np.mean(X, axis=0)\nprint(iris['feature_names'])\nprint(X_bar)\n\ncov_mat = np.cov(X.T)\neig_val, eig_vec = np.linalg.eig(cov_mat)\n# np.linalg.eig returns eigenvectors as columns, so pair each eigenvalue with a column of eig_vec\neig_pairs = list(zip(eig_val, eig_vec.T))\ntop3_components = sorted(eig_pairs, key=lambda pair: pair[0], reverse=True)[:3]\n# stack the top-3 eigenvectors as the columns of the projection matrix W (4 x 3)\nW = np.array([components[1] for components in top3_components]).T\nres = np.dot(X - X_bar, W)  # project the centered data\nfig = plt.figure()\nax = Axes3D(fig)\nax.set_title('Iris Dataset by PCA', size=14)\nax.scatter(res[:, 0], res[:, 1], res[:, 2], c=Y)\nax.set_xlabel('First eigenvector')\nax.set_ylabel('Second eigenvector')\nax.set_zlabel('Third eigenvector')\nax.w_xaxis.set_ticklabels(())\nax.w_yaxis.set_ticklabels(())\nax.w_zaxis.set_ticklabels(())\nplt.show()\n","sub_path":"Homework/4+刘尚懿+辽宁/week-3/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"648074426","text":"from django.shortcuts import render, redirect\r\nfrom django.http import HttpResponse\r\nfrom django.forms import inlineformset_factory\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib import 
messages\r\nfrom django.contrib.auth.decorators import *\r\nfrom django.contrib.auth.models import Group\r\nfrom .models import *\r\nfrom .forms import *\r\nfrom .filters import *\r\nfrom .decorators import *\r\nfrom django.db.models import Q\r\n\r\n# Create your views here.\r\n\r\n@unauthenticated_user\r\ndef register_page(request):\r\n form = CreateUserForm\r\n\r\n if request.method == 'POST':\r\n form = CreateUserForm(request.POST)\r\n if form.is_valid():\r\n\r\n username = form.cleaned_data.get('username')\r\n\r\n group = Group.objects.get(name='customer')\r\n user = form.save()\r\n\r\n if user is not None:\r\n login(request, user)\r\n\r\n messages.success(request, 'Account created for ' + username)\r\n return redirect('create_customer')\r\n\r\n context = {\r\n 'form': form,\r\n\r\n }\r\n return render(request, 'accounts/register.html', context)\r\n\r\n@unauthenticated_user\r\ndef login_page(request):\r\n form = AuthenticationForm(request.POST)\r\n if request.method == 'POST':\r\n username = request.POST.get('username')\r\n password = request.POST.get('password')\r\n\r\n user = authenticate(request, username=username, password=password)\r\n\r\n if user is not None:\r\n login(request, user)\r\n return redirect('user')\r\n\r\n context = {\r\n 'form': form,\r\n\r\n }\r\n return render(request, 'accounts/login.html', context)\r\n\r\n\r\n\r\n\r\n\r\ndef logout_user(request):\r\n logout(request)\r\n return redirect('home')\r\n\r\n\r\ndef home(request):\r\n products = Product.objects.all()\r\n\r\n context = {\r\n 'products': products,\r\n }\r\n return render(request, 'accounts/home.html', {'products': products})\r\n\r\n\r\n@login_required(login_url='login')\r\ndef customer_home(request):\r\n products = Product.objects.all()\r\n\r\n context = {\r\n 'products': products,\r\n\r\n }\r\n return render(request, 'accounts/user.html', context)\r\n\r\n@login_required(login_url='login')\r\ndef profile(request):\r\n customer = request.user.customer\r\n orders = request.user.customer.order_set.all()\r\n order_count = orders.count()\r\n\r\n filter = OrderFilter(request.GET, queryset=orders)\r\n orders = filter.qs\r\n\r\n context = {\r\n 'customer': customer,\r\n 'orders': orders,\r\n 'order_count': order_count,\r\n 'filter': filter,\r\n }\r\n return render(request, 'accounts/profile.html', context)\r\n\r\n@login_required(login_url='login')\r\ndef settings(request):\r\n customer = request.user.customer\r\n theme = request.user.customer.theme\r\n form = ThemeForm(instance=customer)\r\n\r\n if request.method == \"POST\":\r\n form = ThemeForm(request.POST, instance=customer)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('/customer_home/')\r\n\r\n context = {\r\n 'customer': customer,\r\n 'theme': theme,\r\n 'form': form,\r\n }\r\n\r\n return render(request, 'accounts/settings.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\n@allowed_users(allowed_roles=['admin'])\r\ndef dashboard(request):\r\n orders = Order.objects.all()\r\n customers = Customer.objects.all()\r\n total_customers = customers.count()\r\n total_orders = orders.count()\r\n in_production = orders.filter(status='In Production').count()\r\n shipping = orders.filter(status='Shipping').count()\r\n delivered = orders.filter(status='Delivered').count()\r\n\r\n context = {\r\n 'orders': orders,\r\n 'customers': customers,\r\n 'total_customers': total_customers,\r\n 'total_orders': total_orders,\r\n 'in_production': in_production,\r\n 'shipping': shipping,\r\n 'delivered': delivered,\r\n }\r\n\r\n return render(request, 
'accounts/dashboard.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\n@allowed_users(allowed_roles='admin')\r\ndef products(request):\r\n products = Product.objects.all()\r\n return render(request, 'accounts/products.html', {'products': products})\r\n\r\n\r\ndef product(request, pk):\r\n product = Product.objects.get(id=pk)\r\n\r\n\r\n return render(request, 'accounts/product.html', {'product': product})\r\n\r\n\r\n@login_required(login_url='login')\r\ndef product2(request, pk):\r\n product = Product.objects.get(id=pk)\r\n\r\n\r\n return render(request, 'accounts/product2.html', {'product': product})\r\n\r\n\r\n@login_required(login_url='login')\r\n@allowed_users(allowed_roles='admin')\r\ndef customer(request, pk):\r\n customer = Customer.objects.get(id=pk)\r\n orders = customer.order_set.all()\r\n order_count = orders.count()\r\n\r\n filter = OrderFilter(request.GET, queryset=orders)\r\n orders = filter.qs\r\n\r\n context = {\r\n 'customer': customer,\r\n 'orders': orders,\r\n 'order_count': order_count,\r\n 'filter': filter,\r\n }\r\n return render(request, 'accounts/customer.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef create_order(request, pk):\r\n order_form_set = inlineformset_factory(Customer, Order, fields=('product', 'size', 'notes', 'status'), extra=5)\r\n customer = Customer.objects.get(id=pk)\r\n formset = order_form_set(queryset=Order.objects.none(), instance=customer)\r\n if request.method == \"POST\":\r\n formset = order_form_set(request.POST, instance=customer)\r\n if formset.is_valid():\r\n formset.save()\r\n return redirect('/dashboard/')\r\n context = {\r\n 'formset': formset,\r\n 'customer': customer,\r\n }\r\n return render(request, 'accounts/order_form_set.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef update_order(request, pk):\r\n order = Order.objects.get(id=pk)\r\n form = OrderForm(instance=order)\r\n if request.method == \"POST\":\r\n form = OrderForm(request.POST, instance=order)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('/dashboard/')\r\n context = {\r\n 'order': order,\r\n 'form': form,\r\n }\r\n return render(request, 'accounts/order_form.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef delete_order(request, pk):\r\n order = Order.objects.get(id=pk)\r\n if request.method == \"POST\":\r\n order.delete()\r\n return redirect('/dashboard/')\r\n\r\n context = {\r\n 'order': order,\r\n }\r\n return render(request, 'accounts/delete_order.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef create_customer(request):\r\n form = CustomerForm\r\n if request.method == \"POST\":\r\n form = CustomerForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('/dashboard/')\r\n context = {\r\n 'form': form,\r\n }\r\n return render(request, 'accounts/customer_form.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef update_customer(request, pk):\r\n customer = Customer.objects.get(id=pk)\r\n form = CustomerForm(instance=customer)\r\n if request.method == \"POST\":\r\n form = CustomerForm(request.POST, instance=customer)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('/dashboard/')\r\n context = {\r\n 'customer': customer,\r\n 'form': form,\r\n }\r\n return render(request, 'accounts/customer_form.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef delete_customer(request, pk):\r\n customer = Customer.objects.get(id=pk)\r\n user = customer.user\r\n orders = customer.order_set.all()\r\n if 
request.method == \"POST\":\r\n orders.delete()\r\n user.delete()\r\n customer.delete()\r\n return redirect('/dashboard/')\r\n\r\n context = {\r\n 'customer': customer,\r\n 'user': user,\r\n 'orders': orders,\r\n }\r\n return render(request, 'accounts/delete_customer.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\n@allowed_users(allowed_roles='admin')\r\ndef create_product(request):\r\n form = ProductForm()\r\n if request.method == \"POST\":\r\n form = ProductForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('/products/')\r\n context = {\r\n 'form': form,\r\n }\r\n return render(request, 'accounts/product_form.html', context)\r\n\r\n\r\n@login_required(login_url='login')\r\ndef admin_search(request):\r\n try:\r\n q = request.GET.get('q')\r\n except:\r\n q = None\r\n if q:\r\n customers = Customer.objects.filter(Q(name__icontains=q) | Q(phone__icontains=q) | Q(email__icontains=q))\r\n products = Product.objects.filter(Q(name__icontains=q) | Q(category__icontains=q) | Q(price__icontains=q) |\r\n Q(description__icontains=q))\r\n orders = Order.objects.filter(Q(size__icontains=q) | Q(status__icontains=q) | Q(notes__icontains=q) |\r\n Q(customer__name__icontains=q) | Q(product__name__icontains=q))\r\n\r\n\r\n context = {\r\n 'orders': orders,\r\n 'customers': customers,\r\n 'products': products,\r\n 'query': q,\r\n }\r\n else:\r\n context = {}\r\n\r\n return render(request, 'accounts/admin_search.html', context)\r\n\r\n\r\ndef customer_search(request):\r\n try:\r\n q = request.GET.get('q')\r\n except:\r\n q = None\r\n if q:\r\n products = Product.objects.filter(Q(name__icontains=q) | Q(category__icontains=q) | Q(price__icontains=q) |\r\n Q(description__icontains=q))\r\n\r\n\r\n context = {\r\n 'products': products,\r\n 'query': q,\r\n }\r\n else:\r\n context = {}\r\n\r\n return render(request, 'accounts/customer_search.html', context)\r\n\r\n","sub_path":"crm1/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"540253206","text":"from selenium import webdriver\nimport pandas as pd\n\nbrowser = webdriver.PhantomJS()\nurl = \"https://qiita.com\"\nbrowser.get(url)\ndf = pd.DataFrame({'title': [], 'date': [], 'like': []})\n\nwhile True:\n if len(browser.find_element_by_class_name('要素')) > 0:\n posts = browser.find_element_by_class_name('要素')\n for post in posts:\n title = post.find_element_by_css_selector('要素').text\n date = post.find_element_by_css_selector('要素').text\n like = post.find_element_by_css_selector('要素').text\n series = pd.Series([title, date, like], [\n 'title', 'date', 'bookmarks'])\n print(series)\n df = df.append(series, ignore_index=True)\n\n link = browser.find_element_by_link_text('要素')\n link.click()\n browser.implicitly_wait(10)\n else:\n break\n\ndf['like'] = pd.to_numeric(df['like'].str.replace('users', ''))\ndf = df.sort_values(['like'], ascending=False).reset_index(drop=True)\ndf.to_csv('hoge.csv')\nprint('完了')\n","sub_path":"scraping_py/scraping_n_csv/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"565329205","text":"from selenium import webdriver # 引入webdriver类\nfrom selenium.webdriver.common.action_chains import ActionChains # 引入ActionChains类\n\nurl = 'https://www.vmall.com/'\n# 创建浏览器驱动对象,打开浏览器\ndriver = webdriver.Chrome()\n# 最大化浏览器\ndriver.maximize_window()\n# 
访问网址\ndriver.get(url)\n# 隐式等待2s\ndriver.implicitly_wait(2)\n# 获取所有的一级菜单\ncategory_list = driver.find_elements_by_xpath(\"//ol[@class=\\\"category-list\\\"]/li\")\n# 获取每一个一级菜单的名称\nfor category in category_list:\n # 获取每一个一级菜单的名称\n category_name = category.find_element_by_xpath(\".//div[@class=\\\"category-info\\\"]//span\").text\n print(\"一级菜单:\", category_name)\n # 打开二级菜单页面\n ActionChains(driver).move_to_element(category.find_element_by_xpath(\".//div[@class=\\\"category-info\\\"]\"))\\\n .perform()\n # 获取每一个一级菜单里面的二级菜单\n sub_category_list = category.find_elements_by_xpath(\".//ul[@class=\\\"subcate-list clearfix\\\"]/li[\"\n \"@class=\\\"subcate-item\\\"]\")\n for sub_category in sub_category_list:\n sub_category_name = sub_category.find_element_by_xpath(\".//span\").text\n print(\"\\t\", sub_category_name)\n# 退出浏览器\ndriver.quit()\n","sub_path":"Selenium/VMall.py","file_name":"VMall.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"311542483","text":"from django import forms\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import get_language\nfrom django.utils.timezone import get_current_timezone\nfrom .models import User, Messages\n\n\nclass SignUpUserForm(forms.ModelForm):\n\n def __init__(self, data=None, *args, **kwargs):\n super(SignUpUserForm, self).__init__(data, *args, **kwargs)\n\n self.fields['first_name'].required = True\n self.fields['mobile_phone'].required = True\n self.fields['market_role'].required = True\n if self.fields['market_role'].widget.choices[0][0] == '':\n self.fields['market_role'].widget.choices.pop(0)\n\n def save(self, commit=True):\n user = super(SignUpUserForm, self).save(commit=False)\n user.is_active = False\n user.user_language = get_language()\n user.user_timezone = get_current_timezone()\n user.save()\n return user\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email', 'market_role', 'mobile_phone', 'country', 'region', 'city')\n widgets = {\n 'market_role': forms.widgets.RadioSelect(),\n }\n\n\nclass AuthenticationForm(AuthenticationForm):\n\n username = forms.CharField(max_length=254, widget=forms.TextInput(attrs={\"placeholder\": _(\"Email\")}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={\"placeholder\": _(\"Password\")}))\n\n\nclass UpdateUserForm(forms.ModelForm):\n\n def __init__(self, data=None, *args, **kwargs):\n super(UpdateUserForm, self).__init__(data, *args, **kwargs)\n\n self.fields['first_name'].required = True\n self.fields['mobile_phone'].required = True\n self.fields['user_timezone'].required = True\n self.fields['country'].required = True\n\n def clean_region(self):\n data = self.cleaned_data['region']\n if data:\n if data.country != self.cleaned_data['country'] or not self.cleaned_data['country']:\n raise forms.ValidationError(_('Invalid value for selected country: %(value)s'),\n code='invalid', params={'value': data},)\n return data\n\n def clean_city(self):\n data = self.cleaned_data['city']\n if data:\n try:\n if data.region != self.cleaned_data['region'] or data.country != self.cleaned_data['country']:\n raise forms.ValidationError(_('Invalid value for selected country or region: %(value)s'),\n code='invalid', params={'value': data},)\n except:\n raise forms.ValidationError(_('Invalid value for selected country or region: %(value)s'),\n code='invalid', params={'value': data},)\n return 
data\n\n    class Meta:\n        model = User\n        fields = ['first_name', 'last_name', 'mobile_phone', 'office_phone', 'company_name',\n                  'company_profile', 'post', 'country', 'region', 'city', 'avatar', 'user_timezone', 'user_language']\n\n\nclass CreateMessageForm(forms.ModelForm):\n    def __init__(self, data=None, request=None, *args, **kwargs):\n        self.request = request\n        super(CreateMessageForm, self).__init__(data, request, *args, **kwargs)\n        if self.request:\n            self.fields['to_user'].queryset = User.objects.exclude(market_role=self.request.user.market_role)\\\n                .exclude(id=self.request.user.id)\n\n    def save(self, commit=True):\n        message = super(CreateMessageForm, self).save(commit=False)\n        message.from_user = self.request.user\n        message.save()\n        return message\n\n    class Meta:\n        model = Messages\n        fields = ['to_user', 'text']\n        widgets = {\n            'text': forms.widgets.Textarea(attrs={'rows': 3}),\n            'to_user': forms.widgets.HiddenInput()\n        }","sub_path":"eflow/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"238161132","text":"\nimport pandas\nfrom surprise import NormalPredictor\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise.model_selection import cross_validate\nfrom surprise.model_selection import train_test_split\nfrom surprise.dump import dump\nfrom sklearn.externals import joblib\nfrom surprise import *\n\n# userID = 'cevia' # from this userid (any userid taken from the douban review csv can be used to simulate it) we finally obtain the top-N movie IDs via getTopN\n\ndef SVDFun(data,userSet, movieSet, userID):\n\t# Evaluate performances of our algorithm on the dataset.\n#\titemList = [[0] * userNumber] * itemNumber\n#\tuserList = [[0] * itemNumber] * userNumber\n\talgo = SVD()\n\t# perf = evaluate(algo, data, measures=['RMSE', 'MAE'])\n\t# trainset = data.build_full_trainset()\n\ttrainset, testset = train_test_split(data, test_size=.25)\n\t# algo.fit(trainset)\n\t# predictions = algo.test(testset)\n\talgo = joblib.load('svdmodel.pkl') \n\t# meanRMSE = average(perf['RMSE'])\n\t# meanMAE = average(perf['MAE'])\n\tmovielist = dict()\n\tfor movie in movieSet:\n\t\test = algo.predict(userID,movie).est\n\t\tmovielist[movie] = est\n\treturn movielist\n\n\ndef getTopN(movielist,ratedMovieList):\n\n\tnumberSort = sorted(movielist.items(),key=lambda d : d[0],reverse = False)\n\tnumberSort2 = sorted(numberSort,key=lambda d : d[1],reverse = True)\n\n\n\ttop = []\n\tfor l in list(tuple(numberSort2)):\n\t\ttop.append(l[0])\n\n\ttop = [l for l in top if l not in ratedMovieList]\n\ttop_n = []\n\tfor n in top[0:10]:\n\t\ttop_n.append(n)\n\treturn top_n\t\n\ndef prepareJob(userID):\n\n\tdouban_comments = pandas.read_csv('douban_yingping.csv')\n\tdouban_comments.duplicated()\n\tcomments = douban_comments.iloc[:,[8,9,10]]\n\n\tratedList = comments[comments['userId'] == userID].values\n\tratedMovieList = []\n\tfor i in range(0,ratedList.shape[0]):\n\t\tratedMovieList.append(ratedList[i][1])\n\n\tcomments = comments.values\n\n\tratings = []\n\tmovieids = []\n\tuserIds = []\n\n\tfor i in range(0,comments.shape[0]):\n\t\trating = comments[i][0]\n\t\tmovieid = comments[i][1]\n\t\tuserId = comments[i][2]\n\t\ttry:\n\t\t\trating = int(rating)\n\t\t\tmovieid = int(movieid)\n\t\t\tratings.append(rating)\n\t\t\tmovieids.append(movieid)\n\t\t\tuserIds.append(userId)\n\t\texcept:\n\t\t\t# print('str cannot convert to int')\n\t\t\tpass\n\n\tratings_dict = {'itemID': movieids,\n\t                'userID': userIds,\n\t                'rating': ratings}\n\n\tdf = pandas.DataFrame(ratings_dict)\n\t# A reader is still needed but only the rating_scale param is required.\n\treader = Reader(rating_scale=(1, 5))\n\n\t# The columns must correspond to user id, item id and ratings (in that order).\n\tdata = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)\n\n\t# We can now use this dataset as we please, e.g. calling cross_validate\n\tcross_validate(NormalPredictor(), data, cv=2)\n\n\tuserSet = set(userIds)\n\tmovieSet = set(movieids)\n\n\tmovielist = SVDFun(data,userSet,movieSet,userID)\n\n\treturn getTopN(movielist,ratedMovieList)# getTopN() is run here\n\ndef average(seq, total=0.0): \n    num = 0 \n    for item in seq: \n        total += item \n        num += 1 \n    return total / num\n\n\nprint(prepareJob(\"cevia\"))\n\n\n\n\n\n\n\n\t","sub_path":"myMovie/myMovie/RecommendationsLeilaEdit backoff.py","file_name":"RecommendationsLeilaEdit backoff.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"499207412","text":"from aiohttp import web\n\nfrom core.api.actor import CBPiActor\nfrom core.api.decorator import on_event, request_mapping\nfrom core.controller.crud_controller import CRUDController\nfrom core.database.model import ActorModel\nfrom core.http_endpoints.http_api import HttpAPI\nfrom core.utils import parse_props\n\n\nclass ActorHttp(HttpAPI):\n\n    @request_mapping(path=\"/{id:\\d+}/on\", auth_required=False)\n    async def http_on(self, request) -> web.Response:\n        \"\"\"\n        :param request: \n        :return: \n        \"\"\"\n        id = int(request.match_info['id'])\n        self.cbpi.bus.fire(topic=\"actor/%s/switch/on\" % id, id=id, power=99)\n        return web.Response(status=204)\n\n\n    @request_mapping(path=\"/{id:\\d+}/off\", auth_required=False)\n    async def http_off(self, request) -> web.Response:\n        \"\"\"\n        :param request: \n        :return: \n        \"\"\"\n        id = int(request.match_info['id'])\n        self.cbpi.bus.fire(topic=\"actor/%s/off\" % id, id=id)\n        return web.Response(status=204)\n\n    @request_mapping(path=\"/{id:\\d+}/toggle\", auth_required=False)\n    async def http_toggle(self, request) -> web.Response:\n        \"\"\"\n        :param request: \n        :return: \n        \"\"\"\n        id = int(request.match_info['id'])\n        print(\"ID\", id)\n        self.cbpi.bus.fire(topic=\"actor/%s/toggle\" % id, id=id)\n        return web.Response(status=204)\n\nclass ActorController(ActorHttp, CRUDController):\n\n    '''\n    The main actor controller\n    '''\n    model = ActorModel\n\n    def __init__(self, cbpi):\n        super(ActorController, self).__init__(cbpi)\n        self.cbpi = cbpi\n        self.state = False\n\n        self.cbpi.register(self, \"/actor\")\n        self.types = {}\n        self.actors = {}\n\n\n    def register(self, name, clazz) -> None:\n\n        print(\"REGISTER\", name)\n        if issubclass(clazz, CBPiActor):\n            print(\"ITS AN ACTOR\")\n\n        parse_props(clazz)\n        self.types[name] = clazz\n\n    async def init(self):\n        '''\n        This method initializes all actors during startup. 
It creates actor instances\n        \n        :return: \n        '''\n        await super(ActorController, self).init()\n\n        for name, clazz in self.types.items():\n            print(\"Type\", name)\n\n        for id, value in self.cache.items():\n\n            if value.type in self.types:\n                cfg = value.config.copy()\n\n                cfg.update(dict(cbpi=self.cbpi, id=id, name=value.name))\n                clazz = self.types[value.type]\n\n                self.cache[id].instance = clazz(**cfg)\n                print(\"gpIO\", self.cache[id].instance, self.cache[id].instance.gpio)\n\n\n\n\n    @on_event(topic=\"actor/+/switch/on\")\n    def on(self, id , power=100, **kwargs) -> None:\n        '''\n        Method to switch an actor on.\n        Supporting Event Topic \"actor/+/on\"\n        \n        :param actor_id: the actor id\n        :param power: as integer value between 1 and 100\n        :param kwargs: \n        :return: \n        '''\n\n        id = int(id)\n        if id in self.cache:\n            print(\"POWER ON\")\n            actor = self.cache[id ].instance\n            self.cbpi.bus.fire(\"actor/%s/on/ok\" % id)\n            actor.on(power)\n\n    @on_event(topic=\"actor/+/toggle\")\n    def toggle(self, id, power=100, **kwargs) -> None:\n        '''\n        Method to toggle an actor on or off\n        Supporting Event Topic \"actor/+/toggle\"\n        \n        :param power: \n        :return: \n        '''\n\n        id = int(id)\n        if id in self.cache:\n            actor = self.cache[id].instance\n            if actor.state is True:\n                actor.off()\n            else:\n                actor.on()\n\n    @on_event(topic=\"actor/+/off\")\n    def off(self, id, **kwargs) -> None:\n        \"\"\"\n        \n        Method to switch an actor off\n        Supporting Event Topic \"actor/+/off\"\n        \n        :param id: \n        :param kwargs: \n        \"\"\"\n\n        id = int(id)\n\n        if id in self.cache:\n            actor = self.cache[id].instance\n            actor.off()\n","sub_path":"core/controller/actor_controller.py","file_name":"actor_controller.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"101956739","text":"import csv\n\nclass FabricaDeCartas:\n    \"\"\"The card factory creates the cards for each era's deck\"\"\"\n    def crearMazo(filename):\n        mazo = []\n        with open(filename, \"r\") as file:\n            file_csv = csv.reader(file)\n            for line in file_csv:\n                nombre = line[0]\n                Tipo = line[1]\n                CostosTot = line[2]\n                CMon = line[3]\n                CLad = line[4]\n                CCem = line[5]\n                COro = line[6]\n                CMad = line[7]\n                CCer = line[8]\n                CPap = line[9]\n                CTel = line[10]\n                Gratis = line[11]\n                EMon = line[12]\n                ELad = line[13]\n                ECem = line[14]\n                EOro = line[15]\n                EMad = line[16]\n                ECer = line[17]\n                EPap = line[18]\n                ETel = line[19]\n                EGeo = line[20]\n                ERue = line[21]\n                EEsc = line[22]\n                EMil = line[23]\n                EPto = line[24]\n\n                carta = Carta(nombre, Tipo, CostosTot, CMon, CLad, CCem, COro, CMad, CCer, CPap, CTel, Gratis, EMon, ELad, ECem, EOro, EMad, ECer, EPap, ETel, EGeo, ERue, EEsc, EMil, EPto)\n\n                mazo.append(carta)\n\n        return mazo\n","sub_path":"Tercer Entrega/fabrica_de_cartas.py","file_name":"fabrica_de_cartas.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66404620","text":"# This problem was asked by Airbnb.\n# You come across a dictionary of sorted words in a language you've never\n# seen before. Write a program that returns the correct order of letters in\n# this language.\n# For example, given\n# ['xww', 'wyz', 'wxyw', 'ywx', 'ywz'], you should return ['x', 'z', 'w', 'y'].\n\nimport unittest\nfrom collections import UserList\n\n\ndef update_table(table, current_letter, next_letter):\n    \"\"\"\n    Update the table. The table is realized using a dictionary. The time to search an item in a dictionary is constant, regardless of how many\n    elements it has.\n    :param table:\n    :param current_letter:\n    :param next_letter:\n    :return:\n    \"\"\"\n    if current_letter in table:\n        table[current_letter][1].add(next_letter)\n    else:\n        table[current_letter] = [set(), {next_letter}]\n\n    if next_letter in table:\n        table[next_letter][0].add(current_letter)\n    else:\n        table[next_letter] = [{current_letter}, set()]\n\n    return table\n\n\ndef search_to_fill_table(target, table):\n    \"\"\"\n    Search through the target list and record the information found in\n    the table. The table has the form:\n    | letter | front | back |\n    | x      |       | y    |\n    | y      | w     |      |\n    After the table is formed, x has no front-letter, so it's the first letter.\n    Then eliminate the row containing x, and x in other rows. Then the letter\n    without a front-letter should come just after letter-x.\n    :param target:\n    :param table:\n    :return:\n    \"\"\"\n    if len(target) <= 1:\n        return {}\n\n    current_letter = ''\n\n    i = 0\n    # search with a while loop, because the length of the list changes during the process\n    while 1:\n        if current_letter == '':\n            try:\n                current_letter = target[0][0]\n            except IndexError:\n                return table\n\n        next_letter = target[i][0]\n        end_position = i\n\n        if next_letter != current_letter:\n            table = update_table(table, current_letter, next_letter)\n            current_letter = next_letter\n\n            for k in range(end_position):\n                target[k] = target[k][1:]\n            small_target = target[:end_position]\n            target = target[end_position:]\n            search_to_fill_table(small_target, table)\n            i = 0\n\n        elif i == len(target)-1:\n            for k in range(end_position+1):\n                target[k] = target[k][1:]\n            small_target = target\n            target = target[end_position:]\n            # this statement effectively decides when the recursion ends.\n            # the recursion-termination statement is usually placed at the start of the function definition; this deserves deeper thought\n            search_to_fill_table(small_target, table)\n            current_letter = ''\n            i = 0\n        else:\n            i = i + 1\n\n    return table\n\n\ndef parse_table(table, alphabet):\n    \"\"\"\n    Parse the table formed by the search_to_fill_table() method and return a\n    list containing all the letters in order. The table is represented by\n    a dictionary.\n    Information about the order of letters is collected by\n    search_to_fill_table(). For each letter, we know all the letters before it\n    and all letters after it. We notice the 1st letter is the one without any letter\n    before it. We find it, put it into the list. Then we delete the 1st\n    letter in the table, so the 2nd letter has no letter before it. We find\n    it, append it into the list. Repeat the above step until the table is\n    empty.\n    As implied in the above description, the process is recursive.\n    :param alphabet:\n    :param table: a table represented by a dictionary, where conceptually the 
1st column contains\n the letter, 2nd the letters before it and 3rd the letters after it.\n :return: a list, in which all letters stands in order.\n \"\"\"\n\n if len(table) == 1:\n for kunci in table.keys():\n return [kunci]\n\n for key, value in table.items():\n if value[0] == set(alphabet):\n break\n del table[key]\n alphabet.append(key)\n result = parse_table(table, alphabet)\n return alphabet.extend(result)\n\n\ndef find_alphabet(words_sequence):\n return parse_table(search_to_fill_table(words_sequence, {}), [])\n\n\nif __name__ == \"__main__\":\n import time\n import random\n import string\n\n all_lowercase_letters = list(string.ascii_lowercase)\n long_list = []\n for i in range(1000):\n string = ''\n j = random.randint(10, 50)\n for k in range(j):\n string += random.choice(all_lowercase_letters)\n long_list.append(string)\n long_list = sorted(long_list)\n\n test_target = ['xww', 'wxyz', 'wxyw', 'ywx', 'ywz']\n\n start = time.time()\n result1 = find_alphabet(test_target)\n print(f\"total time used is { time.time() - start } \")\n print(result1)\n\n start = time.time()\n result2 = find_alphabet(long_list)\n total_time = time.time() - start\n print(f\"total time used is {total_time}\")\n print(result2)\n\n","sub_path":"dcp220/problem226-main.py","file_name":"problem226-main.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"644379898","text":"import time\nfrom turtle import Screen\n\nfrom ball import *\nfrom paddle import *\nfrom scoreboard import *\n\nscreen = Screen()\nscreen.tracer(0)\nscreen.setup(800, 600)\nscreen.title('PONG - Ultimate Edition')\nscreen.bgcolor('black')\n\nr_score = 0\nl_score = 0\n\nr_scoreboard = Scoreboard((50, 240))\nl_scoreboard = Scoreboard((-50, 240))\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\nball = Ball()\n\nscreen.listen()\nscreen.onkey(r_paddle.go_up, 'Up')\nscreen.onkey(r_paddle.go_down, 'Down')\nscreen.onkey(l_paddle.go_up, 'w')\nscreen.onkey(l_paddle.go_down, 's')\n\nwhile True:\n ball.move()\n time.sleep(ball.move_speed)\n screen.update()\n\n # Wall Collision\n if ball.ycor() >= 275 or ball.ycor() <= -275:\n ball.wall_bounce()\n\n # Paddle Collision\n\n if ball.distance(r_paddle) < 50 and ball.xcor() > 330 or ball.distance(l_paddle) < 50 and ball.xcor() < -330:\n ball.paddle_bounce()\n\n if ball.xcor() > 380:\n l_scoreboard.increase_score()\n ball.reset()\n ball.paddle_bounce()\n\n if ball.xcor() < -380:\n r_scoreboard.increase_score()\n ball.reset()\n ball.paddle_bounce()\n\nscreen.exitonclick()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"151888909","text":"#!/usr/bin/env python\n\nimport matplotlib\nimport os\n#for openSUSE\nif os.name == 'posix':\n\tmatplotlib.use(\"GTK\")\nimport matplotlib.pyplot as plt\n\nfrom dir_pattern import *\n\npath_1 = '20_deg\\\\20deg_7db_att_minus3deg_1'\n#path_2 = '20_deg\\\\20deg_7db_att_minus3deg_2'\n#path_3 = '20_deg\\\\20deg_7db_att_minus3deg_2_old'\npath_4 = '20_deg\\\\20deg_7db_att_minus3deg_2_old'\n\n[deg_1, dr_1] = get_dr(path_1)\n#[deg_2, dr_2] = get_dr(path_2)\n#[deg_3, dr_3] = get_dr(path_3)\n\n[deg_4, dr_4] = get_dr(path_4)\n'''\n# temporal\npath_1 = 'temporal\\\\11_02_14\\\\20deg_10db_att\\\\1.lvm'\npath_12= \"temporal\\\\21_05_2014\\\\20deg_13db_att_minus2deg\\\\110_2.lvm\"\npath_13 = '20_deg\\\\20deg_7db_att_minus3deg_3\\\\110.lvm'\n\n[t,V] = 
get_temporal(path_1)\n[t2,V2] = get_temporal(path_12)\n[t3,V3] = get_temporal(path_13)\n'''\n#Plot directivity pattern\nplt.plot(deg_1, dr_1, 'r', deg_4, dr_4, 'k')\n#plt.plot(t[1700:2100],V[1700:2100],'r', t2[1700:2100], V2[1700:2100], 'b', t3[1700:2100], V3[1700:2100], 'g')\nplt.grid(True)\nplt.ylabel('R('+r'$\\theta$'+')')\nplt.xlabel(r'$\\theta$' + ', deg.')\nplt.legend(['1', '4'])\nplt.title(\"Directivity pattern at 0 deg.\")\nplt.show()","sub_path":"20_deg_all.py","file_name":"20_deg_all.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106138433","text":"\"\"\"\nSANS data loader\n================\n\nLoad SANS NeXus file into :mod:`sansred.sansdata` data structure.\n\"\"\"\n\nfrom dataflow.lib import hzf_readonly_stripped as hzf\nfrom dataflow.lib import unit\n\nfrom .sansdata import SansData\n\nmetadata_lookup = {\n \"det.dis\": \"DAS_logs/detectorPosition/softPosition\",\n \"resolution.lmda\" : \"instrument/monochromator/wavelength\",\n \"resolution.dlmda\": \"instrument/monochromator/wavelength_error\",\n \"det.beamx\": \"instrument/detector/beam_center_x\",\n \"det.beamy\": \"instrument/detector/beam_center_y\",\n \"det.pixeloffsetx\": \"instrument/detector/x_offset\",\n \"det.pixelsizex\": \"instrument/detector/x_pixel_size\",\n \"det.pixeloffsety\": \"instrument/detector/y_offset\",\n \"det.pixelsizey\": \"instrument/detector/y_pixel_size\",\n \"analysis.intent\": \"DAS_logs/trajectoryData/intent\",\n \"analysis.filepurpose\": \"DAS_logs/trajectoryData/filepurpose\",\n \"sample.name\": \"DAS_logs/sample/name\",\n \"sample.description\": \"DAS_logs/sample/description\",\n \"sample.labl\": \"DAS_logs/sample/description\", # compatibility\n \"polarization.front\": \"DAS_logs/frontPolarization/direction\",\n \"polarization.back\": \"DAS_logs/backPolarization/direction\",\n \"run.filename\": \"DAS_logs/trajectoryData/fileName\",\n \"run.filePrefix\": \"DAS_logs/trajectoryData/filePrefix\",\n \"run.experimentScanID\": \"DAS_logs/trajectory/experimentScanID\",\n \"run.instrumentScanID\": \"DAS_logs/trajectory/instrumentScanID\",\n \"run.detcnt\": \"control/detector_counts\",\n \"run.rtime\": \"control/count_time\",\n \"run.moncnt\": \"control/monitor_counts\",\n \"run.atten\": \"instrument/attenuator/index\",\n \"analysis.groupid\": \"DAS_logs/trajectoryData/groupid\",\n \"run.configuration\": \"DAS_logs/configuration/key\",\n \"sample.thk\": \"DAS_logs/sample/thickness\",\n \"adam.voltage\": \"DAS_logs/adam4021/voltage\",\n \"sample.temp\": \"DAS_logs/temp/primaryNode/average_value\",\n \"resolution.ap1\": \"DAS_logs/geometry/sourceAperture\",\n \"resolution.ap2\": \"instrument/sample_aperture/size\",\n \"resolution.ap12dis\": \"instrument/source_aperture/distance\",\n \"sample.position\": \"instrument/sample_aperture/distance\",\n \"rfflipperpowersupply.voltage\": \"DAS_logs/RFFlipperPowerSupply/actualVoltage/average_value\",\n \"rfflipperpowersupply.frequency\": \"DAS_logs/RFFlipperPowerSupply/frequency\",\n \"huberRotation.softPosition\": \"DAS_logs/huberRotation/softPosition\",\n \"start_time\":\"start_time\",\n \"end_time\":\"end_time\",\n}\n\nunit_specifiers = {\n \"det.dis\": \"cm\",\n \"det.pixelsizex\": \"cm\",\n \"det.pixeloffsetx\": \"cm\",\n \"det.pixelsizey\": \"cm\",\n \"det.pixeloffsety\": \"cm\",\n \"sample.thk\": \"cm\",\n \"resolution.ap1\": \"cm\",\n \"resolution.ap2\": \"cm\",\n \"sample.thk\": \"cm\"\n}\n\ndef process_sourceAperture(field, units):\n import numpy as 
np\n    def handler(v):\n        # np.float is a deprecated alias of the builtin, so use float directly\n        return float(v.split()[0])\n    handle_values = np.vectorize(handler)\n    value = handle_values(field.value)\n    units_from = \"\"\n    v0 = field.value[0].split()\n    if len(v0) > 1:\n        units_from = v0[1]\n    converter = unit.Converter(units_from)\n    return converter(value, units) \n\ndef data_as(field, units):\n    \"\"\"\n    Return value of field in the desired units.\n    \"\"\"\n    if field.name.split('/')[-1] == 'sourceAperture':\n        return process_sourceAperture(field, units)\n    else:\n        converter = unit.Converter(field.attrs.get('units', ''))\n        value = converter(field.value, units)\n        return value\n\ndef readSANSNexuz(input_file, file_obj=None):\n    \"\"\"\n    Load all entries from the NeXus file into sans data sets.\n    \"\"\"\n    datasets = []\n    file = hzf.File(input_file, file_obj)\n    for entryname, entry in file.items():\n        areaDetector = entry['data/areaDetector'].value\n        shape = areaDetector.shape\n        if len(shape) < 2 or len(shape) > 3:\n            raise ValueError(\"areaDetector data must have dimension 2 or 3\")\n        if len(shape) == 2:\n            # add another dimension at the front\n            shape = (1,) + shape\n            areaDetector = areaDetector.reshape(shape)\n        \n        for i in range(shape[0]):\n            metadata = {}\n            for mkey in metadata_lookup:\n                field = entry.get(metadata_lookup[mkey], None)\n                if field is not None:\n                    if mkey in unit_specifiers:\n                        field = data_as(field, unit_specifiers[mkey])\n                    else:\n                        field = field.value\n                    if field.dtype.kind == 'f':\n                        field = field.astype(\"float\")\n                    elif field.dtype.kind == 'i':\n                        field = field.astype(\"int\")\n                    \n                    if len(field) == shape[0]:\n                        metadata[mkey] = field[i]\n                    else:\n                        metadata[mkey] = field\n                else:\n                    metadata[mkey] = field\n\n            metadata['entry'] = entryname\n            dataset = SansData(data=areaDetector[i].copy(), metadata=metadata)\n            datasets.append(dataset) \n\n    return datasets\n","sub_path":"sansred/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"65484720","text":"#!/usr/bin/env pyformex --gui\n# $Id$\n##\n## This file is part of pyFormex 0.7.2 Release Tue Sep 23 16:18:43 2008\n## pyFormex is a Python implementation of Formex algebra\n## Website: http://pyformex.berlios.de/\n## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be) \n##\n## This program is distributed under the GNU General Public License\n## version 2 or later (see file COPYING for details)\n##\n\"\"\"Mobius Ring\n\nlevel = 'advanced'\ntopics = ['geometry','surface']\ntechniques = ['dialog', 'animation', 'colors']\n\n\"\"\"\n\nreset()\nsmoothwire()\n\nres = askItems([('width',2),\n                ('length',30),\n                ('number of turns',1),\n                ])\nif not res:\n    exit()\n    \nw = res['width']\nl = res['length']\nn = res['number of turns']\n\nC = Formex(pattern('1234'))\ncell = connect([C,C,C,C],bias=[0,1,2,3])\nstrip = cell.replic2(l,w,1.,1.).translate(1,-0.5*w)\nTA = draw(strip,color='white')\n\nsleep(1)\n\nnsteps = 20\nstep = n*180./nsteps/l\nfor i in arange(nsteps+1):\n    a = i*step\n    torded = strip.map(lambda x,y,z: [x,y*cosd(x*a),y*sind(x*a)])\n    TB = draw(torded,color='yellow')\n    undraw(TA)\n    TA = TB\n\nsleep(1)\n#TA = None\nnsteps = 40\nstep = 360./nsteps\nfor i in arange(1,nsteps+1):\n    ring = torded.trl(2,l*nsteps/pi/i).scale([i*step/l,1.,1.]).trl(0,-90).cylindrical(dir=[2,0,1])\n    TB = draw(ring,color='orange')\n    undraw(TA)\n    TA = TB\n\nsleep(1)\nnsteps = 40\nstep = 720./nsteps\nfor i in arange(1,nsteps+1):\n    mobius = ring.rotate(i*step)\n    TB = draw(mobius,color='orange')\n    undraw(TA)\n    TA = 
TB\n\n## path = ring.select(range(30)).selectNodes([2,3])\n\n## flyAlong(path.scale(0.8))\n## export({'flypath':path})\n\n\n","sub_path":"tags/release-0.7.2/pyformex/examples/Mobius.py","file_name":"Mobius.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"54759353","text":"import pandas as pd\nimport numpy as np\nimport sys\n\n\n\n\"\"\"\n\tThis script processes data from the 15 minutes csv files. They are fixed-width-delimited files (as opposed to standard comma-delimited files, and if they are read in the usual way some of the columns in the end containing several strings (like the PassOn and PassOff columns) get messed up). So instead they need to be read using the read_fwf function in pandas.\n\nInput: \targ1 - the name of the input file\n \targ2 - the name of the output file (optional)\n\n\nOutput: \n\tIf a second argument is passed the processed dataset is saved as a tab-delimited .csv file\n\t\n\nUsage: \tfrom command line \n\t>> python read_fwf.py fixed_width_file.csv tab_delimited_file.csv\n\n \tfrom within ipython\n \t>> % run python read_fwf.py fixed_width_file.csv\n\t(here I do not write the output to a file but I directly create a pandas dataframe called data)\n\nNote: The types are hard-coded: you can modify usage based on your needs \n ('ETA','DwellTime','Activity' are floats as they contain NaNs\n and cannot be automatically converted to integers)\n\n\nWarning: this relies on the structure of files not changing\n\n \n\"\"\"\n\n\ndef main():\n\n filename = sys.argv[1]\n print('Processing file '+ filename)\n\n # read the data\n data = read(filename)\n\n #write the data\n if len(sys.argv)>2:\n data.to_csv(sys.argv[2],sep = '\\t')\n\ndef read(filename):\n \"\"\" \n \n reads a fwf file into a pandas data frame\n \n Usage: data = read(filename)\n \n Note: ('ETA','DwellTime','Activity' are floats as they contain NaNs\n and cannot be automatically converted to integers)\n \"\"\" \n \n # creating the widths of each column\n widths = [12,21,12,12,12,9,12,12,21,255,255,25,25,12,12,12,12,17,51,51,12]\n \n # creating colspecs (containing starting and ending point [) of a column)\n cumsum = [sum(widths[:i+1]) for i in range(len(widths))]\n \n # excluding the commas\n cumsum0 = [0]+cumsum[:-1]\n cumsum_short = [item-1 for item in cumsum]\n colspecs = list(zip(cumsum0,cumsum_short))\n \n # reading the file\n try:\n data = pd.read_fwf(filename,colspecs = colspecs,skiprows = [1], dtype = {'ProviderId': 'str', 'ETA': 'str'})\n except(IOError):\n print('This file does not exist. 
Please check the filename or the directory.')\n        sys.exit()\n    \n    # specifying the data types explicitly\n    data = data.astype('object')\n    numeric_list = ['LON','LAT','DwellTime']\n    data[numeric_list] = data[numeric_list].astype('float')\n    return(data)\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"Disaster_Recovery/read_fwf.py","file_name":"read_fwf.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"78750980","text":"import random\n\ndef is_valid(input_string):\n    global top\n    if (input_string.isdigit()):\n        input_number = int(input_string)\n        # both bounds must hold, so this check needs 'and' rather than 'or'\n        if (input_number > 0 and input_number <= top):\n            return True\n    return False\n\n# essentially the optimal strategy: a plain binary search, although sometimes one manages to play better than the optimum\ndef guesser(start, end, secret):\n    guess = (end + start) // 2\n    if (guess > secret):\n        return guesser(start, guess, secret) + 1\n    elif(guess < secret):\n        return guesser(guess,end,secret) + 1\n    else:\n        return 1\n    \nprint('\\n{:-^50}\\n{:-^50}\\n\\n'.format('Welcome to the game','GUESS THE NUMBER'))\n\ntop = int(input('Enter the upper bound: '))\nsecret_number = 0\n\n\ndef init_game():\n    global secret_number, counter_of_guess, optimal_guess\n    secret_number = random.randint(1, top)\n    counter_of_guess = 0\n    optimal_guess = guesser(1,top,secret_number)\n\ninit_game()\n\nwhile True:\n    input_string = input('Enter a number from 1 to ' + str(top))\n    if (not is_valid(input_string)):\n        continue\n\n    user_number = int(input_string)\n    counter_of_guess += 1\n    if (user_number > secret_number):\n        print('The secret number is less than your guess')\n    elif (user_number < secret_number):\n        print('The secret number is greater than your guess')\n    else:\n        print(\"Congratulations! You guessed the number in {0} tries; the optimum is {1} tries. Play again? 
(y/n): \".format(counter_of_guess, optimal_guess), end='') \n continue_game = input()\n if (continue_game.lower() == 'y'):\n init_game()\n else:\n break\n\n","sub_path":"lesson13/task1-4.py","file_name":"task1-4.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142564316","text":"import os\nimport signal\nimport psutil\n\n\nclass Program:\n def __init__(self, name, username, pid):\n self._name = name\n self._username = username\n self._pid = pid\n self._cpu_times = cpu_times\n self._memory_percent = memory_percent\n self._create_time = create_time\n self._status = status\n self._exe = exe\n self._cmdliine = cmdliine\n\n def getProgram(self, pid):\n p = psutil.Process(pid=pid)\n\n with p.oneshot():\n return {\n \"name\": p.name(),\n \"cpu_times\": p.cpu_times(),\n \"cpu_percent\": p.cpu_percent(),\n \"memory_percent\": p.memory_percent(),\n \"create_time\": p.create_time(),\n \"ppid\": p.ppid(),\n \"status\": p.status(),\n \"exe\": p.exe(),\n \"cmdliine\": p.cmdline(),\n }\n\n def stopProgram(self,pid):\n os.kill(pid, signal.CTRL_C_EVENT)\n\n\n def __str__(self):\n return \"\"\"\n Program Info \\n\n -------------- \\n \n Name : {} \\n \n Program Pid : {} \\n\n Path : {} \\n\n Language : {} \\n\n Run Command : {} \\n\n Arguments : {}\n \"\"\" \\\n .format(self._name, self._username, self._pid)","sub_path":"src/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"344000286","text":"#\n# settings.py\n#\n# Copyright © 2010-2013 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nSupport for mapping setting values to name table indices.\n\"\"\"\n\n# System imports\nimport logging\n\n# Other imports\nfrom fontio3.fontdata import mapmeta\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass Settings(dict, metaclass=mapmeta.FontDataMetaclass):\n \"\"\"\n Objects representing all of the settings associated with a single feature.\n These are dicts mapping setting values to name table indices.\n \n >>> e = _fakeEditor()\n >>> _testingValues[1].pprint(editor=e)\n 0: 303 ('Required Ligatures On')\n 2: 304 ('Common Ligatures On')\n \n >>> _testingValues[2].pprint(editor=e)\n 0: 306 ('Regular')\n 1: 307 ('Small Caps')\n \n >>> logger = utilities.makeDoctestLogger(\"val\")\n >>> e = _fakeEditor()\n >>> _testingValues[3].isValid(logger=logger, editor=e)\n val.[0] - ERROR - Name table index 12 not present in 'name' table.\n val.[1] - ERROR - The name table index 70000 does not fit in 16 bits.\n val.[2] - ERROR - The name table index 'fred' is not a real number.\n val.[5] - ERROR - Name table index 320 not present in 'name' table.\n False\n \"\"\"\n \n #\n # Class definition variables\n #\n \n mapSpec = dict(\n item_pprintlabelpresort = True,\n item_renumbernamesdirectvalues = True)\n \n #\n # Methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Adds the binary data for the Settings to the specified LinkedWriter.\n \n >>> utilities.hexdump(_testingValues[1].binaryString())\n 0 | 0000 012F 0002 0130 |.../...0 |\n \n >>> utilities.hexdump(_testingValues[2].binaryString())\n 0 | 0000 0132 0001 0133 |...2...3 |\n \"\"\"\n \n if 'stakeValue' in kwArgs:\n stakeValue = kwArgs.pop('stakeValue')\n w.stakeCurrentWithValue(stakeValue)\n else:\n stakeValue = w.stakeCurrent()\n \n w.addGroup(\"2H\", ((k, self[k]) for k in 
sorted(self)))\n \n @classmethod\n def fromvalidatedwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a new Settings object from the specified walker,\n doing source validation. There is one required keyword argument:\n \n count The number of settings. This is determined from the\n Feature object.\n \n >>> s = _testingValues[1].binaryString()\n >>> logger = utilities.makeDoctestLogger(\"fvw\")\n >>> fvb = Settings.fromvalidatedbytes\n >>> obj = fvb(s, count=2, logger=logger)\n fvw.settings - DEBUG - Walker has 8 remaining bytes.\n >>> obj == _testingValues[1]\n True\n \n >>> fvb(s[:-1], count=2, logger=logger)\n fvw.settings - DEBUG - Walker has 7 remaining bytes.\n fvw.settings - ERROR - Insufficient bytes.\n \"\"\"\n \n count = kwArgs.pop('count')\n logger = kwArgs.pop('logger', logging.getLogger())\n logger = logger.getChild(\"settings\")\n \n logger.debug((\n 'V0001',\n (w.length(),),\n \"Walker has %d remaining bytes.\"))\n \n if w.length() < 4 * count:\n logger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n return cls(w.group(\"2H\", count))\n \n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a new Settings object from the specified walker.\n There is one required keyword argument:\n \n count The number of settings. This is determined from the\n Feature object.\n \n >>> for obj in _testingValues[1:3]:\n ... print(obj == Settings.frombytes(obj.binaryString(), count=2))\n True\n True\n \"\"\"\n \n return cls(w.group(\"2H\", kwArgs['count']))\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3 import utilities\n \n def _fakeEditor():\n from fontio3.name import name\n \n _fakeNameTable = {\n (1, 0, 0, 303): \"Required Ligatures On\",\n (1, 0, 0, 304): \"Common Ligatures On\",\n (1, 0, 0, 306): \"Regular\",\n (1, 0, 0, 307): \"Small Caps\"}\n \n e = utilities.fakeEditor(0x1000)\n e.name = name.Name(_fakeNameTable)\n return e\n \n _testingValues = (\n Settings(),\n Settings({0: 303, 2: 304}),\n Settings({0: 306, 1: 307}),\n \n # bad values start here\n \n Settings({0: 12, 1: 70000, 2: \"fred\", 5: 320}))\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/fontio3/feat/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"413638965","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Tom van Steijn, Royal HaskoningDHV\n\nimport logging\n\n# logging\nLOGGING_LEVEL = logging.INFO\n\n# global variables\nMAPVARS = ('prefix', 'project', 'project_number', 'creator', 'controller',\n    'series', 'principal')\nSCENARIOVARS = ('scenario', 'scenario_number')\nLAYERSETVARS = ('map', 'map_number', 'source', 'version', 'comment')\nOPTCOLUMNS = ['export', 'map', 'map_number', 'bookmark', 'source', 'version',\n    'comment']\n\n# templates\nNUMBER_TEMPLATE = '{project_number}-D{series:0>2d}-N{number:0>3d}'\nTITLE_TEMPLATE = (\n    '{prefix}{scenario_number}{dot}{map_number}{space}{scenario}: {map}')\nFILENAME_TEMPLATE = '{numberstr}{space}{scenario}-{map}-(V{version:.2f})'\nMERGEDPDFFILENAME_TEMPLATE = '{project_number}-D{series:0>2d}-{scenario}'\n\n# text formatting\nCHARMAP = {' ': '-', ':': '', ';': '', '/': '-', '\\\\': '-', '.': '-'}\nMAX_CHARS = 35\nMAX_LINES = 2\n\n# text element mapping\nELEMENTS = {'title': 'DHV_TITLE',\n'versionstr': 'DHV_VERSION', # e.g. V1.00\n'source': 'DHV_SOURCE',\n'project': 'DHV_PROJECT',\n'numberstr': 'DHV_NUMMER', # e.g. BD7227-D01-N006\n'principal': 'DHV_PRINCIPAL',\n'creator': 'DHV_CREATOR',\n'controller': 'DHV_CONTROLEUR',\n'comment': 'DHV_COMMENT'\n}","sub_path":"mapgenerator/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"485293025","text":"import os\nimport re\n#\n# DEFINE HOME DIRECTORY VARIABLE:\nHomeDirectory = os.path.expanduser('~')\n#\n# REPLACE foo WITH THE STRING TO BE SEARCHED FOR:\n_replace_re = re.compile('foo')\n#\n# DEFINE THE DIRECTORY TO SEARCH AS THE SearchMe DIRECTORY ON YOUR UNIX DESKTOP:\nfor dirpath, dirnames, filenames in os.walk(HomeDirectory + '/Desktop/SearchMe'):\n#\n    for file in filenames:\n        file = os.path.join(dirpath, file)\n        tempfile = file + '.temp'\n        with open(tempfile, 'w') as target:\n            with open(file) as source:\n                for line in source:\n#\n# REPLACE foobar WITH THE STRING THAT SHOULD BE REPLACED:\n                    line = _replace_re.sub('foobar', line)\n                    target.write(line)\n        os.rename(tempfile, file)\n#\n","sub_path":"dirFindReplace.py","file_name":"dirFindReplace.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"285580801","text":"import numpy as np\nfrom scipy import ndimage\nfrom skimage.morphology import binary_dilation\n\n\ndef find_food_vacuole_centroid(frame):\n    dark_thresh = np.percentile(frame,0.25)\n    mask = frame < dark_thresh\n    labels, numlabel = ndimage.label(mask)\n    for l in range(numlabel+1):\n        if np.sum(labels == l) < 300:\n            labels[labels==l] = 0\n    labels, numlabel = ndimage.label(labels)\n    com = ndimage.measurements.center_of_mass(np.ones(labels.shape),labels,numlabel)\n    return com, labels, numlabel\n\ndef get_donut(center_mask):\n    # takes a mask and returns the donut mask around it\n    \n    #generate circular mask for dilation\n    r = 50\n    y,x = np.ogrid[-r:r, -r:r]\n    circle = x*x + y*y <= r*r\n    \n    # dilate, then keep only the ring outside the original mask\n    total_mask = binary_dilation(center_mask,circle)\n    # numpy rejects '-' on boolean arrays, so use AND-NOT instead of subtraction\n    donut_mask = total_mask & ~center_mask\n    \n    return donut_mask\n","sub_path":".ipynb_checkpoints/image_processing-checkpoint.py","file_name":"image_processing-checkpoint.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"471588967","text":"general_constant = {\r\n    # Arguments of 
experiment\r\n \"NUM_SKILL\" : 3,\r\n \"MAXLEN_PER_SKILL\" : 3,\r\n # LEN_SKILL : NUM_SKILL * MAXLEN_PER_SKILL # Total Length of skills e.g (2,3):>6, (1,9):>9\r\n \"TOTAL_NUM_OF_WORKER\" : 30,\r\n # UPDATE_FREQ : LEN_SKILL * TOTAL_NUM_OF_WORKER # Total Length of skills training at the same time e.g 20 woker with macro shape(2,3):> 20*2*3:120\r\n\r\n # Hyperparameters\r\n \"NUM_EPOCH\" : 2,\r\n \"LEARNING_RATE\" : 5e-6,\r\n \"PENALTY\" : -1000,\r\n \"BATCH_SIZE\" : 128,\r\n\r\n \"NUM_INIT_SKILL\" : 200,\r\n \"NUM_EXPLORE_SKILL\": 800,\r\n \"MAX_NUM_TOTAL_SKILL\" : 1201,\r\n \"MAX_LIMIT_TIME\": 4*86400, \r\n # Worker parameters\r\n \"WORKER_TRAINING_STEP\" : int(5e6),\r\n}\r\n\r\n\r\nenv_list = [ \"Alien\", \"Seaquest\", \"BeamRider\",\r\n \"Breakout\", \"SpaceInvaders\", \"KungFuMaster\",\r\n \"Venture\", \"Asteroids\", \"Gravitar\",\r\n \"Solaris\", \"Frostbite\", \"CrazyClimber\", \"Zaxxon\"]\r\n\r\n# s\r\n\r\nAlien_constant = {\r\n \"REWARD_FACTOR\" : 0.05,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5],\r\n}\r\n\r\nSeaquest_constant = {\r\n \"REWARD_FACTOR\" : 0.1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5],\r\n}\r\n\r\nBeamRider_constant = {\r\n \"REWARD_FACTOR\" : 0.05,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3],\r\n}\r\n\r\nBreakout_constant = { \r\n \"REWARD_FACTOR\" : 0.05,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3],\r\n}\r\n\r\n\r\n\r\nSpaceInvaders_constant = {\r\n \"REWARD_FACTOR\" : 0.05,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3],\r\n}\r\n\r\nKungFuMaster_constant = {\r\n \"REWARD_FACTOR\" : 0.02,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],\r\n}\r\n\r\n\r\nVenture_constant = {\r\n \"REWARD_FACTOR\" : 1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4],\r\n \"WORKER_TRAINING_STEP\": int(5e6),\r\n}\r\n\r\n\r\nAsteroids_constant = {\r\n \"REWARD_FACTOR\" : 0.1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7],\r\n}\r\n\r\nGravitar_constant = {\r\n \"REWARD_FACTOR\" : 1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\r\n}\r\n\r\nFrostbite_constant = {\r\n \"REWARD_FACTOR\" : 1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\r\n}\r\nSolaris_constant = {\r\n \"REWARD_FACTOR\" : 1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\r\n}\r\n\r\nCrazyClimber_constant = {\r\n \"REWARD_FACTOR\" : 0.1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8],\r\n} \r\n\r\n\r\nZaxxon_constant = {\r\n \"REWARD_FACTOR\" : 0.1,\r\n \"VALID_ACTIONS\" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],\r\n}\r\n\r\n\r\n\r\n\r\nAlien_constant.update(general_constant)\r\n\r\nAsteroids_constant.update(general_constant)\r\nBeamRider_constant.update(general_constant)\r\nBreakout_constant.update(general_constant)\r\nCrazyClimber_constant.update(general_constant)\r\nFrostbite_constant.update(general_constant)\r\nGravitar_constant.update(general_constant)\r\nKungFuMaster_constant.update(general_constant)\r\nSeaquest_constant.update(general_constant)\r\nSpaceInvaders_constant.update(general_constant)\r\nSolaris_constant.update(general_constant)\r\nVenture_constant.update(general_constant)\r\nZaxxon_constant.update(general_constant)\r\n\r\nif __name__ == \"__main__\":\r\n print(Seaquest_constant)\r\n \r\n \r\n","sub_path":"DQN_skill/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"523491811","text":"#!/usr/bin/env python\n# coding: utf8\n\n# 使用示例\ndef add_temp_truck(plate, settlement_mode):\n master = db.session.using_bind('master')\n truck = Truck.query.with_session(master).filter_by(plate=plate).update({Truck.settlement_mode: settlement_mode})\n if truck:\n db.session.commit()\n return None, plate\n else:\n return cs.NO_TRUCK, None\n\n\n\"\"\"\n这里存放服务需要的一些数据库辅助函数\n\"\"\"\n\n# 以下代码摘自 https://gist.github.com/adhorn/b84dc47175259992d406\n# 用于实现数据库的读写分离\n# 同时参考了这里 https://techspot.zzzeek.org/2012/01/11/django-style-database-routers-in-sqlalchemy/\n\nfrom flask_sqlalchemy import SQLAlchemy, orm, get_state, BaseQuery\nfrom functools import partial\nfrom flask import current_app\n\n\nclass RoutingSession(orm.Session):\n\n def __init__(self, db, autocommit=False, autoflush=False, **options):\n self.app = db.get_app()\n self.db = db\n self._model_changes = {}\n orm.Session.__init__(\n self, autocommit=autocommit, autoflush=autoflush,\n bind=db.engine,\n query_cls=BaseQuery,\n binds=db.get_binds(self.app), **options)\n\n def get_bind(self, mapper=None, clause=None):\n\n try:\n state = get_state(self.app)\n except (AssertionError, AttributeError, TypeError) as err:\n current_app.logger.info(\n \"cant get configuration. default bind. Error:\" + err)\n return orm.Session.get_bind(self, mapper, clause)\n\n \"\"\"\n If there are no binds configured, connect using the default\n SQLALCHEMY_DATABASE_URI\n \"\"\"\n if state is None or not self.app.config['SQLALCHEMY_BINDS']:\n if not self.app.debug:\n current_app.logger.info(\"Connecting -> DEFAULT\")\n return orm.Session.get_bind(self, mapper, clause)\n\n elif self._name:\n self.app.logger.debug(\"Connecting -> {}\".format(self._name))\n return state.db.get_engine(self.app, bind=self._name)\n\n # 有写的情况下必须走主库\n elif self._flushing: # we who are about to write, salute you\n current_app.logger.info(\"Connecting -> MASTER\")\n return state.db.get_engine(self.app, bind='master')\n\n else:\n # 默认走主库 除非显式声明要用从库\n # 这样可以避免一些预料不到的逻辑问题\n current_app.logger.info(\"Connecting -> MASTER\")\n return state.db.get_engine(self.app, bind='master')\n\n _name = None\n\n def using_bind(self, name):\n s = RoutingSession(self.db)\n vars(s).update(vars(self))\n s._name = name\n return s\n\n\nclass RouteSQLAlchemy(SQLAlchemy):\n\n def __init__(self, *args, **kwargs):\n SQLAlchemy.__init__(self, *args, **kwargs)\n self.session.using_bind = lambda s: self.session().using_bind(s)\n\n def create_scoped_session(self, options=None):\n if options is None:\n options = {}\n scopefunc = options.pop('scopefunc', None)\n return orm.scoped_session(\n partial(RoutingSession, self, **options), scopefunc=scopefunc\n )\n\n# db = RouteSQLAlchemy()\n","sub_path":"sqlalchemy/jyf_app.py","file_name":"jyf_app.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"36732369","text":"#!/usr/bin/python\n\"\"\"Unit tests for facebook.py.\n\"\"\"\n\n__author__ = ['Ryan Barrett ']\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nimport mox\nimport urllib\nimport urlparse\nimport webapp2\n\nimport facebook\nimport source\nimport testutil\n\n# test data\nUSER = {\n 'id': '212038',\n 'name': 'Ryan Barrett',\n 'link': 'http://www.facebook.com/snarfed.org',\n 'username': 'snarfed.org',\n 'location': {'id': '123', 'name': 'San Francisco, California'},\n 'updated_time': '2012-01-06T02:11:04+0000',\n 'bio': 'something about me',\n }\nACTOR = {\n 'displayName': 
'Ryan Barrett',\n 'image': {'url': 'http://graph.facebook.com/snarfed.org/picture?type=large'},\n 'id': 'tag:facebook.com,2012:snarfed.org',\n 'updated': '2012-01-06T02:11:04+0000',\n 'url': 'http://www.facebook.com/snarfed.org',\n 'username': 'snarfed.org',\n 'description': 'something about me',\n 'location': {'id': '123', 'displayName': 'San Francisco, California'},\n }\nPOST = {\n 'id': '212038_10100176064482163',\n 'from': {'name': 'Ryan Barrett', 'id': '212038'},\n 'story': 'Ryan Barrett added a new photo.',\n 'picture': 'https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash4/420582_10100176064452223_212038_41571100_37729316_s.jpg',\n 'link': 'http://snarfed.org/2012-02-22_portablecontacts_for_facebook_and_twitter',\n 'message': 'Checking another side project off my list. portablecontacts-unofficial is live!',\n 'name': 'PortableContacts for Facebook and Twitter',\n 'caption': 'snarfed.org',\n 'icon': 'https://s-static.ak.facebook.com/rsrc.php/v1/yx/r/og8V99JVf8G.gif',\n 'place': {\n 'id': '113785468632283',\n 'name': 'Lake Merced',\n 'location': {\n 'city': 'San Francisco',\n 'state': 'CA',\n 'country': 'United States',\n 'latitude': 37.728193717481,\n 'longitude': -122.49336423595\n }\n },\n 'type': 'photo',\n 'object_id': '10100176064452223',\n 'application': {'name': 'Facebook for Android', 'id': '350685531728'},\n 'created_time': '2012-03-04T18:20:37+0000',\n 'updated_time': '2012-03-04T19:08:16+0000',\n}\nOBJECT = {\n 'objectType': 'note',\n 'author': {\n 'id': 'tag:facebook.com,2012:212038',\n 'displayName': 'Ryan Barrett',\n 'image': {'url': 'http://graph.facebook.com/212038/picture?type=large'},\n },\n 'content': 'Checking another side project off my list. portablecontacts-unofficial is live!',\n 'id': 'tag:facebook.com,2012:212038_10100176064482163',\n 'published': '2012-03-04T18:20:37+0000',\n 'updated': '2012-03-04T19:08:16+0000',\n 'url': 'http://facebook.com/212038/posts/10100176064482163',\n 'image': {'url': 'https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash4/420582_10100176064452223_212038_41571100_37729316_s.jpg'},\n 'location': {\n 'displayName': 'Lake Merced',\n 'id': '113785468632283',\n 'position': '+37.728194-122.493364/',\n },\n }\nACTIVITY = {\n 'verb': 'post',\n 'published': '2012-03-04T18:20:37+0000',\n 'updated': '2012-03-04T19:08:16+0000',\n 'id': 'tag:facebook.com,2012:212038_10100176064482163',\n 'url': 'http://facebook.com/212038/posts/10100176064482163',\n 'actor': OBJECT['author'],\n 'object': OBJECT,\n 'generator': {\n 'displayName': 'Facebook for Android',\n 'id': 'tag:facebook.com,2012:350685531728',\n }\n }\n\n\nclass FacebookTest(testutil.HandlerTest):\n\n def setUp(self):\n super(FacebookTest, self).setUp()\n self.facebook = facebook.Facebook(self.handler)\n\n def test_get_activities_defaults(self):\n resp = json.dumps({'data': [\n {'id': '1_2', 'message': 'foo'},\n {'id': '3_4', 'message': 'bar'},\n ]})\n self.expect_urlfetch(\n 'https://graph.facebook.com/me/home?offset=0&limit=0', resp)\n self.mox.ReplayAll()\n\n self.assert_equals((\n None,\n [{'id': 'tag:facebook.com,2012:1_2',\n 'object': {'content': 'foo',\n 'id': 'tag:facebook.com,2012:1_2',\n 'objectType': 'note',\n 'url': 'http://facebook.com/1/posts/2'},\n 'url': 'http://facebook.com/1/posts/2',\n 'verb': 'post'},\n {'id': 'tag:facebook.com,2012:3_4',\n 'object': {'content': 'bar',\n 'id': 'tag:facebook.com,2012:3_4',\n 'objectType': 'note',\n 'url': 'http://facebook.com/3/posts/4'},\n 'url': 'http://facebook.com/3/posts/4',\n 'verb': 'post'},\n ]),\n self.facebook.get_activities())\n\n 
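# the SELF group_id should fetch the user's own posts (/me/posts) rather than the home feed\n  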
def test_get_activities_self(self):\n self.expect_urlfetch(\n 'https://graph.facebook.com/me/posts?offset=0&limit=0', '{}')\n self.mox.ReplayAll()\n self.assert_equals((None, []),\n self.facebook.get_activities(group_id=source.SELF))\n\n def test_get_activities_passes_through_access_token(self):\n self.expect_urlfetch(\n 'https://graph.facebook.com/me/home?offset=0&limit=0&access_token=asdf',\n '{\"id\": 123}')\n self.mox.ReplayAll()\n\n handler = webapp2.RequestHandler(webapp2.Request.blank('/?access_token=asdf'),\n webapp2.Response())\n self.facebook = facebook.Facebook(handler)\n self.facebook.get_activities()\n\n def test_get_activities_activity_id(self):\n self.expect_urlfetch('https://graph.facebook.com/000', json.dumps(POST))\n self.mox.ReplayAll()\n\n # activity id overrides user, group, app id and ignores startIndex and count\n self.assert_equals(\n (1, [ACTIVITY]),\n self.facebook.get_activities(\n user_id='123', group_id='456', app_id='789', activity_id='000',\n start_index=3, count=6))\n\n def test_get_activities_activity_id_not_found(self):\n self.expect_urlfetch('https://graph.facebook.com/000', 'false')\n self.mox.ReplayAll()\n self.assert_equals((0, []), self.facebook.get_activities(activity_id='000'))\n\n def test_post_to_activity_full(self):\n self.assert_equals(ACTIVITY, self.facebook.post_to_activity(POST))\n\n def test_post_to_activity_minimal(self):\n # just test that we don't crash\n self.facebook.post_to_activity({'id': '123_456', 'message': 'asdf'})\n\n def test_post_to_activity_empty(self):\n # just test that we don't crash\n self.facebook.post_to_activity({})\n\n def test_post_to_object_full(self):\n self.assert_equals(OBJECT, self.facebook.post_to_object(POST))\n\n def test_post_to_object_minimal(self):\n # just test that we don't crash\n self.facebook.post_to_object({'id': '123_456', 'message': 'asdf'})\n\n def test_post_to_object_empty(self):\n self.assert_equals({}, self.facebook.post_to_object({}))\n\n def test_user_to_actor_full(self):\n self.assert_equals(ACTOR, self.facebook.user_to_actor(USER))\n\n def test_user_to_actor_minimal(self):\n actor = self.facebook.user_to_actor({'id': '212038'})\n self.assert_equals('tag:facebook.com,2012:212038', actor['id'])\n self.assert_equals('http://graph.facebook.com/212038/picture?type=large',\n actor['image']['url'])\n\n def test_user_to_actor_empty(self):\n self.assert_equals({}, self.facebook.user_to_actor({}))\n","sub_path":"facebook_test.py","file_name":"facebook_test.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"304481084","text":"#!/usr/bin/env python3 -tt\n\nclass Object(object):\n def __init__(self, size, pos, vel=(0,0), lifespan='inf', canvas=None):\n self.size = size\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.lifespan = lifespan\n self.oid = self.draw(canvas) if canvas else None\n\n def draw(self,canvas):\n canvas.create_rectangle(self.pos[0]-self.size[0]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.pos[1]-self.size[1]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.pos[0]+self.size[0]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.pos[1]+self.size[1]/2)\n\n def update(self,canvas):\n self.pos[0] += self.vel[0]\n self.pos[1] += self.vel[1]\n canvas.coords(self.oid, self.pos[0]-self.size[0]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.pos[1]-self.size[1]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.pos[0]+self.size[0]/2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.pos[1]+self.size[1]/2)\n\n def __str__(self):\n s = \"An object is created 
with-\\nsize:{0} \\npos:{1} \\nvel:{2} \\nlifespan:{3} \\noid:{4}\"\n return s.format(*list(map(str,[self.size,self.pos,self.vel,self.lifespan,self.oid])))\n\nif __name__==\"__main__\":\n print(Object((1,2),(3,4)))\n","sub_path":"coursera_interactive_python/miniprojects/finalproject/Object.py","file_name":"Object.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"588229142","text":"from os import makedirs, path\nfrom undine.server.driver.base_driver import BaseDriver\nfrom undine.server.information import ConfigInfo, WorkerInfo, InputInfo\nfrom undine.server.information import TaskInfo\nfrom undine.utils.exception import UndineException\n\n\nclass FileDriver(BaseDriver):\n _DEFAULT_RESULT_DIR = 'results'\n _DEFAULT_RESULT_EXT = '.log'\n _DEFAULT_WORKER_ID = 0\n _DEFAULT_WORKER_CMD = 'example.rb'\n _DEFAULT_WORKER_ARGS = '-c %C -r %R %I'\n _DEFAULT_WORKER_DIR = ''\n _DEFAULT_WORKER_USE_FILE_ARGS = True\n\n #\n # Private/Protected methods\n #\n def _load_config(self, file_path):\n self._configs = dict()\n\n cid = 0\n for line in open(file_path, 'r'):\n if not line.strip():\n continue\n\n name, config = [item.strip()\n for item in line.split(',', maxsplit=1)]\n\n self._configs[cid] = ConfigInfo(cid=cid, name=name, config=config,\n dir=self._config_dir,\n ext=self._config_ext)\n\n cid += 1\n\n def _load_inputs(self, file_path):\n self._inputs = dict()\n\n iid = 0\n for line in open(file_path, 'r'):\n if not line.strip():\n continue\n\n name, items = [item.strip()\n for item in line.split(',', maxsplit=1)]\n\n self._inputs[iid] = InputInfo(iid=iid, name=name, items=items)\n\n iid += 1\n\n def _build_worker(self, config):\n command = config.setdefault('worker_command', self._DEFAULT_WORKER_CMD)\n args = config.setdefault('worker_arguments', self._DEFAULT_WORKER_ARGS)\n directory = config.setdefault('worker_dir', self._DEFAULT_WORKER_DIR)\n file_input = config.setdefault('worker_use_file_args',\n self._DEFAULT_WORKER_USE_FILE_ARGS)\n\n self._worker = WorkerInfo(wid=self._DEFAULT_WORKER_ID,\n dir=directory, cmd=command,\n arguments=args, file_input=file_input)\n\n def _build_task_matrix(self):\n # key: tid, value: TaskInfo\n self._tasks = dict()\n\n # key: state name, value: tid list\n self._state = {'ready': list(), 'issued': list(), 'done': list(),\n 'canceled': list(), 'failed': list()}\n\n total_inputs = len(self._inputs)\n for cid in self._configs.keys():\n for iid in self._inputs.keys():\n tid = cid * total_inputs + iid\n\n self._tasks[tid] = TaskInfo(tid=tid, cid=cid, iid=iid,\n wid=self._DEFAULT_WORKER_ID)\n\n self._state['ready'].append(tid)\n\n def _task_name(self, task):\n return \"tid{0}-{1}-{2}\".format(task.tid,\n self._configs[task.cid].name,\n self._inputs[task.iid].name)\n\n def _task_filename(self, task, ext):\n return \"{0}{1}\".format(self._task_name(task), ext)\n\n def _move_state(self, tid, from_, to_):\n self._state[from_].remove(tid)\n self._state[to_].append(tid)\n\n return self._tasks[tid]\n\n #\n # Constructor & Destructor\n #\n def __init__(self, config, config_dir):\n BaseDriver.__init__(self, config, config_dir)\n\n # 1. Check input parameter is not missing\n if 'config_file' not in config:\n raise UndineException(\"'config_file' is not set in driver section\")\n\n if 'input_file' not in config:\n raise UndineException(\"'input_file' is not set in driver section\")\n\n # 2. 
Get default values\n self._result_ext = config.setdefault('result_ext',\n self._DEFAULT_RESULT_EXT)\n\n # 3. Load task information\n self._load_config(config['config_file'])\n self._load_inputs(config['input_file'])\n self._build_worker(config)\n self._build_task_matrix()\n\n # 4. Make result repository\n self._results = dict()\n self._result_dir = config.setdefault('result_dir',\n self._DEFAULT_RESULT_DIR)\n\n makedirs(self._result_dir, mode=0o700, exist_ok=True)\n\n #\n # Inherited methods\n #\n def fetch(self):\n return self._tasks[self._state['ready'][0]]\n\n def config(self, cid):\n return self._configs[cid]\n\n def worker(self, _wid):\n return self._worker\n\n def inputs(self, iid):\n return self._inputs[iid]\n\n def preempt(self, tid):\n self._move_state(tid, 'ready', 'issued')\n\n return True\n\n def done(self, tid, contents, _report):\n task = self._move_state(tid, 'issued', 'done')\n\n # Keep result in memory currently.\n self._results[tid] = contents\n\n # Store result file in file repository\n filename = self._task_filename(task, self._result_ext)\n with open(path.join(self._result_dir, filename), 'w') as f_out:\n f_out.write(contents)\n\n return True\n\n def cancel(self, tid):\n self._move_state(tid, 'issued', 'canceled')\n\n def fail(self, tid, message):\n task = self._move_state(tid, 'issued', 'failed')\n title = 'tid({1}) - {0}'.format(self._task_name(task), tid)\n\n self._error_logging(title, message)\n\n def is_ready(self):\n return bool(self._state['ready'])\n","sub_path":"undine/server/driver/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"216318036","text":"#!/usr/bin/python\r\n# -*- coding: cp936 -*-\r\n# Author: zhanglingfeng\r\n# Date: 2015-08-27\r\n\r\nimport time\r\nimport string\r\nimport re\r\n\r\nfrom libcom.lib_pub.logging_drv import log_info\r\nfrom libcom.console_drv.console_drv import Console\r\nfrom libcom.lib_cmd.inter_mode import *\r\nfrom libcom.lib_cmd.config_mode import *\r\nfrom libcom.lib_cmd.shell_mode import *\r\nfrom libcom.lib_cmd.chg_mode import *\r\nfrom libcom.console_drv.console_drv import *\r\nfrom libcom.config_topo.topo_drv import *\r\nfrom cases_set.route.uc.uc_commn_sw_cmds import get_sw_active_intf\r\nfrom libcom.device_adapt.device_adapt import _sw_map as _dev_mapping \r\nfrom libcom.device_adapt.device_adapt import SwitchAdapter\r\n\r\n__all__ = [\"uft_base_003\"]\r\n\r\nSUCCESS = 0\r\nFAIL = -1\r\n\r\nuft_mode = []\r\ndev_uft_mode = {'N18K_ED1':['bridge','default','gateway','gateway-max','gateway-ndmax',\r\n 'label','route-v4max','route-v6max'],\r\n 'S6220':['bridge','default','gateway','gateway-max','gateway-ndmax',\r\n 'route-v4max','route-v6max','vxlan'],\r\n 'S6K':['bridge','default','gateway','gateway-max','gateway-ndmax',\r\n 'route-v4max','route-v6max','vxlan'],\r\n 'N18K_DB':['bridge','default','gateway','gateway-max','gateway-ndmax',\r\n 'label','route-v4max','route-v6max']}\r\ndef uft_change_mode_test(cb_arg, mode, console):\r\n sw_adapter = SwitchAdapter(cb_arg.dev_names[0])\r\n slots = sw_adapter.get_slots()\r\n for lc_name in slots:\r\n console.run_cmd(\"en\")\r\n console.run_cmd(\"conf\")\r\n console.run_cmd(\"switch-mode \"+ mode + \" slot \" + lc_name)\r\n console.run_cmd(\"en\")\r\n console.run_cmd(\"w\")\r\n\r\n time.sleep(1)\r\n console.run_cmd(\"reload\" +\"\\r\\n\" + \" y\")\r\n console.run_cmd(\"en\")\r\n console.read_until(\"#\")\r\n time.sleep(3)\r\n console.run_cmd(\"en\")\r\n 
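# after the reload, verify that the switch reports the newly configured mode\r\n    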
cli_result = console.run_cmd(\"show switch-mode status\")\r\n    log_info(\"CLI output: \" + cli_result)\r\n    if cli_result.find(mode) == -1:\r\n        log_info(\"switch-mode \" + mode + \" was not applied correctly\")\r\n        return FAIL\r\n    return SUCCESS\r\n\r\ndef _uft_base_003(cb_arg, console):\r\n    console.run_cmd(\"en\")\r\n    dev = _dev_mapping[cb_arg.dev_names[0]]\r\n    if dev_uft_mode.has_key(dev):\r\n        for mode in dev_uft_mode[dev]:\r\n            if uft_change_mode_test(cb_arg, mode, console) == FAIL:\r\n                return FAIL\r\n    else:\r\n        log_info(\"Failed: no UFT switch modes are defined for this device type\")\r\n        return FAIL\r\n    return SUCCESS\r\n\r\ndef uft_base_003(cb_arg):\r\n    if len(cb_arg.dev_names) == 0:\r\n        log_info(\"Failed: need one switch to be tested.\")\r\n        return FAIL\r\n\r\n    dev_name = cb_arg.dev_names[0]\r\n    console = Console(dev_name)\r\n    console.wake_up()\r\n    result = SUCCESS\r\n    try:\r\n        result = _uft_base_003(cb_arg, console)\r\n    finally:\r\n        exit_console(dev_name)\r\n    return result\r\n","sub_path":"cases_set/route/uft/UFT_BASE_003.py","file_name":"UFT_BASE_003.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"156647462","text":"import tkinter as tk\r\nfrom tkinter.constants import CENTER, DISABLED, GROOVE, PIESLICE, SUNKEN, TOP\r\nimport random\r\n\r\nwindow = tk.Tk()\r\nsin0 = \"0\"\r\nsin30 = \"1/2\"\r\nsin45 = \"1/root2\"\r\nsin60 = \"root3/2\"\r\nsin90 = \"1\"\r\n\r\ncos0=\"1\"\r\ncos30=\"root3/2\"\r\ncos45=\"1/root2\"\r\ncos60=\"1/2\"\r\ncos90=\"0\"\r\n\r\ntan0=\"0\"\r\ntan30=\"1/root3\"\r\ntan45=\"1\"\r\ntan60=\"root3\"\r\ntan90=\"not defined\"\r\n\r\ncosec0=\"Not defined\"\r\ncosec30=\"2\"\r\ncosec45=\"root2\"\r\ncosec60=\"2/root3\"\r\ncosec90=\"1\"\r\n\r\nsec0=\"1\"\r\nsec30=\"2/root3\"\r\nsec45=\"root2\"\r\nsec60=\"2\"\r\nsec90=\"Not defined\"\r\n\r\ncot0=\"Not defined\"\r\ncot30=\"root3\"\r\ncot45=\"1\"\r\ncot60=\"1/root3\"\r\ncot90=\"0\"\r\n\r\nvalues_w = []\r\nentrys_w=[]\r\ncorrect_values= [[sin0,sin30,sin45,sin60,sin90],\r\n                [cos0,cos30,cos45,cos60,cos90],\r\n                [tan0,tan30,tan45,tan60,tan90],\r\n                [cosec0,cosec30,cosec45,cosec60,cosec90],\r\n                [sec0,sec30,sec45,sec60,sec90],\r\n                [cot0,cot30,cot45,cot60,cot90] ]\r\n\r\nclass Table:\r\n    def __init__(self,root):\r\n        for i in range(total_rows):\r\n            values=[]\r\n            for j in range(total_columns):\r\n                text = tk.StringVar()\r\n                self.e = tk.Entry(root, width=15, fg='black',\r\n                               font=('Arial',16,'bold'),textvariable=text)\r\n                self.e.grid(row=i+1, column=j)\r\n                if i == 0 or j == 0:\r\n                    self.e.insert(tk.END, lst[i][j])\r\n                    self.e.configure(state='readonly')\r\n                else:\r\n                    self.e.insert(tk.END, lst[i][j])\r\n                values.append(text)\r\n            # keep a reference to every row; previously the last row was never appended\r\n            values_w.append(values)\r\n        self.btn = tk.Button(root,width=20,text=\"Submit\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=submit)\r\n        self.btn.grid(row=total_rows+1,column=2)\r\n        self.btn = tk.Button(root,width=20,text=\"Refresh\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=refresh)\r\n        self.btn.grid(row=total_rows+1,column=4)\r\n        self.btn = tk.Button(root,width=20,text=\"Fill all\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=fill_all)\r\n        self.btn.grid(row=total_rows+2,column=3)\r\n\r\ndef set_fill_lst():\r\n    lst = [('ratios',0,30,45,60,90)]\r\n    lst.append(('sin',sin0,sin30,sin45,sin60,sin90))\r\n    lst.append(('cos',cos0,cos30,cos45,cos60,cos90))\r\n    lst.append(('tan',tan0,tan30,tan45,tan60,tan90))\r\n    lst.append(('cosec',cosec0,cosec30,cosec45,cosec60,cosec90))\r\n    
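# sec and cot complete the six ratio rows\r\n    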
lst.append(('sec',sec0,sec30,sec45,sec60,sec90))\r\n    lst.append(('cot',cot0,cot30,cot45,cot60,cot90))\r\n    return lst\r\n\r\ndef fill_all():\r\n    global t\r\n    global lst\r\n    global values_w\r\n    t.e.grid_remove()\r\n    lst=set_fill_lst()\r\n    values_w=[]\r\n    t=Table(window)\r\n    \r\n\r\ndef refresh():\r\n    global t\r\n    global lst\r\n    global values_w\r\n    t.e.grid_remove()\r\n    lst = load_random_values()\r\n    values_w=[]\r\n    t=Table(window)\r\n\r\ndef submit():\r\n    global values_w\r\n    global total_rows\r\n    global total_columns\r\n    not_correct = False\r\n    # check every row; row 0 and column 0 are the read-only headers\r\n    for i in range(total_rows):\r\n        for j in range(total_columns):\r\n            if j > 0 and i > 0:\r\n                if values_w[i][j].get() != \"\":\r\n                    if values_w[i][j].get().lower().replace(\" \",\"\") == correct_values[i-1][j-1].lower().replace(\" \",\"\"):\r\n                        pass\r\n                    else:\r\n                        text.configure(text=\"Some of the values are incorrect\",foreground=\"red\")\r\n                        not_correct = True\r\n                        return\r\n                else:\r\n                    text.configure(text=\"Please Fill all the values\",foreground=\"red\")\r\n                    not_correct = True\r\n                    return\r\n    if not not_correct: \r\n        text.configure(text=\"Every Value is Correct!\",foreground=\"green\")\r\n        refresh() \r\n\r\ndef load_random_values():\r\n    lst = [('ratios',0,30,45,60,90)]\r\n    lst.append(('sin',random.choice(['',sin0]),random.choice(['',sin30]),random.choice(['',sin45]),random.choice(['',sin60]),random.choice(['',sin90])))\r\n    lst.append(('cos',random.choice(['',cos0]),random.choice(['',cos30]),random.choice(['',cos45]),random.choice(['',cos60]),random.choice(['',cos90])))\r\n    lst.append(('tan',random.choice(['',tan0]),random.choice(['',tan30]),random.choice(['',tan45]),random.choice(['',tan60]),random.choice(['',tan90])))\r\n    lst.append(('cosec',random.choice(['',cosec0]),random.choice(['',cosec30]),random.choice(['',cosec45]),random.choice(['',cosec60]),random.choice(['',cosec90])))\r\n    lst.append(('sec',random.choice(['',sec0]),random.choice(['',sec30]),random.choice(['',sec45]),random.choice(['',sec60]),random.choice(['',sec90])))\r\n    lst.append(('cot',random.choice(['',cot0]),random.choice(['',cot30]),random.choice(['',cot45]),random.choice(['',cot60]),random.choice(['',cot90])))\r\n    return lst\r\n\r\nlst = load_random_values()\r\n\r\n# find total number of rows and\r\n# columns in list\r\ntotal_rows = len(lst)\r\ntotal_columns = len(lst[0])\r\n\r\ntext=tk.Label(window,text=\"\")\r\ntext.grid(column=3,row=total_rows+1)\r\n \r\n# create root window\r\nt=Table(window)\r\n\r\n\r\nwindow.mainloop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"115260442","text":"from config import database, helpers, db_context\nhelpers.define_model(\n    \"SYS_ValueList\",\n    [[\"language\", \"list_name\"]],\n    language=(\"text\"),\n    list_name=(\"text\"),\n    values=(\"list\",False,dict(\n        value = (\"numeric\"),\n        caption = (\"text\"),\n        custom = (\"text\")\n    )),\n    multi_select=(\"bool\"),\n    description=(\"text\"),\n    created_on=(\"date\"),\n    created_by=(\"text\"),\n    modified_on=(\"date\"),\n    modified_by=(\"text\")\n    )\n\ndef SYS_ValueList():\n    ret = db_context.collection(\"SYS_ValueList\")\n    return 
ret","sub_path":"apps/performance/api/models/SYS_ValueList.py","file_name":"SYS_ValueList.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23399433","text":"import logging\nimport pandas as pd\nfrom django import forms\nfrom django.contrib import admin\nfrom bootstrap3_datetime.widgets import DateTimePicker\nfrom django.core.urlresolvers import reverse\n\nfrom base.admin import DateForm\nfrom rivers.settings import QUOTE_DIR\nfrom subtool.live.excel_rtd.views import excel_rtd_create\nfrom subtool.models import OptionTimeSale\nfrom subtool.option.timesale.views import timesale_report_view, timesale_insert_view\nfrom subtool.ticker.minute1.views import minute1_si_report\n\nlogger = logging.getLogger('views')\n\n\nclass OptionTimeSaleAdmin(admin.ModelAdmin):\n # enable bootstrap datetime js\n\n def report(self):\n return 'Report'.format(\n link=reverse(\n 'admin:timesale_report_view',\n kwargs={'symbol': self.symbol.lower(), 'date': self.date.strftime('%Y-%m-%d')}\n )\n )\n\n report.allow_tags = True\n\n list_display = ['symbol', 'date', report]\n fieldsets = (\n ('Primary Fields', {\n 'fields': (\n 'symbol', 'date', 'extra_field',\n )\n }),\n )\n search_fields = ('symbol', 'date')\n list_per_page = 20\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def get_actions(self, request):\n # Disable delete\n actions = super(OptionTimeSaleAdmin, self).get_actions(request)\n del actions['delete_selected']\n return actions\n\n # noinspection PyMethodMayBeStatic,PyUnusedLocal\n def delete_report(self, request, queryset):\n for q in queryset.all():\n timesale = OptionTimeSale.objects.get(id=q.id)\n logger.info('OptionTimeSale: %s %s' % (\n timesale.symbol, timesale.date.strftime('%Y-%m-%d')\n ))\n\n db = pd.HDFStore(QUOTE_DIR)\n path = '/option/%s/final/timesale' % timesale.symbol.lower()\n try:\n df_timesale = db.select(path, 'date == %r' % pd.to_datetime(timesale.date))\n logger.info('df_timesale remove: %d' % len(df_timesale))\n except KeyError:\n pass\n\n try:\n db.remove(path, where='date == %r' % pd.to_datetime(timesale.date))\n except NotImplementedError:\n db.remove(path)\n except KeyError:\n pass\n\n db.close()\n\n queryset.delete()\n\n delete_report.short_description = \"Delete option time sale report\"\n actions = [delete_report]\n\n\nadmin.site.register(OptionTimeSale, OptionTimeSaleAdmin)\n\nadmin.site.register_view(\n 'subtool/optiontimesale/input', urlname='timesale_insert_view', view=timesale_insert_view\n)\nadmin.site.register_view(\n 'subtool/optiontimesale/summary/(?P\\w+)/(?P\\d{4}-\\d{2}-\\d{2})/$',\n urlname='timesale_report_view', view=timesale_report_view\n)\nadmin.site.register_view(\n 'subtool/live/excel_rtd/create', urlname='excel_rtd_create', view=excel_rtd_create\n)\nadmin.site.register_view(\n 'subtool/ticker/minute1/si/report/(?P\\w+)/(?P\\d{4}-\\d{2}-\\d{2})/$',\n urlname='minute1_si_report', view=minute1_si_report\n)\n","sub_path":"subtool/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"369904925","text":"\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Load libraries for data processing\n #data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport numpy as np\nfrom scipy.stats import norm\n\n# visualization\nimport seaborn as sns \nplt.style.use('fivethirtyeight')\nsns.set_style(\"white\")\n\n\nplt.rcParams['figure.figsize'] = (8,4) \n#plt.rcParams['axes.titlesize'] = 'large'\n\ndata = pd.read_csv('data/clean-data.csv', index_col=False)\ndata.drop('Unnamed: 0',axis=1, inplace=True)\n\narray = data.values\nX = array[:,1:31]\ny = array[:,0]\n\n#transform the class labels from their original string representation (M and B) into integers\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\n\n#Call the transform method of LabelEncoder on two dummy variables\n#le.transform (['M', 'B'])\n\nfrom sklearn.model_selection import train_test_split\n\n##Split data set into 75% train and 25% test\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=7)\nX_train.shape, y_train.shape, X_test.shape, y_test.shape\n\nfrom sklearn.preprocessing import StandardScaler\n\n# Normalize the data (center around 0 and scale to remove the variance).\nscaler =StandardScaler()\nXs = scaler.fit_transform(X)\n\nfrom sklearn.decomposition import PCA\n# feature extraction\npca = PCA(n_components=10)\nfit = pca.fit(Xs)\n\n# summarize components\n#print(\"Explained Variance: %s\") % fit.explained_variance_ratio_\n#print(fit.components_)\n\nX_pca = pca.transform(Xs)\n\nPCA_df = pd.DataFrame()\n\nPCA_df['PCA_1'] = X_pca[:,0]\nPCA_df['PCA_2'] = X_pca[:,1]\n\nplt.plot(PCA_df['PCA_1'][data.diagnosis == 'M'],PCA_df['PCA_2'][data.diagnosis == 'M'],'o', alpha = 0.7, color = 'r')\nplt.plot(PCA_df['PCA_1'][data.diagnosis == 'B'],PCA_df['PCA_2'][data.diagnosis == 'B'],'o', alpha = 0.7, color = 'b')\n\nplt.xlabel('PCA_1')\nplt.ylabel('PCA_2')\nplt.legend(['Malignant','Benign'])\nplt.show()\n\n#The amount of variance that each PC explains\nvar= pca.explained_variance_ratio_\n#Cumulative Variance explains\n#var1=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)\n#print(var1)\n\nplt.plot(var)\nplt.title('Scree Plot')\nplt.xlabel('Principal Component')\nplt.ylabel('Eigenvalue')\n\nleg = plt.legend(['Eigenvalues from PCA'], loc='best', borderpad=0.3,shadow=False,markerscale=0.4)\nleg.get_frame().set_alpha(0.4)\nleg.draggable(state=True)\nplt.show()","sub_path":"DataPreprocessing.py","file_name":"DataPreprocessing.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"338658653","text":"import bs4\nfrom bs4 import BeautifulSoup as BS\n\ndef wrapInXml(url, title, text, agency, author):\n    xml = '<doc>\\n'\n    xml+='<url>'\n    xml+=url\n    xml+='</url>\\n<title>'\n    xml+=title\n    xml+='</title>\\n<text>'\n    xml+=text\n    xml+='</text>\\n<agency>'\n    xml+=agency\n    xml+='</agency>\\n<author>'\n    xml+=author\n    xml+='</author>\\n</doc>\\n'\n    return xml.encode('utf-8')\n\ndef getRequiredContent(page):\n    pageId = page.split(\"/\")[-1]\n    page = open(page).read()\n    soup = BS(page)\n    title = soup.title.string.split(\"-\")[0].strip()\n    url = soup.findAll('meta',property=\"og:url\")[0]['content']\n    text = \"\"\n    textTag = soup.findAll('div', {\"class\":\"entry-content\"})[0].contents\n    for misc in textTag:\n        if type(misc)==bs4.element.Tag:\n            if type(misc.contents[0]) == bs4.element.Tag:\n                text += \" \"+ misc.contents[0].contents[0]\n            else:\n                text += \" \"+misc.contents[0]\n        else:\n            text 
+= misc\n text = text.strip()\n agency = soup.findAll('a',rel=\"category\")[0].contents[0]\n author = soup.findAll('p',{'class':['details', 'vcard', 'author']})[0].findChildren(\"a\",rel=\"user\")[0][\"href\"].split(\"/\")[-1]\n return url, title, text, agency, author\n\n\ndef generateXML(page, xmlOutput):\n outFile = open(xmlOutput,\"w\")\n url, title, text, agency, author = getRequiredContent(page)\n xml = wrapInXml(url, title, text, agency, author)\n outFile.write(xml)\n outFile.close()\n \nif __name__ == '__main__':\n page = \"/usr0/home/pgadde/Misc/Scraping/Pages/365050-19787\"\n xmlOutput = \"/usr0/home/pgadde/Misc/Scraping/365050-19787.xml\"\n generateXML(page, xmlOutput)","sub_path":"EthnicGroups/src/MessageBoards/BeautofulSoupStarter.py","file_name":"BeautofulSoupStarter.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309811461","text":"\n__command__ = \"strlen\"\n__usage__ = \"strlen\"\n__description__ = \"Text length\"\n\ndef run(shell, args): \t\n\tkeyword = \"\" if len(args) == 0 else args[0]\n\tlines = shell.multilineInput()\n\tprint(\"Number of characters with spaces : \"+ str(len(lines)))\n\tprint(\"Number of alphanumeric characters : \"+ str(len( \"\".join(lines.split()) )))\t\n\treturn \"\"\n\t\t\n\t","sub_path":"tasks/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"411466118","text":"from django.conf import settings\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives, get_connection\nfrom django.template import Context, TemplateDoesNotExist\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext as _\n\nfrom .utils import _get_node, BlockNotFound\n\n\nclass EmailRenderException(Exception):\n pass\n\n\nclass TemplateBackend(object):\n def __init__(self, fail_silently=False,\n template_prefix=None, template_suffix=None, **kwargs):\n self.template_prefix = template_prefix or getattr(settings, 'TEMPLATED_EMAIL_TEMPLATE_DIR', 'templated_email/')\n self.template_suffix = template_suffix or getattr(settings, 'TEMPLATED_EMAIL_FILE_EXTENSION', 'email')\n\n def _render_email(self, template_name, context,\n template_dir=None, file_extension=None):\n response = {}\n errors = {}\n prefixed_template_name = ''.join((template_dir or self.template_prefix, template_name))\n render_context = Context(context, autoescape=False)\n file_extension = file_extension or self.template_suffix\n if file_extension.startswith('.'):\n file_extension = file_extension[1:]\n full_template_name = '%s.%s' % (prefixed_template_name, file_extension)\n\n try:\n multi_part = get_template(full_template_name)\n except TemplateDoesNotExist:\n multi_part = None\n\n if multi_part:\n for part in ['subject', 'html', 'plain']:\n try:\n response[part] = _get_node(multi_part, render_context, name=part)\n except BlockNotFound as error:\n errors[part] = error\n else:\n try:\n html_part = get_template('%s.html' % prefixed_template_name)\n except TemplateDoesNotExist:\n html_part = None\n\n try:\n plain_part = get_template('%s.txt' % prefixed_template_name)\n except TemplateDoesNotExist:\n if not html_part:\n raise TemplateDoesNotExist(full_template_name)\n else:\n plain_part = None\n\n if plain_part:\n response['plain'] = plain_part.render(render_context)\n\n if html_part:\n response['html'] = html_part.render(render_context)\n\n if response 
== {}:\n raise EmailRenderException(\"Couldn't render email parts. Errors: %s\"\n % errors)\n\n return response\n\n def get_email_message(self, template_name, context, from_email=None, to=None,\n cc=None, bcc=None, headers=None,\n template_prefix=None, template_suffix=None,\n template_dir=None, file_extension=None):\n\n parts = self._render_email(template_name, context,\n template_prefix or template_dir,\n template_suffix or file_extension)\n plain_part = 'plain' in parts\n html_part = 'html' in parts\n\n if 'subject' in parts:\n subject = parts['subject']\n else:\n subject_dict = getattr(settings, 'TEMPLATED_EMAIL_DJANGO_SUBJECTS', {})\n subject_template = subject_dict.get(template_name,\n _('%s email subject' % template_name))\n subject = subject_template % context\n\n if plain_part and not html_part:\n e = EmailMessage(\n subject,\n parts['plain'],\n from_email,\n to,\n cc=cc,\n bcc=bcc,\n headers=headers,\n )\n\n if html_part and not plain_part:\n e = EmailMessage(\n subject,\n parts['html'],\n from_email,\n to,\n cc=cc,\n bcc=bcc,\n headers=headers,\n )\n e.content_subtype = 'html'\n\n if plain_part and html_part:\n e = EmailMultiAlternatives(\n subject,\n parts['plain'],\n from_email,\n to,\n cc=cc,\n bcc=bcc,\n headers=headers,\n )\n e.attach_alternative(parts['html'], 'text/html')\n\n return e\n","sub_path":"utils/templated_emails/templater.py","file_name":"templater.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28965405","text":"# Author: Deeraj Nagothu\r\n# ENF estimation from video recordings using Rolling Shutter Mechanism\r\n\r\n# Import required packages\r\nimport numpy as np\r\nimport cv2\r\nimport pickle\r\nimport pyenf\r\n#from scipy import signal, io\r\nimport scipy.io.wavfile\r\nimport math\r\nfrom scipy.fftpack import fftshift\r\nimport matplotlib.pyplot as plt\r\nimport librosa\r\nfrom skimage.util import img_as_float\r\nfrom skimage.segmentation import slic\r\nfrom scipy.stats.stats import pearsonr\r\n\r\n# Constants\r\nopen_video_to_extract_Row_signal = False # set it to True to extract, else False to use the dump file\r\nvideo_folder = \"Recordings/2022/HomeRec/Deepfake1/\"\r\nvideo_rec_name = \"deepfake1.mp4\"\r\n#video_folder = \"/home/deeraj/Documents/Projects/pyENF_extraction_rolling_shutter/Recordings/Deepfake/set1/original/\"\r\n#video_rec_name = \"01__talking_against_wall.mp4\"\r\npower_rec_name = \"power_deepfake1.wav\"\r\nnumSegments = 500 # number of superpixel segments per frame\r\n# video_rec_name = \"resized_MVI_0288.avi\"\r\n# power_rec_name = \"80D_power_recording_3_20min.wav\"\r\nvideo_filepath = video_folder + video_rec_name\r\npower_filepath = video_folder + power_rec_name\r\ndo_ssm = 0 # decides if SSM should be applied or not to a code.\r\nmotion_detection_threshold = 40 # threshold decides after how many pixel changes, it should apply Superpixel mask\r\nwindow_size = 30\r\nshift_size = 5\r\n\r\ndef SSM(frame, frame_segments, new_frame_with_mask, motion_detection_threshold, ones_Superpixel_mask):\r\n motion_threshold = np.count_nonzero(new_frame_with_mask) # count how many pixels were effected\r\n # print(motion_threshold)\r\n if motion_threshold >= motion_detection_threshold: # no. 
of pixels effected more than threshold then apply mask\r\n new_frame_with_mask[new_frame_with_mask == 255] = 1 # 255 represents white pixels which are motion detections\r\n new_frame_with_mask[new_frame_with_mask == 127] = 1 # 127 represents gray pixels which are shadow of object\r\n superpixel_motion_mask = np.multiply(frame_segments,\r\n new_frame_with_mask) # multiplying to see which superpixels were effected\r\n effected_superpixels = np.unique(superpixel_motion_mask)\r\n for each_superpixel in effected_superpixels: # all the effected superpixels are set to zero\r\n frame_segments[frame_segments == each_superpixel] = 0\r\n ones_Superpixel_mask[frame_segments == 0] = 0\r\n if frame.shape[2] == 3: # its RGB frame, so the mask is applied to each layer individually\r\n frame[:, :, 0] = np.multiply(ones_Superpixel_mask, frame[:, :, 0])\r\n frame[:, :, 1] = np.multiply(ones_Superpixel_mask, frame[:, :, 1])\r\n frame[:, :, 2] = np.multiply(ones_Superpixel_mask, frame[:, :, 2])\r\n else: # for grayscale\r\n frame = np.multiply(ones_Superpixel_mask, frame)\r\n return ones_Superpixel_mask,frame\r\n\r\ndef correlation_vector(ENF_signal1, ENF_signal2, window_size, shift_size):\r\n correlation_ENF = []\r\n length_of_signal = min(len(ENF_signal1), len(ENF_signal2))\r\n total_windows = math.ceil(( length_of_signal - window_size + 1) / shift_size)\r\n rho = np.zeros((1,total_windows))\r\n for i in range(0,total_windows):\r\n enf_sig1 = ENF_signal1[i * shift_size: i * shift_size + window_size]\r\n enf_sig2 = ENF_signal2[i * shift_size: i * shift_size + window_size]\r\n enf_sig1 = np.reshape(enf_sig1, (len(enf_sig1),))\r\n enf_sig2 = np.reshape(enf_sig2,(len(enf_sig2), ))\r\n r,p = pearsonr(enf_sig1, enf_sig2)\r\n rho[0][i] = r\r\n return rho,total_windows\r\n\r\ndef extract_row_pixel_with_SSM(frame):\r\n # check for grayscale or RGB\r\n frame_shape = frame.shape\r\n if frame_shape[2] == 3: # its an RGB frame\r\n average_frame_across_rgb = np.mean(frame, axis=2)\r\n dup_average_frame_across_rgb = average_frame_across_rgb.astype(np.float32)\r\n dup_average_frame_across_rgb[dup_average_frame_across_rgb == 0] = np.nan\r\n dup_average_frame_across_column = np.nanmean(dup_average_frame_across_rgb, axis=1)\r\n dup_average_frame_across_column[np.isnan(dup_average_frame_across_column)] = 0\r\n average_frame_across_column = dup_average_frame_across_column.astype(np.int32)\r\n else:\r\n dup_frame = frame.astype(np.float32)\r\n dup_frame[dup_frame == 0] = np.nan\r\n dup_average_frame_across_column = np.nanmean(dup_frame, axis=1)\r\n average_frame_across_column = dup_average_frame_across_column.astype(np.int32)\r\n average_frame_across_column = np.reshape(average_frame_across_column, (frame_shape[0],))\r\n return average_frame_across_column\r\n\r\ndef second_half_extract_row_pixel(frame):\r\n # check for grayscale or RGB\r\n frame_shape = frame.shape\r\n if frame_shape[2] == 3: # its an RGB frame\r\n average_frame_across_rgb = np.mean(frame, axis=2)\r\n average_frame_across_column = np.mean(average_frame_across_rgb[:,:1000], axis=1)\r\n else:\r\n average_frame_across_column = np.mean(frame, axis=1)\r\n average_frame_across_column = np.reshape(average_frame_across_column, (frame_shape[0],))\r\n return average_frame_across_column\r\n\r\ndef extract_row_pixel(frame):\r\n # check for grayscale or RGB\r\n frame_shape = frame.shape\r\n if frame_shape[2] == 3: # its an RGB frame\r\n average_frame_across_rgb = np.mean(frame, axis=2)\r\n average_frame_across_column = np.mean(average_frame_across_rgb, axis=1)\r\n else:\r\n 
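# --- Editor's note: the sketch below is illustrative and NOT part of the original script. ---
# correlation_vector() above slides a fixed window over two ENF traces and records
# the Pearson r of each aligned window pair. This standalone rewrite mirrors that
# logic; the synthetic 60 Hz traces are assumptions used only to show the expected
# call pattern and output shape.
import math as _math
import numpy as _np
from scipy.stats import pearsonr as _pearsonr

def _windowed_pearson_demo(sig1, sig2, window_size=30, shift_size=5):
    # Same windowing scheme as correlation_vector(): start indices advance by
    # shift_size; each window holds window_size consecutive samples.
    n = min(len(sig1), len(sig2))
    total_windows = _math.ceil((n - window_size + 1) / shift_size)
    rho = _np.zeros(total_windows)
    for i in range(total_windows):
        a = sig1[i * shift_size: i * shift_size + window_size]
        b = sig2[i * shift_size: i * shift_size + window_size]
        rho[i], _ = _pearsonr(a, b)  # keep r, discard the p-value
    return rho

_t = _np.linspace(0, 60, 600)                         # 60 s of fake ENF samples
_enf = 60 + 0.02 * _np.sin(2 * _np.pi * 0.05 * _t)    # slow drift around 60 Hz
_noisy = _enf + _np.random.normal(0, 0.002, _t.size)  # noisier second trace
_rho_demo = _windowed_pearson_demo(_enf, _noisy)      # per-window r, typically high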
average_frame_across_column = np.mean(frame, axis=1)\r\n average_frame_across_column = np.reshape(average_frame_across_column, (frame_shape[0],))\r\n return average_frame_across_column\r\n\r\n\r\n# Input the video stream\r\nvideo = cv2.VideoCapture(video_filepath)\r\n\r\n# Validating the read of input video\r\nif not video.isOpened():\r\n print(\"Error Opening the video stream or file\")\r\n\r\n# Video specifics extraction\r\ntotal_number_of_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\r\nheight_of_frame = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # total number of rows\r\nwidth_of_frame = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) # total number of columns\r\nframe_rate = float(video.get(cv2.CAP_PROP_FPS))\r\nsize_of_row_signal = int(np.multiply(total_number_of_frames, height_of_frame))\r\n# print(size_of_row_signal)\r\n#print(width_of_frame)\r\n\r\n# row_signal = np.zeros((size_of_row_signal, 1), dtype=float)\r\nrow_signal = np.zeros((total_number_of_frames, height_of_frame, 1), dtype=float)\r\n# Collect the row signal from the buffered frames\r\n\r\n# Generating superpixel segment from first frame of the video\r\nret, frame = video.read()\r\nframe = img_as_float(frame)\r\nsegments = slic(frame, n_segments=numSegments, sigma=5, start_label=1) # Initializing superpixels segments using SLIC algorithm\r\nmotion_mask = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=150, detectShadows=True)\r\n# ones superpixel is created for final AND operation with superpixel mask. The superpixels effected is set to zero,\r\n# so same pixels are also set to zero in ones superpixel\r\nmaster_ones_Superpixel_mask = np.ones([height_of_frame,width_of_frame],dtype=int)\r\nif open_video_to_extract_Row_signal is True:\r\n frame_counter = 0\r\n while video.isOpened():\r\n ret, frame = video.read()\r\n if ret is True:\r\n frame_shape = frame.shape\r\n start_index = frame_counter * height_of_frame\r\n # row_signal[start_index:start_index + height_of_frame] = extract_row_pixel(frame)\r\n if do_ssm == 1:\r\n ones_Superpixel_mask = master_ones_Superpixel_mask.copy()\r\n frame_segments = segments.copy() # creating a copy of SLIC segments for each frame\r\n new_frame_with_mask = motion_mask.apply(frame) # applying the background subtractor to frame\r\n oSSM_mask,frame = SSM(frame, frame_segments, new_frame_with_mask, motion_detection_threshold, ones_Superpixel_mask)\r\n row_signal[frame_counter, :, 0] = second_half_extract_row_pixel(frame)\r\n else:\r\n row_signal[frame_counter, :, 0] = second_half_extract_row_pixel(frame)\r\n\r\n #oSSM_mask = oSSM_mask * 255\r\n #oSSM_mask = oSSM_mask.astype(np.uint8)\r\n cv2.imshow('Frame', frame)\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n frame_counter += 1\r\n # print(frame_counter)\r\n video.release()\r\n cv2.destroyAllWindows()\r\n # store the variables for faster future use\r\n variable_location = video_folder + \"row_signal.pkl\"\r\n store_variable_file = open(variable_location, 'wb')\r\n pickle.dump(row_signal, store_variable_file)\r\n store_variable_file.close()\r\n print(\"Extracted Row Signal and stored in dump.\\n\")\r\nelse:\r\n variable_location = video_folder + \"row_signal.pkl\"\r\n load_variable_file = open(variable_location, 'rb')\r\n row_signal = pickle.load(load_variable_file)\r\n load_variable_file.close()\r\n print(\"Loaded the Row Signal. 
\\n\")\r\n\r\ntime = np.arange(0.0, size_of_row_signal)\r\n\r\n# For a static video, clean the row signal with its video signal\r\n# that should leave only the ENF signal\r\n# Refer to { Exploiting Rolling Shutter For ENF Signal Extraction From Video }\r\n# row_signal = video_signal + enf_signal\r\n# average_of_each_row_element(row_signal) = average_of_each_row_element(video_signal) [since average of enf is 0]\r\n# enf_signal = row_signal - average_of_each_row_element(row_signal)\r\n\r\n# Estimate the ENF signal using the row signal collected\r\naverage_of_each_row_element = np.mean(row_signal, axis=0)\r\nenf_video_signal = row_signal - average_of_each_row_element\r\nflattened_enf_signal = enf_video_signal.flatten() # the matrix shape ENF data is flattened to one dim data\r\n\r\nfs = 500 # downsampling frequency\r\nnfft = 8192\r\nframe_size = 6 # change it to 6 for videos with large length recording\r\noverlap = 0\r\nfilename = \"mediator.wav\"\r\n# Writing the ENF data to the wav file for data type conversion\r\nscipy.io.wavfile.write(filename, rate=int(frame_rate * height_of_frame), data=flattened_enf_signal)\r\nfilename_dup = \"mediator_dup.wav\"\r\nsignal0, fs = librosa.load(filename, sr=fs) # loading the video ENF data\r\npower_signal_filename = power_filepath\r\npower_signal0, fs = librosa.load(power_signal_filename, sr=fs) # loading the power ENF data\r\n\r\n\r\n# ENF extraction from video recording\r\nvideo_signal_object = pyenf.pyENF(signal0=signal0, fs=fs, nominal=120, harmonic_multiples=1, duration=1,\r\n strip_index=0, frame_size_secs=frame_size, nfft=nfft, overlap_amount_secs=overlap, width_signal=0.02, width_band=0.5)\r\nspectro_strip, frequency_support = video_signal_object.compute_spectrogam_strips()\r\nweights = video_signal_object.compute_combining_weights_from_harmonics()\r\nOurStripCell, initial_frequency = video_signal_object.compute_combined_spectrum(spectro_strip, weights,\r\n frequency_support)\r\nENF = video_signal_object.compute_ENF_from_combined_strip(OurStripCell, initial_frequency)\r\n#print(ENF)\r\n# uncomment when comparing only 2 graphs.\r\n\"\"\"\r\nfig, (video, power) = plt.subplots(2, 1, sharex=True)\r\nvideo.plot(ENF[:-12],'b')\r\nvideo.set_title(\"ENF Signal without SSM\", fontsize=12)\r\nvideo.ticklabel_format(useOffset=False)\r\nvideo.set_ylabel(\"Frequency (Hz)\", fontsize=12)\r\n#video.hlines(y=0.7, xmin=0, xmax=len(t), colors='r', linestyles='--', lw=2)\r\n\"\"\"\r\nfig, (video,videom, power) = plt.subplots(3, 1, sharex=True)\r\nvideo.plot(ENF[:-12],'r')\r\nvideo.set_title(\"ENF Signal without SSM\", fontsize=12)\r\nvideo.ticklabel_format(useOffset=False)\r\nvideo.set_ylabel(\"Freq (Hz)\", fontsize=12)\r\n\r\n\"\"\"\r\n# use this to compare 3 enf's from with ssm, without ssm, and power enf\r\nvariable_location = video_folder + \"mask_enf.pkl\"\r\nload_variable_file = open(variable_location, 'rb')\r\nENF_mask = pickle.load(load_variable_file)\r\nload_variable_file.close()\r\n\r\nvideom.plot(ENF_mask[:-12],'b')\r\nvideom.set_title(\"ENF Signal with SSM\", fontsize=12)\r\nvideom.ticklabel_format(useOffset=False)\r\nvideom.set_ylabel(\"Freq (Hz)\", fontsize=12)\r\n\"\"\"\r\n\r\n# ENF extraction from power recording\r\npower_signal_object = pyenf.pyENF(signal0=power_signal0, fs=fs, nominal=60, harmonic_multiples=1, duration=0.1,\r\n strip_index=0, frame_size_secs=frame_size, nfft=nfft, overlap_amount_secs=overlap)\r\npower_spectro_strip, power_frequency_support = power_signal_object.compute_spectrogam_strips()\r\npower_weights = 
power_signal_object.compute_combining_weights_from_harmonics()\r\npower_OurStripCell, power_initial_frequency = power_signal_object.compute_combined_spectrum(power_spectro_strip,\r\n power_weights,\r\n power_frequency_support)\r\npower_ENF = power_signal_object.compute_ENF_from_combined_strip(power_OurStripCell, power_initial_frequency)\r\n\r\npower.plot(power_ENF[:-8],'g')\r\npower.set_title(\"Power ENF Signal\", fontsize=12)\r\npower.set_ylabel(\"Freq (Hz)\", fontsize=12)\r\npower.set_xlabel(\"Time\", fontsize=12)\r\n# plt.show()\r\npower.ticklabel_format(useOffset=False)\r\nprint(\"Correlating the signal\")\r\n#enf_corr = signal.correlate(ENF, power_ENF, mode='same')\r\n#corr.plot(enf_corr)\r\n#corr.axhline(0.5, ls=':')\r\nfig.tight_layout()\r\nplt.show()\r\n\r\nrho,total_windows = correlation_vector(ENF[:-7], power_ENF[:-7],window_size,shift_size)\r\n\r\n\"\"\"\r\n# temp load of rho\r\nvariable_location = video_folder + \"rho_without_SSM.pkl\"\r\nload_variable_file = open(variable_location, 'rb')\r\nrho_without_SSM = pickle.load(load_variable_file)\r\nload_variable_file.close()\r\n\r\n#\r\n\r\n\r\n\"\"\"\r\nt = np.arange(0,total_windows-1,1)\r\nplt.plot(t,rho[0][1:],'g--', label=\"Plain Wall\")\r\n#plt.plot(t,rho_without_SSM[0][1:],'b', label=\"Without SSM\")\r\nplt.hlines(y=0.8, xmin=0, xmax=len(t), colors='r', linestyles='--', lw=2)\r\nplt.ylabel('Correlation Coefficient', fontsize=12)\r\nplt.xlabel('Number of Windows compared', fontsize=12)\r\nplt.title('ENF fluctuations compared', fontsize=12)\r\n#plt.set_legend('With SSM','Without SSM')\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n","sub_path":"pyENF_roll_shutter.py","file_name":"pyENF_roll_shutter.py","file_ext":"py","file_size_in_byte":14192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"566042738","text":"from itertools import groupby\n\n\ndef get_sorted_best_submissions(individual_submissions,\n team_submissions,\n ordering_score_key,\n score_key,\n reverse_order,\n use_selected_submission=False):\n ranking_submissions = []\n\n if use_selected_submission:\n selected_individual_submissions = individual_submissions.filter(selected=True)\n selected_team_submissions = team_submissions.filter(selected=True)\n\n participant_grouped_submissions = groupby(\n individual_submissions,\n lambda submission: submission.participant.id\n )\n for part_id, part_submissions in participant_grouped_submissions:\n ranking_submission = None\n\n if use_selected_submission:\n selected_submissions = selected_individual_submissions.filter(participant__id=part_id)\n if selected_submissions.count() == 1:\n ranking_submission = selected_submissions[0]\n\n if ranking_submission is None:\n if reverse_order:\n ranking_submission = max(part_submissions, key=ordering_score_key)\n else:\n ranking_submission = min(part_submissions, key=ordering_score_key)\n\n\n ranking_submissions.append({\n 'name': str(ranking_submission.participant),\n 'score': score_key(ranking_submission),\n 'date': ranking_submission.date\n })\n\n\n team_grouped_submissions = groupby(\n team_submissions,\n lambda submission: submission.team.id\n )\n\n for team_id, team_submissions in team_grouped_submissions:\n ranking_submission = None\n\n if use_selected_submission:\n selected_submissions = selected_team_submissions.filter(team__id=team_id)\n if selected_submissions.count() == 1:\n ranking_submission = selected_submissions[0]\n\n if ranking_submission is None:\n if reverse_order:\n ranking_submission = max(team_submissions, 
key=ordering_score_key)\n else:\n ranking_submission = min(team_submissions, key=ordering_score_key)\n\n ranking_submissions.append({\n 'name': str(ranking_submission.team),\n 'score': score_key(ranking_submission),\n 'date': ranking_submission.date\n })\n\n return sorted(ranking_submissions, key=lambda s: s['score'], reverse=reverse_order)\n","sub_path":"participants/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"554011598","text":"import heapq\nfrom itertools import count\n\n\nclass ReplayMemory:\n \"\"\"\n PER: Thanks to the python module heapq we do not need to implement our own priority heap\n INFO: heapq works on standard python arrays\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n We initialize a replay memory to sample experiences from it and append past experiences\n :param size: int, maximum length of experiences to remember\n \"\"\"\n self.maxlen = size\n self.memory = []\n # for equal priorities we must define a tiebreaker\n self.tiebreaker = count()\n\n def append(self, experience, TDerror):\n \"\"\"\n Append a experience tuple to our memory object\n :param TDerror: the temporal difference error defines priority\n :param experience: Tuple(state, action, reward, next_state, done)\n :return: void\n \"\"\"\n heapq.heappush(self.memory, (abs(TDerror), next(self.tiebreaker), experience))\n if len(self.memory) > self.maxlen:\n self.memory = self.memory[:-1]\n heapq.heapify(self.memory)\n\n def get_batch(self, batch_size: int):\n \"\"\"\n Return a batch of experiences of our expected batch size if possible\n :param batch_size: int > 0\n :return: List[Tuple(state, action, reward, next_state, done)]\n \"\"\"\n batch = heapq.nlargest(batch_size, self.memory)\n batch = [experience for _, _, experience in batch] # return the S,A,R,S_,D and ignore the others\n self.memory = self.memory[batch_size:]\n return batch\n","sub_path":"ddqn_per/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"570148790","text":"import pandas as pd\nimport json\nfrom anndata import AnnData\nfrom ._datastructures import AirrCell\nfrom typing import (\n Iterable,\n Mapping,\n MutableMapping,\n Sequence,\n Union,\n Collection,\n Optional,\n)\nimport numpy as np\nfrom glob import iglob\nimport pickle\nimport os.path\nfrom . import _tracerlib\nimport sys\nfrom pathlib import Path\nimport airr\nfrom ..util import _doc_params, _is_true, _is_true2, _translate_dna_to_protein, _is_na2\nfrom ._convert_anndata import from_airr_cells, to_airr_cells, _sanitize_anndata\nfrom ._util import doc_working_model, _IOLogger, _check_upgrade_schema\nfrom .._compat import Literal\nfrom airr import RearrangementSchema\nimport itertools\nimport re\nfrom .. 
import __version__\n\n\n# patch sys.modules to enable pickle import.\n# see https://stackoverflow.com/questions/2121874/python-pckling-after-changing-a-modules-directory\nsys.modules[\"tracerlib\"] = _tracerlib\n\nDEFAULT_AIRR_FIELDS = (\n \"productive\",\n \"locus\",\n \"v_call\",\n \"d_call\",\n \"j_call\",\n \"c_call\",\n \"junction\",\n \"junction_aa\",\n \"consensus_count\",\n \"duplicate_count\",\n)\nDEFAULT_10X_FIELDS = DEFAULT_AIRR_FIELDS + (\"is_cell\", \"high_confidence\")\nDEFAULT_AIRR_CELL_ATTRIBUTES = (\"is_cell\", \"high_confidence\", \"multi_chain\")\n\n\ndef _cdr3_from_junction(junction_aa, junction_nt):\n    \"\"\"CDR3 equals the junction without the conserved residues C and W/F, respectively.\n    If the conserved residues are not C and W/F, the chain\n    is non-productive and we set CDR3 to None.\n\n    See also https://github.com/icbi-lab/scirpy/pull/290.\n    \"\"\"\n    cdr3_aa, cdr3_nt = None, None\n    if (\n        junction_aa is not None\n        and junction_aa[0] == \"C\"\n        and junction_aa[-1] in (\"W\", \"F\")\n    ):\n        cdr3_aa = junction_aa[1:-1]\n    if (\n        junction_nt is not None\n        and _translate_dna_to_protein(junction_nt[:3]) == \"C\"\n        and _translate_dna_to_protein(junction_nt[-3:]) in (\"W\", \"F\")\n    ):\n        cdr3_nt = junction_nt[3:-3]\n    return cdr3_aa, cdr3_nt\n\n\ndef _read_10x_vdj_json(\n    path: Union[str, Path],\n    filtered: bool = True,\n    include_fields: Optional[Collection[str]] = None,\n) -> AnnData:\n    \"\"\"Read IR data from a 10x Genomics `all_contig_annotations.json` file\"\"\"\n    logger = _IOLogger()\n    with open(path, \"r\") as f:\n        cells = json.load(f)\n\n    airr_cells = {}\n    for cell in cells:\n        if filtered and not (cell[\"is_cell\"] and cell[\"high_confidence\"]):\n            continue\n        barcode = cell[\"barcode\"]\n        if barcode not in airr_cells:\n            ir_obj = AirrCell(\n                barcode,\n                logger=logger,\n                cell_attribute_fields=[\"is_cell\", \"high_confidence\"],\n            )\n            airr_cells[barcode] = ir_obj\n        else:\n            ir_obj = airr_cells[barcode]\n\n        # AIRR-compliant chain dict\n        chain = AirrCell.empty_chain_dict()\n\n        genes = dict()\n        mapping = {\n            \"L-REGION+V-REGION\": \"v\",\n            \"D-REGION\": \"d\",\n            \"J-REGION\": \"j\",\n            \"C-REGION\": \"c\",\n        }\n        for annot in cell[\"annotations\"]:\n            feat = annot[\"feature\"]\n            if feat[\"region_type\"] in mapping:\n                region = mapping[feat[\"region_type\"]]\n                assert region not in genes, region\n                genes[region] = dict()\n                genes[region][\"chain\"] = feat[\"chain\"]\n                genes[region][\"gene\"] = feat[\"gene_name\"]\n                genes[region][\"start\"] = annot[\"contig_match_start\"]\n                genes[region][\"end\"] = annot[\"contig_match_end\"]\n\n        chain[\"v_call\"] = genes[\"v\"][\"gene\"] if \"v\" in genes else None\n        chain[\"d_call\"] = genes[\"d\"][\"gene\"] if \"d\" in genes else None\n        chain[\"j_call\"] = genes[\"j\"][\"gene\"] if \"j\" in genes else None\n        chain[\"c_call\"] = genes[\"c\"][\"gene\"] if \"c\" in genes else None\n\n        # check if chain type is consistent\n        chain_types = [g[\"chain\"] for g in genes.values()]\n        chain_type = chain_types[0] if np.unique(chain_types).size == 1 else None\n\n        # compute inserted nucleotides\n        # VJ junction for TRA, TRG, IGK, IGL chains\n        # VD + DJ junction for TRB, TRD, IGH chains\n        #\n        # Notes on indexing:\n        # some tryouts have shown that the indexes in the json file\n        # seem to be python-type indexes (i.e. 
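# --- Editor's note: illustrative sketch, NOT part of the original module. ---
# Worked example of the _cdr3_from_junction() convention documented above: the
# CDR3 is the junction with the conserved first residue (C) and the last residue
# (W/F) trimmed off; missing conserved residues yield None. Sequences are made up.
_aa_demo, _nt_demo = _cdr3_from_junction("CASSLGQAYEQYF", None)
assert _aa_demo == "ASSLGQAYEQY"  # leading C and trailing F removed
assert _nt_demo is None           # no nucleotide junction was supplied
assert _cdr3_from_junction("ASSLGQAYEQY", None) == (None, None)  # no leading C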
the 'end' index is exclusive).\n # Therefore, no `-1` needs to be subtracted when computing the number\n # of inserted nucleotides.\n chain[\"np1_length\"] = None\n chain[\"np2_length\"] = None\n if (\n chain_type in AirrCell.VJ_LOCI\n and chain[\"v_call\"] is not None\n and chain[\"j_call\"] is not None\n ):\n assert (\n chain[\"d_call\"] is None\n ), \"TRA, TRG or IG-light chains should not have a D region\"\n chain[\"np1_length\"] = genes[\"j\"][\"start\"] - genes[\"v\"][\"end\"]\n elif (\n chain_type in AirrCell.VDJ_LOCI\n and chain[\"v_call\"] is not None\n and chain[\"j_call\"] is not None\n and chain[\"d_call\"] is not None\n ):\n chain[\"np1_length\"] = genes[\"d\"][\"start\"] - genes[\"v\"][\"end\"]\n chain[\"np2_length\"] = genes[\"j\"][\"start\"] - genes[\"d\"][\"end\"]\n\n chain[\"locus\"] = chain_type\n chain[\"junction\"] = cell[\"cdr3_seq\"]\n chain[\"junction_aa\"] = cell[\"cdr3\"]\n chain[\"duplicate_count\"] = cell[\"umi_count\"]\n chain[\"consensus_count\"] = cell[\"read_count\"]\n chain[\"productive\"] = cell[\"productive\"]\n chain[\"is_cell\"] = cell[\"is_cell\"]\n chain[\"high_confidence\"] = cell[\"high_confidence\"]\n\n # additional cols from CR6 outputs: fwr{1,2,3,4}{,_nt} and cdr{1,2}{,_nt}\n fwrs = [f\"fwr{i}\" for i in range(1, 5)]\n cdrs = [f\"cdr{i}\" for i in range(1, 3)]\n\n for col in fwrs + cdrs:\n if col in cell.keys():\n chain[col] = cell[col].get(\"nt_seq\") if cell[col] else None\n chain[col + \"_aa\"] = cell[col].get(\"aa_seq\") if cell[col] else None\n\n chain[\"cdr3_aa\"], chain[\"cdr3\"] = _cdr3_from_junction(\n chain[\"junction_aa\"], chain[\"junction\"]\n )\n\n ir_obj.add_chain(chain)\n\n return from_airr_cells(airr_cells.values(), include_fields=include_fields)\n\n\ndef _read_10x_vdj_csv(\n path: Union[str, Path],\n filtered: bool = True,\n include_fields: Optional[Collection[str]] = None,\n) -> AnnData:\n \"\"\"Read IR data from a 10x genomics `_contig_annotations.csv` file\"\"\"\n logger = _IOLogger()\n df = pd.read_csv(path)\n\n airr_cells = {}\n if filtered:\n df = df.loc[_is_true(df[\"is_cell\"]) & _is_true(df[\"high_confidence\"]), :]\n for barcode, cell_df in df.groupby(\"barcode\"):\n ir_obj = AirrCell(\n barcode, logger=logger, cell_attribute_fields=(\"is_cell\", \"high_confidence\")\n )\n for _, chain_series in cell_df.iterrows():\n chain_dict = AirrCell.empty_chain_dict()\n chain_dict.update(\n locus=chain_series[\"chain\"],\n junction_aa=chain_series[\"cdr3\"],\n junction=chain_series[\"cdr3_nt\"],\n duplicate_count=chain_series[\"umis\"],\n consensus_count=chain_series[\"reads\"],\n productive=_is_true2(chain_series[\"productive\"]),\n v_call=chain_series[\"v_gene\"],\n d_call=chain_series[\"d_gene\"],\n j_call=chain_series[\"j_gene\"],\n c_call=chain_series[\"c_gene\"],\n is_cell=chain_series[\"is_cell\"],\n high_confidence=chain_series[\"high_confidence\"],\n )\n\n # additional cols from CR6 outputs: fwr{1,2,3,4}{,_nt} and cdr{1,2}{,_nt}\n fwrs = [f\"fwr{i}\" for i in range(1, 5)]\n cdrs = [f\"cdr{i}\" for i in range(1, 3)]\n\n for col in fwrs + cdrs:\n if col in chain_series.index:\n chain_dict[col + \"_aa\"] = chain_series.get(col)\n if col + \"_nt\" in chain_series.index:\n chain_dict[col] = chain_series.get(col + \"_nt\")\n\n chain_dict[\"cdr3_aa\"], chain_dict[\"cdr3\"] = _cdr3_from_junction(\n chain_dict[\"junction_aa\"], chain_dict[\"junction\"]\n )\n\n ir_obj.add_chain(chain_dict)\n\n airr_cells[barcode] = ir_obj\n\n return from_airr_cells(airr_cells.values(), 
include_fields=include_fields)\n\n\n@_doc_params(doc_working_model=doc_working_model, include_fields=DEFAULT_10X_FIELDS)\ndef read_10x_vdj(\n    path: Union[str, Path],\n    filtered: bool = True,\n    include_fields: Optional[Collection[str]] = DEFAULT_10X_FIELDS,\n) -> AnnData:\n    \"\"\"\\\n    Read :term:`IR` data from 10x Genomics cell-ranger output.\n\n    Supports `all_contig_annotations.json` and\n    `{{all,filtered}}_contig_annotations.csv`.\n\n    If the `json` file is available, it is preferable as it\n    contains additional information about V(D)J-junction insertions. Other than\n    that there should be no difference.\n\n    {doc_working_model}\n\n    Parameters\n    ----------\n    path\n        Path to `filtered_contig_annotations.csv`, `all_contig_annotations.csv` or\n        `all_contig_annotations.json`.\n    filtered\n        Only keep filtered contig annotations (i.e. `is_cell` and `high_confidence`).\n        If using `filtered_contig_annotations.csv` already, this option\n        has no effect.\n    include_fields\n        The fields to include in `adata`. The AIRR rearrangement schema\n        can contain a lot of columns, most of which are irrelevant for most analyses.\n        Per default, this includes a subset of columns relevant for a typical\n        scirpy analysis, to keep `adata.obs` a bit cleaner. Defaults to {include_fields}.\n        Set this to `None` to include all columns.\n    \n\n    Returns\n    -------\n    AnnData object with IR data in `obs` for each cell. For more details see\n    :ref:`data-structure`.\n    \"\"\"\n    path = Path(path)\n    if path.suffix == \".json\":\n        return _read_10x_vdj_json(path, filtered, include_fields)\n    else:\n        return _read_10x_vdj_csv(path, filtered, include_fields)\n\n\n@_doc_params(doc_working_model=doc_working_model)\ndef read_tracer(path: Union[str, Path]) -> AnnData:\n    \"\"\"\\\n    Read data from `TraCeR `_ (:cite:`Stubbington2016-kh`).\n\n    Requires the TraCeR output directory which contains a folder for each cell.\n    Unfortunately the results files generated by `tracer summarize` do not\n    contain all required information.\n\n    The function will read TCR information from the `filtered_TCR_seqs/.pkl`\n    files.\n\n    {doc_working_model}\n\n    Parameters\n    ----------\n    path\n        Path to the TraCeR output folder.\n\n    Returns\n    -------\n    AnnData object with TCR data in `obs` for each cell. 
For more details see\n :ref:`data-structure`.\n \"\"\"\n logger = _IOLogger()\n airr_cells = {}\n path = str(path)\n\n def _process_chains(chains, chain_type):\n for tmp_chain in chains:\n if tmp_chain.cdr3 == \"N/A\" or tmp_chain.cdr3nt == \"N/A\":\n # ignore chains that have no sequence\n continue\n\n # AIRR-rearrangement compliant chain dictionary\n chain_dict = AirrCell.empty_chain_dict()\n if tmp_chain.has_D_segment:\n assert chain_type in AirrCell.VDJ_LOCI\n assert len(tmp_chain.junction_details) == 5\n assert len(tmp_chain.summary) == 8\n chain_dict[\"v_call\"] = tmp_chain.summary[0].split(\"*\")[0]\n chain_dict[\"d_call\"] = tmp_chain.summary[1].split(\"*\")[0]\n chain_dict[\"j_call\"] = tmp_chain.summary[2].split(\"*\")[0]\n else:\n assert chain_type in AirrCell.VJ_LOCI\n assert len(tmp_chain.junction_details) == 3\n assert len(tmp_chain.summary) == 7\n chain_dict[\"v_call\"] = tmp_chain.summary[0].split(\"*\")[0]\n chain_dict[\"d_call\"] = None\n chain_dict[\"j_call\"] = tmp_chain.summary[1].split(\"*\")[0]\n\n for call_key in [\"v_call\", \"d_call\", \"j_call\"]:\n if chain_dict[call_key] == \"N/A\":\n chain_dict[call_key] = None\n\n if chain_dict[call_key] is not None:\n assert chain_dict[call_key][3] == call_key[0].upper()\n\n chain_dict[\"np1_length\"] = (\n len(tmp_chain.junction_details[1])\n if tmp_chain.junction_details[1] != \"N/A\"\n else None\n )\n try:\n # only in VDJ\n chain_dict[\"np2_length\"] = (\n len(tmp_chain.junction_details[3])\n if tmp_chain.junction_details[3] != \"N/A\"\n else None\n )\n except IndexError:\n chain_dict[\"np2_length\"] = None\n\n chain_dict[\"locus\"] = chain_type\n chain_dict[\"consensus_count\"] = tmp_chain.TPM\n chain_dict[\"productive\"] = tmp_chain.productive\n chain_dict[\"junction\"] = tmp_chain.cdr3nt\n chain_dict[\"junction_aa\"] = tmp_chain.cdr3\n\n yield chain_dict\n\n for summary_file in iglob(\n os.path.join(path, \"**/filtered_TCR_seqs/*.pkl\"), recursive=True\n ):\n cell_name = summary_file.split(os.sep)[-3]\n airr_cell = AirrCell(cell_name, logger=logger)\n try:\n with open(summary_file, \"rb\") as f:\n tracer_obj = pickle.load(f)\n chains = tracer_obj.recombinants[\"TCR\"]\n for chain_id in \"ABGD\":\n if chain_id in chains and chains[chain_id] is not None:\n for tmp_chain in _process_chains(\n chains[chain_id], f\"TR{chain_id}\"\n ):\n airr_cell.add_chain(tmp_chain)\n except ImportError as e:\n # except Exception as e:\n raise Exception(\n \"Error loading TCR data from cell {}\".format(summary_file)\n ) from e\n\n airr_cells[cell_name] = airr_cell\n\n if not len(airr_cells):\n raise IOError(\n \"Could not find any TraCeR *.pkl files. 
Make sure you are \"\n            \"using a TraCeR output folder that looks like \"\n            \"/filtered_TCR_seqs/*.pkl\"\n        )\n\n    return from_airr_cells(airr_cells.values())\n\n\n@_doc_params(\n    doc_working_model=doc_working_model,\n    cell_attributes=f\"\"\"`({\",\".join([f'\"{x}\"' for x in DEFAULT_AIRR_CELL_ATTRIBUTES])})`\"\"\",\n    include_fields=f\"\"\"`({\",\".join([f'\"{x}\"' for x in DEFAULT_AIRR_FIELDS])})`\"\"\",\n)\ndef read_airr(\n    path: Union[str, Sequence[str], Path, Sequence[Path]],\n    use_umi_count_col: Union[bool, Literal[\"auto\"]] = \"auto\",\n    infer_locus: bool = True,\n    cell_attributes: Collection[str] = DEFAULT_AIRR_CELL_ATTRIBUTES,\n    include_fields: Optional[Collection[str]] = DEFAULT_AIRR_FIELDS,\n) -> AnnData:\n    \"\"\"\\\n    Read data from `AIRR rearrangement `_ format.\n\n    The following columns are required by scirpy:\n     * `cell_id`\n     * `productive`\n     * `locus`\n     * at least one of `consensus_count`, `duplicate_count`, or `umi_count`\n     * at least one of `junction_aa` or `junction`.\n\n    Data should still import if one of these fields is missing, but they are required\n    by most of scirpy's processing functions. All chains for which the field\n    `junction_aa` is missing or empty, will be considered as non-productive and\n    will be moved to the `extra_chains` column.\n\n    {doc_working_model}\n\n    Parameters\n    ----------\n    path\n        Path to the AIRR rearrangement tsv file. If different\n        chains are split up into multiple files, these can be specified\n        as a List, e.g. `[\"path/to/tcr_alpha.tsv\", \"path/to/tcr_beta.tsv\"]`.\n    use_umi_count_col\n        Whether to add UMI counts from the non-standard (but common) `umi_count`\n        column. When this column is used, the UMI counts are moved over to the\n        standard `duplicate_count` column. Default: Use `umi_count` if there is\n        no `duplicate_count` column present.\n    infer_locus\n        Try to infer the `locus` column from gene names, in case it is not specified.\n    cell_attributes\n        Fields in the rearrangement schema that are specific for a cell rather\n        than a chain. The values must be identical over all records belonging to a\n        cell. This defaults to {cell_attributes}.\n    include_fields\n        The fields to include in `adata`. The AIRR rearrangement schema\n        can contain a lot of columns, most of which are irrelevant for most analyses.\n        Per default, this includes a subset of columns relevant for a typical\n        scirpy analysis, to keep `adata.obs` a bit cleaner. Defaults to {include_fields}.\n        Set this to `None` to include all columns.\n\n    Returns\n    -------\n    AnnData object with IR data in `obs` for each cell. For more details see\n    :ref:`data-structure`.\n    \"\"\"\n    airr_cells = {}\n    logger = _IOLogger()\n\n    if isinstance(path, (str, Path, pd.DataFrame)):\n        path: list = [path]\n\n    def _decide_use_umi_count_col(chain_dict):\n        \"\"\"Logic to decide whether or not to use counts from the `umi_count` column.\"\"\"\n        if (\n            \"umi_count\" in chain_dict\n            and use_umi_count_col == \"auto\"\n            and \"duplicate_count\" not in chain_dict\n        ):\n            logger.warning(\n                \"Renaming the non-standard `umi_count` column to `duplicate_count`. 
\"\n ) # type: ignore\n return True\n elif use_umi_count_col is True:\n return True\n else:\n return False\n\n for tmp_path in path:\n if isinstance(tmp_path, pd.DataFrame):\n iterator = tmp_path.to_dict(orient=\"records\")\n else:\n iterator = airr.read_rearrangement(str(tmp_path))\n\n for chain_dict in iterator:\n cell_id = chain_dict.pop(\"cell_id\")\n\n try:\n tmp_cell = airr_cells[cell_id]\n except KeyError:\n tmp_cell = AirrCell(\n cell_id=cell_id,\n logger=logger,\n cell_attribute_fields=cell_attributes,\n )\n airr_cells[cell_id] = tmp_cell\n\n if _decide_use_umi_count_col(chain_dict):\n chain_dict[\"duplicate_count\"] = RearrangementSchema.to_int(\n chain_dict.pop(\"umi_count\")\n )\n\n if infer_locus and \"locus\" not in chain_dict:\n logger.warning(\n \"`locus` column not found in input data. The locus is being inferred from the {v,d,j,c}_call columns.\"\n )\n chain_dict[\"locus\"] = _infer_locus_from_gene_names(chain_dict)\n\n tmp_cell.add_chain(chain_dict)\n\n return from_airr_cells(airr_cells.values(), include_fields=include_fields)\n\n\ndef _infer_locus_from_gene_names(chain_dict):\n \"\"\"Infer the IGMT locus name from VDJ calls\"\"\"\n keys = [\"v_call\", \"d_call\", \"j_call\", \"c_call\"]\n\n # TRAV.*/DV is misleading as it actually points to a delta locus\n # See #285\n if re.search(\"TRAV.*/DV\", chain_dict[\"v_call\"]):\n keys.remove(\"v_call\")\n\n genes = []\n for k in keys:\n gene = chain_dict[k]\n if not _is_na2(gene):\n genes.append(gene.lower())\n\n if not len(genes):\n locus = None\n elif all(\"tra\" in x for x in genes):\n locus = \"TRA\"\n elif all(\"trb\" in x for x in genes):\n locus = \"TRB\"\n elif all(\"trd\" in x for x in genes):\n locus = \"TRD\"\n elif all(\"trg\" in x for x in genes):\n locus = \"TRG\"\n elif all(\"igh\" in x for x in genes):\n locus = \"IGH\"\n elif all(\"igk\" in x for x in genes):\n locus = \"IGK\"\n elif all(\"igl\" in x for x in genes):\n locus = \"IGL\"\n else:\n locus = None\n\n return locus\n\n\n@_doc_params(doc_working_model=doc_working_model)\ndef read_bracer(path: Union[str, Path]) -> AnnData:\n \"\"\"\\\n Read data from `BraCeR `_ (:cite:`Lindeman2018`).\n\n Requires the `changeodb.tab` file as input which is created by the\n `bracer summarise` step.\n\n {doc_working_model}\n\n Parameters\n ----------\n path\n Path to the `changeodb.tab` file.\n\n Returns\n -------\n AnnData object with BCR data in `obs` for each cell. 
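# --- Editor's note: illustrative sketch, NOT part of the original module. ---
# How _infer_locus_from_gene_names() above resolves a locus. The chain dicts are
# made up; only the four {v,d,j,c}_call keys the function inspects are populated.
_chain_demo = {"v_call": "TRBV20-1", "d_call": "TRBD1",
               "j_call": "TRBJ2-7", "c_call": "TRBC2"}
assert _infer_locus_from_gene_names(_chain_demo) == "TRB"
# TRAV.*/DV genes actually point at the delta locus (see #285 above), so the
# v_call is skipped and the remaining calls decide:
_chain_demo = {"v_call": "TRAV38-2/DV8", "d_call": "TRDD2",
               "j_call": "TRDJ1", "c_call": "TRDC"}
assert _infer_locus_from_gene_names(_chain_demo) == "TRD"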
For more details see\n :ref:`data-structure`.\n \"\"\"\n logger = _IOLogger()\n changeodb = pd.read_csv(path, sep=\"\\t\", na_values=[\"None\"])\n\n bcr_cells = dict()\n for _, row in changeodb.iterrows():\n cell_id = row[\"CELL\"]\n try:\n tmp_ir_cell = bcr_cells[cell_id]\n except KeyError:\n tmp_ir_cell = AirrCell(cell_id, logger=logger)\n bcr_cells[cell_id] = tmp_ir_cell\n\n chain_dict = AirrCell.empty_chain_dict()\n\n chain_dict[\"v_call\"] = row[\"V_CALL\"] if not pd.isnull(row[\"V_CALL\"]) else None\n chain_dict[\"d_call\"] = row[\"D_CALL\"] if not pd.isnull(row[\"D_CALL\"]) else None\n chain_dict[\"j_call\"] = row[\"J_CALL\"] if not pd.isnull(row[\"J_CALL\"]) else None\n chain_dict[\"c_call\"] = (\n row[\"C_CALL\"].split(\"*\")[0] if not pd.isnull(row[\"C_CALL\"]) else None\n )\n chain_dict[\"locus\"] = \"IG\" + row[\"LOCUS\"]\n\n chain_dict[\"np1_length\"] = None\n chain_dict[\"np2_length\"] = None\n if (\n chain_dict[\"locus\"] in AirrCell.VJ_LOCI\n and not pd.isnull(row[\"V_SEQ_START\"])\n and not pd.isnull(row[\"J_SEQ_START\"])\n ):\n assert pd.isnull(\n row[\"D_SEQ_START\"]\n ), \"TRA, TRG or IG-light chains should not have a D region\" + str(row)\n chain_dict[\"np1_length\"] = row[\"J_SEQ_START\"] - (\n row[\"V_SEQ_START\"] + row[\"V_SEQ_LENGTH\"]\n ) # type: ignore\n elif (\n chain_dict[\"locus\"] in AirrCell.VDJ_LOCI\n and not pd.isnull(row[\"V_SEQ_START\"])\n and not pd.isnull(row[\"D_SEQ_START\"])\n and not pd.isnull(row[\"J_SEQ_START\"])\n ):\n chain_dict[\"np1_length\"] = row[\"D_SEQ_START\"] - (\n row[\"V_SEQ_START\"] + row[\"V_SEQ_LENGTH\"]\n ) # type: ignore\n chain_dict[\"np2_length\"] = row[\"J_SEQ_START\"] - (\n row[\"D_SEQ_START\"] + row[\"D_SEQ_LENGTH\"]\n ) # type: ignore\n\n chain_dict[\"junction\"] = (\n row[\"JUNCTION\"] if not pd.isnull(row[\"JUNCTION\"]) else None\n )\n chain_dict[\"junction_aa\"] = (\n _translate_dna_to_protein(chain_dict[\"junction\"])\n if chain_dict[\"junction\"] is not None\n else None\n )\n chain_dict[\"consensus_count\"] = row[\"TPM\"]\n chain_dict[\"productive\"] = row[\"FUNCTIONAL\"]\n\n tmp_ir_cell.add_chain(chain_dict)\n\n return from_airr_cells(bcr_cells.values())\n\n\n@_check_upgrade_schema()\ndef write_airr(adata: AnnData, filename: Union[str, Path]) -> None:\n \"\"\"Export :term:`IR` data to :term:`AIRR` Rearrangement `tsv` format.\n\n Parameters\n ----------\n adata\n annotated data matrix\n filename\n destination filename\n \"\"\"\n airr_cells = to_airr_cells(adata)\n try:\n fields = airr_cells[0].fields\n for tmp_cell in airr_cells[1:]:\n assert tmp_cell.fields == fields, \"All rows of adata have the same fields.\"\n except IndexError:\n # case of an empty output file\n fields = None\n\n writer = airr.create_rearrangement(filename, fields=fields)\n for tmp_cell in airr_cells:\n for chain in tmp_cell.to_airr_records():\n # workaround for AIRR library writing out int field as floats (if it happens to be a float)\n for f in chain:\n if RearrangementSchema.type(f) == \"integer\":\n chain[f] = int(chain[f])\n writer.write(chain)\n writer.close()\n\n\ndef upgrade_schema(adata) -> None:\n \"\"\"Update older versions of a scirpy anndata object to the latest schema.\n\n Modifies adata inplace.\n\n Parameters\n ----------\n adata\n annotated data matrix\n \"\"\"\n # the scirpy_version flag was introduced in 0.7, therefore, for now,\n # there's no need to parse the version information but just check its presence.\n if \"scirpy_version\" in adata.uns:\n raise ValueError(\n \"Your AnnData object seems already up-to-date with scirpy 
v0.7\"\n )\n # junction_ins is not exactly np1, therefore we just leave it as is\n rename_dict = {\n f\"IR_{arm}_{i}_{key_old}\": f\"IR_{arm}_{i}_{key_new}\"\n for arm, i, (key_old, key_new) in itertools.product(\n [\"VJ\", \"VDJ\"],\n [\"1\", \"2\"],\n {\n \"cdr3\": \"junction_aa\",\n \"expr\": \"duplicate_count\",\n \"expr_raw\": \"consensus_count\",\n \"v_gene\": \"v_call\",\n \"d_gene\": \"d_call\",\n \"j_gene\": \"j_call\",\n \"c_gene\": \"c_call\",\n \"cdr3_nt\": \"junction\",\n }.items(),\n )\n }\n rename_dict[\"clonotype\"] = \"clone_id\"\n adata.obs.rename(columns=rename_dict, inplace=True)\n adata.obs[\"extra_chains\"] = None\n adata.uns[\"scirpy_version\"] = __version__\n _sanitize_anndata(adata)\n\n\n@_check_upgrade_schema()\ndef to_dandelion(adata: AnnData):\n \"\"\"Export data to `Dandelion `_ (:cite:`Stephenson2021`).\n\n Parameters\n ----------\n adata\n annotated data matrix with :term:`IR` annotations.\n\n Returns\n -------\n `Dandelion` object.\n \"\"\"\n try:\n import dandelion as ddl\n except:\n raise ImportError(\"Please install dandelion: pip install sc-dandelion.\")\n airr_cells = to_airr_cells(adata)\n\n contig_dicts = {}\n for tmp_cell in airr_cells:\n for i, chain in enumerate(tmp_cell.to_airr_records(), start=1):\n # dandelion-specific modifications\n chain.update(\n {\n \"sequence_id\": f\"{tmp_cell.cell_id}_contig_{i}\",\n }\n )\n contig_dicts[chain[\"sequence_id\"]] = chain\n\n data = pd.DataFrame.from_dict(contig_dicts, orient=\"index\")\n return ddl.Dandelion(ddl.load_data(data))\n\n\n@_doc_params(doc_working_model=doc_working_model)\ndef from_dandelion(dandelion, transfer: bool = False, **kwargs) -> AnnData:\n \"\"\"\\\n Import data from `Dandelion `_ (:cite:`Stephenson2021`).\n\n Internally calls :func:`scirpy.io.read_airr`.\n\n {doc_working_model}\n\n Parameters\n ----------\n dandelion\n a `dandelion.Dandelion` instance\n transfer\n Whether to execute `dandelion.tl.transfer` to transfer all data\n to the :class:`anndata.AnnData` instance.\n **kwargs\n Additional arguments passed to :func:`scirpy.io.read_airr`.\n\n Returns\n -------\n A :class:`~anndata.AnnData` instance with AIRR information stored in `obs`.\n \"\"\"\n try:\n import dandelion as ddl\n except ImportError:\n raise ImportError(\"Please install dandelion: pip install sc-dandelion.\")\n\n dandelion_df = dandelion.data.copy()\n # replace \"unassigned\" with None\n for col in dandelion_df.columns:\n dandelion_df.loc[dandelion_df[col] == \"unassigned\", col] = None\n\n adata = read_airr(dandelion_df, **kwargs)\n\n if transfer:\n ddl.tl.transfer(\n adata, dandelion\n ) # need to make a version that is not so verbose?\n return adata\n","sub_path":"scirpy/io/_io.py","file_name":"_io.py","file_ext":"py","file_size_in_byte":27637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322817094","text":"\"\"\"\nFunctions for setting certain functions and parameters.\n\"\"\"\nimport numpy as np\n\nfrom . import measure as meas\nfrom . import preprocess as prep\nfrom . import plot as plt\nfrom . 
import resample as res\n\n\n__all__ = []\n\n\ndef set_random_state(random_state=None):\n if random_state:\n np.random.seed(random_state)\n return None\n\n\ndef set_parallel(n_core):\n n_core = int(n_core)\n if n_core == 1:\n parallel = False\n elif n_core > 1:\n parallel = True\n else:\n raise ValueError\n return parallel\n\n\ndef set_resample(resample=None, family=None):\n if not resample:\n if family == 'gaussian':\n resample = res.resample_simple_train_test_split\n elif family == 'binomial':\n resample = res.resample_stratified_class_train_test_split\n else:\n raise ValueError\n return resample\n\n\ndef set_preprocess(preprocess=None):\n if not preprocess:\n preprocess = prep.preprocess_identity\n return preprocess\n\n\ndef set_measure(measure=None, family=None):\n if not measure:\n if family == 'gaussian':\n measure = meas.measure_cor_score\n elif family == 'binomial':\n measure = meas.measure_area_under_curve\n else:\n raise ValueError\n return measure\n\n\ndef set_column_names(column_names, dependent_variable,\n exclude_variables=None, preprocess=None,\n categorical_variables=None):\n column_names = [c for c in column_names if c != dependent_variable]\n if exclude_variables:\n column_names = [c for c in column_names if c not in exclude_variables]\n if categorical_variables and preprocess is prep.preprocess_scale:\n column_names = [c for c in column_names if c not in categorical_variables]\n column_names = categorical_variables + column_names\n return column_names\n\n\ndef set_categorical_variables(column_names, categorical_variables=None):\n if categorical_variables:\n categorical_variables = np.in1d(column_names, categorical_variables)\n return categorical_variables\n\n\ndef set_dependent_variable(data, dependent_variable):\n y = data[dependent_variable].values\n return y\n\n\ndef set_independent_variables(data, dependent_variable):\n X = data.drop(dependent_variable, axis=1).values\n return X\n\n\ndef set_plot_predictions(family=None):\n if family == 'gaussian':\n plot_predictions = plt.plot_predictions_gaussian\n elif family == 'binomial':\n plot_predictions = plt.plot_predictions_binomial\n else:\n raise ValueError\n return plot_predictions\n\n\ndef set_plot_model_performance(measure):\n if measure == meas.measure_mean_squared_error:\n plot_model_performance = plt.plot_model_performance_gaussian_mean_squared_error\n elif measure == meas.measure_cor_score:\n plot_model_performance = plt.plot_model_performance_gaussian_cor_score\n elif measure == meas.measure_r2_score:\n plot_model_performance = plt.plot_model_performance_gaussian_r2_score\n elif measure == meas.measure_area_under_curve:\n plot_model_performance = plt.plot_model_performance_binomial_area_under_curve\n else:\n raise ValueError\n return plot_model_performance\n","sub_path":"Python/easyml/setters.py","file_name":"setters.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325742057","text":"import os\nimport json\nfrom PyQt5 import QtWidgets, QtCore\n\nfrom sources.controller import Controller\n\n\nclass ConfigInputDialog(QtWidgets.QDialog):\n\n \"\"\" Configure input dialog appears when\n users lauch this application the first time.\n It will automatically generate input boxes according to\n the `configs` variable, which is a dict. 
\"\"\"\n\n def __init__(self, configs, parent=None):\n QtWidgets.QDialog.__init__(self, parent)\n self.setWindowTitle(\"Config\")\n self.configs = configs\n self._init_ui()\n\n def _init_ui(self):\n\n def get_one_section(fields):\n \"\"\" return one input section \"\"\"\n name = fields.get('name')\n row_grid = QtWidgets.QGridLayout()\n label = QtWidgets.QLabel()\n label.setText(name+':')\n check_box = QtWidgets.QCheckBox()\n check_box.setObjectName(name+'_ck')\n row_grid.setColumnStretch(0, 1)\n row_grid.setColumnStretch(1, 3)\n row_grid.setAlignment(QtCore.Qt.AlignLeft)\n row_grid.addWidget(label, 0, 0)\n row_grid.addWidget(QtWidgets.QLabel('activate'), 1, 0)\n row_grid.addWidget(check_box, 1, 1)\n row = 2\n for field_name, field_value in fields.items():\n if field_name == 'name':\n continue\n row_grid.addWidget(QtWidgets.QLabel(field_name+':'), row, 0)\n line_edit = QtWidgets.QLineEdit(field_value)\n line_edit.setObjectName(name+'_'+field_name)\n row_grid.addWidget(line_edit, row, 1)\n row += 1\n space_row = QtWidgets.QLabel()\n row_grid.addWidget(space_row, row, 0)\n return row_grid\n\n vbox = QtWidgets.QVBoxLayout()\n self.setLayout(vbox)\n for one_config in self.configs:\n vbox.addLayout(get_one_section(one_config))\n confirm_button = QtWidgets.QPushButton('Confirm')\n confirm_button.clicked.connect(self.close)\n vbox.addWidget(confirm_button)\n self.show()\n\n def get_values(self):\n \"\"\" collect user's input after closing this dialog \"\"\"\n result_configs = dict()\n for one_config in self.configs:\n name = one_config['name']\n checked = self.findChild(QtWidgets.QCheckBox, name+'_ck')\n one_result_config = dict(name=name, activate=checked.isChecked())\n for field in one_config:\n if field == 'name':\n continue\n field_object = self.findChild(\n QtWidgets.QLineEdit, name+'_'+field)\n one_result_config[field] = field_object.text()\n result_configs[name] = one_result_config\n return result_configs\n\n\nclass UpdateDict(dict):\n \"\"\" Custom dict, whenever the object changed,\n the callback function will be called and\n only called once at one change. 
\"\"\"\n\n @classmethod\n def convert(cls, to_convert_dict, callback):\n converted_dict = cls(callback, converting=True)\n for key, value in to_convert_dict.items():\n if isinstance(value, dict):\n converted_dict[key] = cls.convert(value, callback)\n else:\n converted_dict[key] = value\n converted_dict.converting = False\n return converted_dict\n\n def __init__(self, callback, converting=False):\n self.callback = callback\n self.converting = converting\n dict.__init__(self)\n\n def __setitem__(self, key, value):\n if isinstance(value, dict) and not isinstance(value, UpdateDict):\n value = UpdateDict.convert(value, self.callback)\n dict.__setitem__(self, key, value)\n if not self.converting:\n self.callback()\n\n def update(self, *args, **kwargs):\n self.converting = True\n if args:\n if len(args) > 1:\n self.converting = False\n raise TypeError(\"update expected at most 1 arguments, \"\n \"got %d\" % len(args))\n other = dict(args[0])\n for key in other:\n self[key] = other[key]\n for key in kwargs:\n self[key] = kwargs[key]\n self.converting = False\n self.callback()\n\n\nclass Configs(UpdateDict):\n\n def __init__(self, config_path, device):\n self.config_path = config_path\n if not os.path.exists(config_path):\n # set root path\n root_path = QtWidgets.QFileDialog.getExistingDirectory(\n caption='Choose a directory to store wallpapers')\n if device.system == 'Windows':\n root_path = root_path.replace('/', '\\\\')\n print(root_path)\n root_image_path = os.path.join(root_path, 'images')\n # no config, display config dialog\n ci = ConfigInputDialog(Controller.get_source_configs())\n ci.exec_()\n raw_configs = ci.get_values()\n raw_configs['root_path'] = root_path\n for key, value in raw_configs.items():\n if isinstance(value, dict):\n value['config'] = config_path\n value['device_size'] = \\\n [device.width, device.height]\n value['root_image_dir'] = root_image_path\n else:\n # read from config file\n raw_configs = self._read_from_file()\n super(Configs, self).__init__(self._save_to_file)\n self.update(raw_configs)\n\n def _save_to_file(self):\n print('called')\n with open(self.config_path, 'w', encoding='utf8') as f:\n json.dump(self, f)\n\n def _read_from_file(self):\n with open(self.config_path, 'r', encoding='utf8') as f:\n configs = json.load(f)\n return configs\n","sub_path":"src/gui/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651148959","text":"from flask import render_template, request\n\nfrom uchan import app\nfrom uchan import g\nfrom uchan.lib import BadRequestError, ArgumentError\nfrom uchan.lib.utils import now\n\n\n@app.route('/banned/', methods=['GET', 'POST'])\ndef banned():\n if request.method == 'GET':\n method = g.verification_service.get_method()\n method_html = method.get_html()\n\n return render_template('banned.html', method_html=method_html)\n else:\n method = g.verification_service.get_method()\n try:\n method.verify_request(request)\n except ArgumentError as e:\n raise BadRequestError(e.message)\n\n bans = g.ban_service.get_request_bans()\n\n return render_template('banned.html', is_banned=len(bans) > 0, bans=bans, now=now)\n","sub_path":"uchan/view/banned.py","file_name":"banned.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581434944","text":"# -*- coding: utf-8 -*-\nfrom six import string_types\nfrom types import 
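# --- Editor's note: illustrative sketch, NOT part of the original module. ---
# Minimal demonstration of the UpdateDict contract described above: the callback
# fires exactly once per mutation, nested plain dicts are converted so child
# mutations notify too, and update() batches everything into a single call.
_calls = []
_d = UpdateDict(lambda: _calls.append(1))
_d["a"] = 1              # 1st callback
_d["nested"] = {"x": 0}  # value converted to UpdateDict; still one callback (2nd)
_d["nested"]["x"] = 42   # mutating the converted child notifies as well (3rd)
_d.update(b=2, c=3)      # update() batches: a single callback at the end (4th)
assert len(_calls) == 4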
MethodType\nimport os\nfrom numpy import ndarray\nimport io\n\n\ndef is_binary(filename):\n \"\"\"\n Return true if the given filename is binary.\n\n :raises: IOError if the file cannot be opened.\n :returns: True if filename is a binary file (contains null byte)\n and False otherwise.\n\n Based on the idea (..seealso:: http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text)\n that file is binary if it contains null.\n\n .. warning:: this may not work for unicode.\"\"\"\n assert isinstance(filename, string_types), '%r is not a valid filename' % filename\n assert os.path.exists(filename), '%r does not exist' % filename\n with io.open(filename, mode='rb') as fil:\n for chunk in iter(lambda: fil.read(1024), bytes()):\n if b\"\\0\" in chunk: # found null byte\n return True\n return False\n\n\ndef print_bad_path(path):\n \"\"\"\n Prints information about the existence (access possibility) of the parts\n of the given path. Useful for debugging when the path to a given file\n is wrong.\n\n :param path: path to check\n :returns: string with informations whether access to parts of the path\n is possible\n \"\"\"\n path = os.path.abspath(path)\n npath = os.path.dirname(path)\n res = [path]\n while path != npath:\n path, npath = npath, os.path.dirname(npath)\n res.append(path)\n msg = {True: \"passed\", False: \"failed\"}\n return \"\\n\".join([\"%s: %s\" % (msg[os.path.exists(i)], i) for i in res])\n\n\ndef __object_attr(obj, mode, attr_type):\n \"\"\"list object attributes of a given type\"\"\"\n test = {\"public\": lambda k: not k.startswith('_'),\n \"private\": lambda k: (k.startswith('_') and not k.startswith('__')),\n \"both\": lambda k: not k.startswith('__'),\n \"all\": lambda k: True\n }\n\n if not mode in test:\n print(\"Wrong mode! Accepted modes: public, private, both, all.\")\n return None\n check = test[mode]\n\n out = []\n for k in dir(obj):\n if check(k) and attr_type(getattr(obj, k)):\n out.append(k)\n out.sort()\n return out\n #return sorted([k for k in dir(obj) if (check(k) and\n # attr_type(getattr(obj, k)))])\n\n\ndef object_methods(obj, mode = \"public\"):\n \"\"\"\n List the names of methods of a class as strings. Returns public methods\n as default.\n\n :param obj: the object for checking\n :param mode: defines what kind of methods will be listed\n * \"public\" - names that do not begin with underscore\n * \"private\" - names that begin with single underscore\n * \"both\" - private and public\n * \"all\" - all methods that are defined for the object\n :returns: sorted list of the names of methods of a given type\n or None if the mode is wrong\n \"\"\"\n return __object_attr(obj, mode, lambda x: isinstance(x, MethodType))\n\n\ndef object_attributes(obj, mode=\"public\"):\n \"\"\"\n List the names of attributes of a class as strings. 
Returns public attributes\n as default.\n\n :param obj: the object for checking\n :param mode: defines what kind of attributes will be listed\n * \"public\" - names that do not begin with underscore\n * \"private\" - names that begin with single underscore\n * \"both\" - private and public\n * \"all\" - all attributes that are defined for the object\n :returns: sorted list of the names of attributes of a given type or None\n if the mode is wrong\n \"\"\"\n return __object_attr(obj, mode, lambda x: not isinstance(x, MethodType))\n\n\ndef write_object_attributes(name, obj, nspaces=0, nbase=0, isClass=True, debug=False):\n \"\"\"\n Writes a series of nested objects\n \"\"\"\n spaces = (nbase+nspaces) * ' '\n msg = spaces\n xml = spaces\n if isClass:\n equals = '='\n else:\n equals = ':'\n\n if debug:\n print(\"attr=%s equals=|%s|\" % (name, equals))\n # name\n if isinstance(obj, dict):\n if nspaces == 0:\n msg += '%s %s ' % (name, equals)\n else:\n if isinstance(name, tuple):\n msg += \"%s %s \" % (str(name), equals)\n else:\n msg += \"'%s' %s \" % (name, equals)\n elif isinstance(name, str):\n if isClass:\n key = '%s' % name\n else:\n key = \"'%s'\" % name\n elif isinstance(name, unicode):\n if isClass:\n key = u'%s' % name\n else:\n key = \"u'%s'\" % name\n elif isinstance(name, int) or isinstance(name, float) or isinstance(name, tuple) or name is None:\n key = \"%s\" % str(name)\n else:\n raise RuntimeError('key=%s is not a string. Type=%s' % (name, type(name)))\n\n if debug:\n print(\"name=%s type=%s\" % (name, type(obj)))\n\n # write the object\n if isinstance(obj, int) or isinstance(obj, float) or obj is None:\n xml += '' % (name, obj, type(obj))\n msg += '%s %s %s,\\n' % (key, equals, write_value(obj, nspaces, nbase, isClass))\n elif is_string(obj):\n msg += \"%s %s %s,\\n\" % (key, equals, write_value(obj, nspaces, nbase, isClass))\n\n elif isinstance(obj, dict):\n msg += write_dict(obj, nspaces, nbase, isClass) + ',\\n'\n elif isinstance(obj, tuple) or isinstance(obj, list):\n msg += '%s %s %s,\\n' % (key, equals, write_value(obj, nspaces, nbase, isClass))\n\n elif isinstance(obj, ndarray):\n starter = '%s%s %s' % (nspaces, key, equals)\n msg += '%s %s %s,\\n' % (key, equals, write_array(obj, nspaces + 6 + len(starter)))\n else: # generic class\n objectType = obj.__class__.__name__\n #raise RuntimeError('objectType=%s is not supported' % objectType)\n msg += \"%s %s \" % (key, equals)\n msg += write_class(name, obj, nspaces, nbase) + ',\\n' # comma for class\n if nspaces == 0:\n msg = msg[:-2]\n if debug:\n print(\"|%r|\" % msg)\n return msg\n","sub_path":"pyNastran/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"591704102","text":"#Standard import\nimport copy\n\n# RootTools\nfrom RootTools.core.standard import *\n\n# Logging\nimport logging\nlogger = logging.getLogger(__name__)\n\n#user specific\nfrom TopEFT.Tools.user import analysis_results\nfrom TopEFT.Tools.helpers import getObjFromFile\n\n#define samples\nfrom TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *\nfrom TopEFT.samples.cmgTuples_Data25ns_80X_03Feb_postProcessed import *\n\n## Choices for specific samples\nTTZSample = TTZtoLLNuNu\nWZSample = WZ\nTTXSample = TTX\nTTWSample = TTW\nTZQSample = TZQ\nrareSample = rare\nnonpromptSample = nonprompt\npseudoDataSample= pseudoData\n\nfrom TopEFT.Analysis.SystematicEstimator import jmeVariations, metVariations\nfrom 
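# --- Editor's note: illustrative sketch, NOT part of the original module. ---
# Quick demonstration of the visibility modes accepted by object_methods() and
# object_attributes() above, using a throwaway class defined only for this demo.
class _Demo(object):
    def __init__(self):
        self.x = 1
        self._y = 2
    def pub(self):
        pass
    def _priv(self):
        pass

_demo = _Demo()
assert object_methods(_demo, "public") == ["pub"]     # no leading underscore
assert object_attributes(_demo, "private") == ["_y"]  # single-underscore names only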
TopEFT.Analysis.SetupHelpers import getZCut, channels, allChannels\nfrom TopEFT.Tools.objectSelection import getFilterCut\nfrom TopEFT.Analysis.regions import *\n\n\n#to run on data\ndataLumi2016 = {'3mu':SingleMuon_Run2016.lumi, '3e':SingleElectron_Run2016.lumi, '2mu1e':SingleMuon_Run2016.lumi, '2e1mu':SingleElectron_Run2016.lumi}\ndataLumi20167 = {'3mu':80000, '3e':80000, '2mu1e':80000, '2e1mu':80000}\ndataLumi201678 = {'3mu':150000, '3e':150000, '2mu1e':150000, '2e1mu':150000}\n\ndataHighLumi = {'3mu':3e6, '3e':3e6, '2mu1e':3e6, '2e1mu':3e6}\n\n#10/fb to run on MC\n#lumi = {c:10000 for c in channels}\nlumi = dataLumi2016\n\n#Define defaults here\nzMassRange = 10\ndefault_mllMin = 0\ndefault_zWindow = \"onZ\"\ndefault_nJets = (3, -1) # written as (min, max)\ndefault_nBTags = (1, -1)\ndefault_metMin = 0\n\ndefault_sys = {'weight':'weight', 'reweight':['reweightPU36fb'], 'selectionModifier':None}\ndefault_parameters = {\n 'mllMin': default_mllMin,\n 'metMin': default_metMin,\n 'zWindow': default_zWindow,\n 'nJets': default_nJets,\n 'nBTags': default_nBTags,\n }\n\nclass Setup:\n def __init__(self):\n self.name = \"regionsE_xsec_lowUnc\"\n #self.name = \"regionsE_150fb_xsec_shape_statOnly\"\n self.channels = [\"all\"]\n self.regions = regionsE\n self.resultsFile= 'calculatedLimits_%s.db'%self.name\n\n self.analysis_results = analysis_results\n self.zMassRange = zMassRange\n self.prefixes = []\n self.externalCuts = []\n\n #Default cuts and requirements. Those three things below are used to determine the key in the cache!\n self.parameters = default_parameters \n self.sys = default_sys \n self.lumi = lumi\n self.dataLumi = lumi\n \n self.genSelection = \"Sum$(GenJet_pt>30)>=3&& abs(Z_mass-91.2)<10&&(abs(Z_daughterPdg)==11 || abs(Z_daughterPdg)==13 || abs(Z_daughterPdg)==15 )\"\n\n self.samples = {\n 'TTZ': {c:TTZSample for c in channels},\n 'WZ' : {c:WZSample for c in channels},\n 'TTX' : {c:TTXSample for c in channels},\n 'TTW' : {c:TTWSample for c in channels},\n 'TZQ' : {c:TZQSample for c in channels},\n 'rare': {c:rareSample for c in channels},\n 'nonprompt': {c:nonpromptSample for c in channels},\n 'pseudoData': {c:pseudoDataSample for c in channels},\n 'Data' : {'3mu': SingleMuon_Run2016, #FIXME This needs to be fixed when we have a decent trigger/backup trigger strategy\n '3e': SingleElectron_Run2016,\n '2mu1e': SingleMuon_Run2016,\n '2e1mu': SingleElectron_Run2016},\n }\n \n def prefix(self):\n return '_'.join(self.prefixes+[self.preselection('MC')['prefix']])\n\n def defaultCacheDir(self):\n return os.path.join(self.analysis_results, self.prefix(), 'cacheFiles')\n\n #Clone the setup and optinally modify the systematic variation\n def defaultClone(self):\n '''Clone setup and change systematics to default'''\n\n res = copy.copy(self)\n res.sys = copy.deepcopy(default_sys)\n res.parameters = copy.deepcopy(default_parameters)\n\n return res\n\n #Clone the setup and optinally modify the systematic variation\n def systematicClone(self, sys=None, parameters=None):\n '''Clone setup and change systematic if provided'''\n\n res = copy.copy(self)\n res.sys = copy.deepcopy(self.sys)\n res.parameters = copy.deepcopy(self.parameters)\n\n if sys:\n for k in sys.keys():\n if k=='remove':\n for i in sys[k]:\n res.sys['reweight'].remove(i)\n elif k=='reweight':\n res.sys[k] = list(set(res.sys[k]+sys[k])) #Add with unique elements\n for upOrDown in ['Up','Down']:\n if 'reweightPU36fb'+upOrDown in res.sys[k]: res.sys[k].remove('reweightPU36fb')\n if 'reweightDilepTriggerBackup'+upOrDown in res.sys[k]: 
res.sys[k].remove('reweightDilepTriggerBackup')\n if 'reweightBTag_SF_b_'+upOrDown in res.sys[k]: res.sys[k].remove('reweightBTag_SF')\n if 'reweightBTag_SF_l_'+upOrDown in res.sys[k]: res.sys[k].remove('reweightBTag_SF')\n if 'reweightLeptonSF'+upOrDown in res.sys[k]: res.sys[k].remove('reweightLeptonSF')\n if 'reweightLeptonFastSimSF'+upOrDown in res.sys[k]: res.sys[k].remove('reweightLeptonFastSimSF')\n else:\n res.sys[k] = sys[k] # if sys[k] else res.sys[k]\n\n if parameters:\n for k in parameters.keys():\n res.parameters[k] = parameters[k]\n\n\n return res\n\n def defaultParameters(self, update={}):\n assert type(update)==type({}), \"Update arguments with key arg dictionary. Got this: %r\"%update\n res = copy.deepcopy(self.parameters)\n res.update(update)\n return res\n\n def weightString(self):\n return \"*\".join([self.sys['weight']] + (self.sys['reweight'] if self.sys['reweight'] else []))\n\n def preselection(self, dataMC , channel='all', isFastSim = False):\n '''Get preselection cutstring.'''\n return self.selection(dataMC, channel = channel, isFastSim = isFastSim, hadronicSelection = False, **self.parameters)\n\n def selection(self, dataMC,\n mllMin, metMin, zWindow,\n nJets, nBTags,\n channel = 'all', hadronicSelection = False, isFastSim = False):\n '''Define full selection\n dataMC: 'Data' or 'MC'\n channel: all, EE, MuMu or EMu\n zWindow: offZ, onZ, or allZ\n hadronicSelection: whether to return only the hadronic selection\n isFastSim: adjust filter cut etc. for fastsim\n '''\n #Consistency checks\n if self.sys['selectionModifier']:\n assert self.sys['selectionModifier'] in jmeVariations+metVariations+['genMet'], \"Don't know about systematic variation %r, take one of %s\"%(self.sys['selectionModifier'], \",\".join(jmeVariations + ['genMet']))\n assert dataMC in ['Data','MC'], \"dataMC = Data or MC, got %r.\"%dataMC\n\n #Postfix for variables (only for MC and if we have a jme variation)\n sysStr = \"\"\n metStr = \"\"\n if dataMC == \"MC\" and self.sys['selectionModifier'] in jmeVariations: sysStr = \"_\" + self.sys['selectionModifier']\n if dataMC == \"MC\" and self.sys['selectionModifier'] in metVariations: metStr = \"_\" + self.sys['selectionModifier']\n\n res={'cuts':[], 'prefixes':[]}\n\n if nJets and not (nJets[0]==0 and nJets[1]<0):\n assert nJets[0]>=0 and (nJets[1]>=nJets[0] or nJets[1]<0), \"Not a good nJets selection: %r\"%nJets\n njetsstr = \"nJetSelected\"+sysStr+\">=\"+str(nJets[0])\n prefix = \"nJets\"+str(nJets[0])\n if nJets[1]>=0:\n njetsstr+= \"&&\"+\"nJetSelected\"+sysStr+\"<=\"+str(nJets[1])\n if nJets[1]!=nJets[0]: prefix+=str(nJets[1])\n else:\n prefix+='p'\n res['cuts'].append(njetsstr)\n res['prefixes'].append(prefix)\n\n if nBTags and not (nBTags[0]==0 and nBTags[1]<0):\n assert nBTags[0]>=0 and (nBTags[1]>=nBTags[0] or nBTags[1]<0), \"Not a good nBTags selection: %r\"% nBTags\n nbtstr = \"nBTag\"+sysStr+\">=\"+str(nBTags[0])\n prefix = \"nbtag\"+str(nBTags[0])\n if nBTags[1]>=0:\n nbtstr+= \"&&nBTag\"+sysStr+\"<=\"+str(nBTags[1])\n if nBTags[1]!=nBTags[0]: prefix+=str(nBTags[1])\n else:\n prefix+='p'\n res['cuts'].append(nbtstr)\n res['prefixes'].append(prefix)\n\n if metMin and metMin>0:\n res['cuts'].append('met_pt'+sysStr+metStr+'>='+str(metMin))\n res['prefixes'].append('met'+str(metMin))\n\n if not hadronicSelection:\n if mllMin and mllMin>0:\n res['cuts'].append('Z_mass>='+str(mllMin))\n res['prefixes'].append('mll'+str(mllMin))\n\n \n presel3mu = \"(nGoodMuons==3&&nGoodElectrons==0)\"\n presel2mu1e = \"(nGoodMuons==2&&nGoodElectrons==1)\"\n 
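A reduced standalone sketch of the cut-string assembly used in selection() above (the function name and thresholds here are invented; only the join pattern matches the code):

def build_selection(n_jets=(3, -1), met_min=80):
    cuts = []
    prefixes = []
    lo, hi = n_jets
    jet_cut = "nJetSelected>=%i" % lo
    jet_prefix = "nJets%i" % lo
    if hi >= 0:
        jet_cut += "&&nJetSelected<=%i" % hi
        if hi != lo:
            jet_prefix += str(hi)
    else:
        jet_prefix += "p"  # 'p' marks an open-ended upper bound
    cuts.append(jet_cut)
    prefixes.append(jet_prefix)
    if met_min > 0:
        cuts.append("met_pt>=%i" % met_min)
        prefixes.append("met%i" % met_min)
    return {"cut": "&&".join(cuts), "prefix": "-".join(prefixes)}

print(build_selection())
# {'cut': 'nJetSelected>=3&&met_pt>=80', 'prefix': 'nJets3p-met80'}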
presel2e1mu = \"(nGoodMuons==1&&nGoodElectrons==2)\"\n presel3e = \"(nGoodMuons==0&&nGoodElectrons==3)\"\n allPresels = [presel3mu,presel2mu1e,presel2e1mu,presel3e]\n\n #Z window\n assert zWindow in ['offZ', 'onZ', 'allZ'], \"zWindow must be one of onZ, offZ, allZ. Got %r\"%zWindow\n if zWindow == 'onZ': res['cuts'].append(getZCut(zWindow, self.zMassRange))\n if zWindow == 'offZ' and channel!=\"EMu\": res['cuts'].append(getZCut(zWindow, self.zMassRange)) # Never use offZ when in emu channel, use allZ instead\n\n #lepton channel\n assert channel in allChannels, \"channel must be one of \"+\",\".join(allChannels)+\". Got %r.\"%channel\n\n if channel==\"3mu\": chStr = presel3mu\n elif channel==\"2mu1e\": chStr = presel2mu1e\n elif channel==\"2e1mu\": chStr = presel2e1mu\n elif channel==\"3e\": chStr = presel3e\n elif channel==\"all\": chStr = \"(\"+'||'.join(allPresels)+')'\n\n res['cuts'].append(chStr)\n\n res['cuts'].append('nlep==3')\n\n res['cuts'].append(\"lep_pt[0]>40&&lep_pt[1]>20&&lep_pt[2]>10\")\n\n\n res['cuts'].append(getFilterCut(isData=(dataMC=='Data'), isFastSim=isFastSim))\n res['cuts'].extend(self.externalCuts)\n \n return {'cut':\"&&\".join(res['cuts']), 'prefix':'-'.join(res['prefixes']), 'weightStr': ( self.weightString() if dataMC == 'MC' else 'weight')}\n","sub_path":"Analysis/python/Setup.py","file_name":"Setup.py","file_ext":"py","file_size_in_byte":10835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"174738969","text":"import os\nfrom itertools import zip_longest as lzip\nimport cat4py as cat\nimport numpy as np\n\nshape = (512, 512)\nchunkshape = (121, 99)\nblockshape = (12, 31)\n\n\ndtype = np.dtype(np.float64)\nitemsize = np.dtype(dtype).itemsize\n\n# Create a numpy array\nnparray = np.linspace(0, 1, int(np.prod(shape)), dtype=dtype).reshape(shape)\n\n# Create a caterva array from a numpy array\na = cat.asarray(nparray, chunkshape=chunkshape, blockshape=blockshape,\n enforceframe=True)\n\n# Create an empty caterva array (on disk)\nb = cat.empty(shape, itemsize, dtype=str(dtype))\n\n# Fill an empty caterva array using a block iterator\nfor block, info in b.iter_write():\n block[:] = bytes(nparray[info.slice])\n\n# Assert both caterva arrays\nfor (block1, info1), (block2, info2) in lzip(a.iter_read(blockshape), b.iter_read(blockshape)):\n np.testing.assert_equal(np.asarray(block1), np.asarray(block2))\n\nprint(np.asarray(b[5:10, 5:10]))\n","sub_path":"examples/ex_npcontainer.py","file_name":"ex_npcontainer.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78126875","text":"#http://www.statmt.org/wmt14/translation-task.html#download\n\n'''Run this code in python3 to ensure Russian translations hold'''\n\nimport os\nimport string\nfrom russian_dictionary import translit1\n\n# import savReaderWriter\n\ndef read_lines(filename):\n while True:\n f = open(filename)\n for line in f:\n yield line\n\n\ndef read_it(filename):\n temp_list=[]\n f = open(filename)\n for line in f:\n if line.__contains__('doc docid'):\n x = line.replace('=',':').split()\n keys = ['docid','genre','origlang']\n\n else:\n if line.__contains__('seg id'):\n seg_id = ((line.split('<'))[1].split('>')[0])\n text = ((line.split('>'))[1].split('<')[0])\n temp_list.append(translit1(text))\n return temp_list\n\n\n\n\nif __name__ == '__main__':\n\n english = read_it('../test-full/newstest2014-ruen-src.en.sgm')\n russian = 
read_it('../test-full/newstest2014-ruen-ref.ru.sgm')\n\n # print(russian)\n # french = read_it('test-full/newstest2014-fren-ref.fr.sgm')\n # english_french = read_it('test-full/newstest2014-fren-ref.en.sgm')\n # print len(english_french),len(russian), len(french)\n #\n #\n # english_file = open('data/english-french','w')\n # english_file.writelines([\"%s\\n\" % item for item in french])\n print('writing')\n russian_file= open('../data/english-russian',\"w\")\n russian_file.writelines([\"%s\\n\" % item for item in russian])\n english_file = open('../data/russian-english','w')\n english_file.writelines([\"%s\\n\" % item for item in english])\n #\n # french_file = open('data/french-english','w')\n # french_file.writelines([\"%s\\n\" % item for item in english_french])\n #\n #\n # german_english= read_it('test-full/newstest2014-fren-ref')\n # german_english=open('data/german-english')\n # german_english.writelines([\"%s\\n\" % item for item in german_english])\n # english_german=open('data/english-german')\n # english_german.writelines([\"%s\\n\" % item for item in english_german])\n","sub_path":"data_cleaning/configure_data.py","file_name":"configure_data.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116227556","text":"from app import db\nfrom sqlalchemy.orm import relationship\n\nfrom orm.entities import Party, ElectionParty\nfrom orm.entities.Result import PartyWiseResult\nfrom exception import NotFoundException\n\n\nclass PartyCountModel(db.Model):\n __tablename__ = 'partyWiseResult_partyCount'\n partyWiseResultId = db.Column(db.Integer, db.ForeignKey(PartyWiseResult.Model.__table__.c.partyWiseResultId),\n primary_key=True)\n partyId = db.Column(db.Integer, db.ForeignKey(Party.Model.__table__.c.partyId), primary_key=True)\n # electionId = db.Column(db.Integer, db.ForeignKey(ElectionParty.Model.__table__.c.electionId), primary_key=True)\n count = db.Column(db.Integer)\n countInWords = db.Column(db.String(1000), nullable=True)\n\n\nModel = PartyCountModel\n\n\ndef create(partyWiseResultId, partyId, count, countInWords=None, electionId=None):\n if electionId is not None:\n electionParty = ElectionParty.get_by_id(electionId=electionId, partyId=partyId)\n if electionParty is None:\n raise NotFoundException(\"Party is not registered for the given election. (partyId=%d)\" % partyId)\n else:\n party = Party.get_by_id(partyId=partyId)\n if party is None:\n raise NotFoundException(\"Party not found. 
(partyId=%d)\" % partyId)\n\n result = Model(\n partyWiseResultId=partyWiseResultId,\n partyId=partyId,\n count=count,\n countInWords=countInWords\n )\n db.session.add(result)\n db.session.commit()\n\n return result\n","sub_path":"orm/entities/Result/PartyWiseResult/PartyCount.py","file_name":"PartyCount.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319854444","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file', type=argparse.FileType('r'), help='Path to txt file')\nfile = parser.parse_args().file\n\nwith file:\n text = file.readline()\n\ncounter = 0\npre_char = \"\"\nresult = \"\"\nfor char in text:\n if char == pre_char:\n counter += 1\n continue\n elif counter % 2 == 0:\n result += \"\"\n elif counter % 2 == 1:\n result += pre_char\n pre_char = char\n counter = 1\nelse:\n if counter % 2 == 0:\n result += \"\"\n elif counter % 2 == 1:\n result += pre_char\n\nprint(result)\n","sub_path":"edward.py","file_name":"edward.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19961672","text":"import torch\nimport numpy as np\nfrom config import cfg\nfrom data import make_data_loader\nfrom modeling import build_model\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import Timer\nfrom ignite.metrics import RunningAverage\n\ndef create_evaluator(model, device=None):\n \n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.eval()\n \n with torch.no_grad():\n data, target = batch\n\n data = data.cuda()\n target = target.cuda()\n output = model(data)\n \n acc = (output.max(1)[1] == target).float().mean()\n\n return acc.item()\n\n return Engine(_update)\n \ndef do_validate(cfg, model, val_loader):\n device = cfg.MODEL.DEVICE\n if device == \"cuda\":\n torch.cuda.set_device(cfg.MODEL.CUDA)\n evaluator = create_evaluator(model, device=device)\n RunningAverage(output_transform=lambda x: x).attach(evaluator, 'eva_avg_acc')\n\n timer = Timer(average=True)\n\n timer.attach(evaluator, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n acc_list = list()\n\n @evaluator.on(Events.ITERATION_COMPLETED)\n def log_accuracy(engine):\n iter = (engine.state.iteration - 1) % len(val_loader) + 1\n print (\"Iteration[{}/{}]\".format(iter, len(val_loader)))\n acc_list.append(engine.state.metrics['eva_avg_acc'])\n \n evaluator.run(val_loader)\n print (\"Validation Accuracy: {:1%}\".format(np.array(acc_list).mean()))\n\ndef main():\n \n torch.backends.cudnn.benchmark = True\n\n train_loader, val_loader = make_data_loader(cfg)\n\n model = build_model(cfg)\n weight = torch.load(cfg.MODEL.TEST_MODEL)\n model.load_state_dict(weight)\n do_validate(\n cfg,\n model,\n val_loader\n )\n\nif __name__ == '__main__':\n main()","sub_path":"mt_tracker/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"331734650","text":"from odoo import models, fields, api\r\nfrom odoo.exceptions import Warning\r\n\r\n\r\nclass Port(models.Model):\r\n _inherit = 'freight.ports'\r\n\r\n\r\n name1 = fields.Char(string='Temp Name')\r\n code1 = fields.Char(string='Temp Code')\r\n\r\n @api.multi\r\n def action_copy_old2new(self):\r\n ports = 
self.env['freight.ports'].sudo().search([])\r\n for port in ports:\r\n port.write({'name1': port.code,\r\n 'code1': port.name,\r\n })\r\n\r\n @api.multi\r\n def action_copy_new2old(self):\r\n ports = self.env['freight.ports'].sudo().search([])\r\n for port in ports:\r\n port.write({'name': port.name1,\r\n 'code': port.code1,\r\n })\r\n\r\n","sub_path":"sci_goexcel_freight_port_fixing/models/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196690766","text":"#!/usr/bin/env python3\n\nimport anndata as ad\nimport numpy as np\n\nfrom typing import List\n\nfrom ..transformation import Transformation\nfrom ....utils.matrix import pooling, extract_chunks, sort_sparse_matrix, perturbate\nfrom ....utils.graph import nearest_neighbors, nearest_neighbors_custom\n\n\nclass Pooling(Transformation):\n \"\"\"\n Replaces each sample by the average of its neighbors. This\n is a useful to smooth data manifolds, and reduce the impact\n of outliers or artifacts. It can also be used after a merging\n to smooth the result, by toggling per_dataset parameter.\n\n Parameters\n ----------\n n_neighbors: int, default = 5\n Number of neighhbors to use for the pooling. The higher,\n the smoother data is.\n\n jitter_std: float, default = 0.01\n If > 0, applies a small perturbation at the end of pooling of\n standard deviation $jitter_std to unstick samples.\n\n per_dataset: bool, default = True\n Performs the pooling for each dataset independently. If false,\n all datasets are concatenated (they are then expected to be\n embedded in the same features space).\n \"\"\"\n\n def __init__(\n self,\n n_neighbors: int = 5,\n transformation_rate: float = 1.0,\n jitter_std: float = 0.01,\n per_dataset: bool = True,\n ):\n Transformation.__init__(self, \"POOLING\", True, transformation_rate)\n self.n_neighbors = n_neighbors\n self.jitter_std = jitter_std\n self.per_dataset = per_dataset\n\n def transform(\n self,\n datasets: List[ad.AnnData],\n embeddings: List[np.ndarray],\n ) -> List[np.ndarray]:\n \"\"\"\n Applies pooling, potentially partial.\n \"\"\"\n if self.per_dataset:\n result = []\n for adata, X in zip(datasets, embeddings):\n neighbors = nearest_neighbors(\n adata,\n mode=\"edges\",\n n_neighbors=self.n_neighbors,\n )\n indices, _ = sort_sparse_matrix(neighbors)\n X_pooled = pooling(X, indices)\n if self.transformation_rate <= 1.0:\n X_pooled *= self.transformation_rate\n X_pooled += X * (1.0 - self.transformation_rate)\n result.append(X_pooled)\n else:\n X_all = np.concatenate(datasets, axis=0)\n nn_matrix = nearest_neighbors_custom(\n X_all,\n \"edges\",\n n_neighbors=self.n_neighbors,\n )\n indices, _ = sort_sparse_matrix(nn_matrix, fill_empty=True)\n X_pooled = pooling(X_all, indices)\n if self.transformation_rate <= 1.0:\n X_pooled *= self.transformation_rate\n X_pooled += X_all * (1.0 - self.transformation_rate)\n result = extract_chunks(X_pooled, [X.shape[0] for X in datasets])\n\n if self.jitter_std > 0:\n for X in result:\n X = perturbate(X, std=self.jitter_std)\n\n return result\n","sub_path":"src/transmorph/engine/transforming/algorithms/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249575974","text":"from django.conf.urls import url\nfrom django.http import HttpResponse\nfrom django.shortcuts import render,get_object_or_404\nfrom 
django.http import Http404, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import Vote,Candidate\nfrom django.contrib.auth import(\n    authenticate,\n    get_user_model,\n    login,\n    logout,\n    )\nfrom django.contrib.auth.models import User\nfrom .forms import userLoginForm\n\ndef index(request):\n    did_vote = False\n    if request.user.is_authenticated:\n        pk = request.user.username\n        print(request.user.username)\n        voter = Vote.objects.get(user_id=pk)\n        did_vote = voter.did_vote\n\n    list_candidates = []\n    votes_count = []\n    # the five candidates on the ballot are stored with hard-coded ids 6-10\n    for candidate_id in range(6, 11):\n        candidate = Candidate.objects.get(id=candidate_id)\n        list_candidates.append(candidate.candidate_name)\n        votes_count.append(candidate.count_votes)\n    #print(list_candidates[0])\n    context = {'list_candidates':list_candidates,'votes_count':votes_count,'did_vote':did_vote}\n    return render(request, 'polls/index.html', context)\n\ndef success(request):\n    pk = request.user.username\n    #print(Vote.objects.get(id=pk-1).user_id)\n    voter = Vote.objects.get(user_id=pk)\n    h_type = request.POST.get('h_type')\n    candid = Candidate.objects.get(candidate_name=h_type)\n    print(candid.candidate_name)\n    candid.count_votes = candid.count_votes + 1\n    candid.save()\n    print(candid.count_votes)\n    voter.did_vote = True\n    voter.save()\n    context = {'did_vote':voter.did_vote}\n    return render(request,'polls/success.html',context)\n\n\ndef results(request, question_id):\n    response = \"You're looking at the results of question %s.\"\n    return HttpResponse(response % question_id)\n\ndef vote(request, question_id):\n    question = get_object_or_404(Question, pk=question_id)\n    try:\n        selected_choice = question.choice_set.get(pk=request.POST['choice'])\n    except (KeyError, Choice.DoesNotExist):\n        # Redisplay the question voting form.\n        return render(request, 'polls/detail.html', {\n            'question': question,\n            'error_message': \"You didn't select a choice.\",\n        })\n    else:\n        selected_choice.votes += 1\n        selected_choice.save()\n        # Always return an HttpResponseRedirect after successfully dealing\n        # with POST data. This prevents data from being posted twice if a\n        # user hits the Back button.\n        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))\n\n\n
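The read-modify-write increment in success() above can lose votes when two requests race; a sketch of an atomic variant using Django's F() expression (record_vote is a hypothetical helper, Candidate is the model imported above):

from django.db.models import F

def record_vote(candidate_name):
    # the UPDATE runs inside the database, so two concurrent requests
    # cannot both read the same stale counter value
    Candidate.objects.filter(candidate_name=candidate_name).update(
        count_votes=F('count_votes') + 1
    )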
","sub_path":"OVS/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10154583","text":"# -*- encoding: UTF-8 -*-\n\nfrom unittest import TestCase\nfrom numpy import ndarray\n\n\nclass BaseTest(TestCase):\n\n    def assertListsAlmostEqual(self, seq1, seq2, places=7):\n        sequence = (tuple, list, ndarray)\n        if len(seq1) != len(seq2):\n            raise AssertionError(\"%s != %s\" % (str(seq1), str(seq2)))\n        for i, j in zip(seq1, seq2):\n            if isinstance(i, sequence) and isinstance(j, sequence):\n                self.assertListsAlmostEqual(i, j, places)\n            else:\n                self.assertAlmostEqual(i, j, places)\n","sub_path":"tests/ArborisTests.py","file_name":"ArborisTests.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285562910","text":"import pandas as pd\nimport os\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import pearsonr\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nimport random as rd\nfrom math import sqrt\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import LogisticRegression\nimport seaborn as sns\nimport datetime\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.special import expit\n\ndef set_wd():\n    sns.set()\n\n# storage data and time spread prices\ndef import_data():\n\tstorage_data = pd.read_excel(\"storage_datarealone.xlsx\", sheet_name=None)\n\t# time spread prices\n\tprice_data = pd.read_csv(\"price_data.csv\", sep=\";\")\n\tprice_data.rename(columns={\"Date\": \"gasDayStartedOn\"}, inplace=True)\n\tprice_data[\"gasDayStartedOn\"] = pd.to_datetime(price_data[\"gasDayStartedOn\"])\n\treturn storage_data, price_data\n\nstorage_data, price_data = import_data()\n\n# dictionaries holding the results\nLogistic_Regression = {}\nrandom_forest = {}\n\n# loop over the data\nfor key in storage_data:\n\t# compute NW\n\tinj = storage_data[key][\"injection\"].values\n\twit = storage_data[key][\"withdrawal\"].values\n\tl = []\n\tfor i in range(len(inj)):\n\t\tl.append(wit[i] - inj[i])\n\tstorage_data[key][\"NW\"] = pd.DataFrame(l)\n\t# compute lagged_NW\n\tl1 = storage_data[key][\"NW\"].values\n\tl2 = [0]\n\tfor i in range(len(l1) - 1):\n\t\tl2.append(l1[i])\n\tstorage_data[key][\"lagged_NW\"] = pd.DataFrame(l2)\n\t# binary column\n\tl1 = storage_data[key][\"NW\"].values\n\tl2 = []\n\tfor i in range(len(l1)):\n\t\tif l1[i] > 0:\n\t\t\tl2.append(1)\n\t\telse:\n\t\t\tl2.append(0)\n\tstorage_data[key][\"Net Withdrawal_binary\"] = pd.DataFrame(l2)\n\t# compute FSW1 = max(Full Stock - 45, 0) and FSW2 = max(45 - Full Stock, 0)\n\tl = storage_data[key][\"full\"].values\n\tl1 = []\n\tl2 = []\n\tfor i in range(len(l)):\n\t\tl1.append(max(l[i] - 45, 0))\n\t\tl2.append(max(45 - l[i], 0))\n\tstorage_data[key][\"FSW1\"] = pd.DataFrame(l1)\n\tstorage_data[key][\"FSW2\"] = pd.DataFrame(l2)\n\t# join with the time spread prices\n\tstorage_data[key] = storage_data[key].merge(price_data, left_on=\"gasDayStartedOn\", right_on=\"gasDayStartedOn\")\n\n\t# logistic regression\n\t# X matrix is composed of lagged_NW, FSW1, FSW2 and all the time spread price columns\n\tstorage_data[key] = storage_data[key].dropna()\n\t#y = np.array(storage_data[key][\"Net Withdrawal_binary\"].values)\n\ty = storage_data[key][\"Net Withdrawal_binary\"].to_numpy()\n\tx = storage_data[key].loc[:, [\"lagged_NW\", \"FSW1\", \"FSW2\", \"SAS_GPL\", \"SAS_TTF\", \"SAS_NCG\", \"SAS_NBP\"]].to_numpy()\n\t#x = np.array([storage_data[key][\"gasDayStartedOn\"].values, storage_data[key][\"lagged_NW\"].values, storage_data[key][\"FSW1\"].values, storage_data[key][\"FSW2\"].values, storage_data[key][\"SAS_GPL\"].values, storage_data[key][\"SAS_TTF\"].values, storage_data[key][\"SAS_NCG\"].values, storage_data[key][\"SAS_NBP\"].values])\n\t#x = x.transpose()\n\tx_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)\n\tLogi = LogisticRegression().fit(x_train, y_train)\n\ty_pred = Logi.predict(x_test)\n\tcm = confusion_matrix(y_test, y_pred)\n\tproba = Logi.predict_proba(x_test)[:, 1]\n\tLogistic_Regression[key] = {\"recall\": metrics.recall_score(y_test, y_pred), \"neg_recall\": cm[1, 1] / (cm[0, 1] + cm[1, 1]),\\\n\t \"confusion\": cm, \"precision\": metrics.precision_score(y_test, y_pred),\\\n\t \"neg_precision\": cm[1, 1] / cm.sum(axis=1)[1], \"roc\": metrics.roc_auc_score(y_test, proba), \"class_mod\": Logi}\n\n\"\"\"#Part 2:\n# dictionary holding the metrics\nregression_model = {}\n\n# work on a new dictionary \"data2\" for which NW_binary = 1\nfor key in data:\n    for i in range(len(data[key][\"Net Withdrawal_binary\"])):  # length of a column\n        if (data[key][\"Net Withdrawal_binary\"].values[i] == 0):\n            data2[key][\"Net Withdrawal_binary\"].drop(i, 0, inplace=True)\nf = f[f.NW_b != 0]\n\ny1 = np.array(data2[key][\"NW\"].values)\nx1 = np.array([data2[key][\"gasDayStartedOn\"].values, data2[key][\"lagged_NW\"].values, data2[key][\"FSW1\"].values, data2[key][\"FSW2\"].values, data2[key][\"SAS_GPL\"].values, data2[key][\"SAS_TTF\"].values, data2[key][\"SAS_NCG\"].values, data2[key][\"SAS_NBP\"].values])\n\n# print(data[['SF -UGS Rehden']][\"Net Withdrawal_binary\"])\n\"\"\"","sub_path":"projet/rawrealsupply.py","file_name":"rawrealsupply.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"53367334","text":"import requests\n\n\nclass BatchAPIMethods:\n    GET = 'GET'\n    PUT = 'PUT'\n    POST = 'POST'\n\n\nclass BatchAPIAction:\n\n    def __init__(self, url=None, headers=None, uri_values=None, json=None, method=None, **kwargs):\n        self.method = method\n        self.json = json\n        self.request_url = url.format(*uri_values) if uri_values else url\n        self.headers = headers\n\n    def send(self):\n        try:\n            resp = requests.request(self.method, self.request_url, headers=self.headers, json=self.json)\n            # print(f'{resp.status_code} {self.method} {self.request_url}: {self.json}')\n            # Raise 401 immediately\n            if resp.status_code == 401:\n                raise Exception('401 status returned. 
Check credentials')\n # print(f'{resp.status} {self.method} {self.request_url}: {self.json}')\n return resp\n\n except Exception as e:\n raise e\n","sub_path":"batch_api/batch_api_action.py","file_name":"batch_api_action.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152911069","text":"########################################################\n#\n# This sets up the connection to the database\n# You can open it in terminal and just run it to connect\n# \n# This was made based on the code prvided by PYnative\n# available at https://pynative.com/python-mysql-database-connection/ \n#\n# Look in the bottom test section to see a simple implementation\n#\n########################################################\n\nimport mysql.connector\nfrom mysql.connector import Error\n\nclass Database:\n def __init__(self, db_user, db_pass, db_host, db_port, db_name):\n self.__user = db_user\n self.__password = db_pass\n self.__host = db_host\n self.__port = db_port\n self.__name = db_name\n self.__connection = None\n self.__cursor = None\n\n def setup(self):\n '''\n Set up a connection and cursor to implement database actions\n if it fails throw an error message on the terminal\n '''\n try: \n # initialize the connection\n self.__connection = mysql.connector.connect(host = self.__host,\n user= self.__user,\n port=self.__port,\n password = self.__password,\n database = self.__name)\n # set up autocommit\n self.__connection.autocommit = True\n # create a cursor\n self.__cursor = self.__connection.cursor()\n self.__cursor.execute(\"select database();\")\n print(\"Connected to MySQL Server version \", self.__connection.get_server_info())\n print(\"You are connected to {}\\n\".format(self.__cursor.fetchone()[0]))\n except Error as err:\n print(\"ERROR:\" + err)\n except Exception as err:\n print(\"ERROR:\")\n for arg in err.args:\n print(arg)\n finally:\n return self.isConnected()\n \n def close(self):\n if self.isConnected():\n self.__cursor.close()\n self.__connection.close()\n print(\"MySQL connection is closed\")\n\n def createTable(self, table_name, primary_key, opt_args=None):\n query = 'CREATE TABLE ' + table_name + '(' \n if opt_args is not None:\n for arg in opt_args:\n query += arg[0] + ' ' + arg[1] + ','\n query += 'PRIMARY KEY (' + primary_key[0] + '));'\n\n if self.execute(query):\n print(table_name + ' successfully created.')\n \n def deleteTable(self, table_name):\n if self.execute('DROP TABLE ' + table_name +';'):\n print(table_name + ' successfully deleted.')\n\n def deleteRow(self, table_name, condition):\n query = 'DELETE FROM '+table_name+' WHERE '+condition\n if self.execute(query):\n print('Rows containing',condition,'in',table_name,'has been deleted.')\n \n def deleteColumn(self, table_name, column_name):\n query = 'ALTER TABLE '+table_name+' DROP COLUMN '+column_name\n if self.execute(query):\n print('Column', column_name, 'has been deleted.')\n \n def getQuery(self, query):\n '''\n arguments: query string in sql\n returns: list of tuples of table entries\n '''\n if self.execute(query):\n return self.__cursor.fetchall()\n\n def execute(self, query):\n '''\n Simple execution of sql commands\n arguments: query string\n returns: Boolean\n '''\n try:\n self.__cursor.execute(query)\n except Error as err:\n print(err)\n return False\n except Exception as err:\n print(err)\n return False\n else:\n return True\n\n def getHost(self):\n return self.__host\n \n def getPort(self):\n return 
self.__port\n \n def getDatabaseName(self):\n return self.__name\n \n def isConnected(self):\n return self.__connection.is_connected()\n\n\n##########################################################################\n########################## TESTING #######################################\n##########################################################################\nfrom db_settings_secret import *\nimport time\nif __name__ == \"__main__\":\n # This is used to test out if the database can be connected\n db = Database(DB_USERNAME,\n DB_PASSWORD,\n 'localhost', \n '3306',\n 'smart_meter_db')\n db.setup()\n # Test all the different functions offered by the database class\n # Perform the tasks that the user wants\n done = False\n while not done:\n print('Here are what you can do:')\n print('\\t1) Create a new table')\n print('\\t2) Delete a table')\n print('\\t3) Insert into a table')\n print('\\t4) Drop an entry from a table')\n print('\\t5) Select command')\n print('\\t6) Exit')\n\n user_input = int(input('What would you like to do? \\n> '))\n if user_input == 1:\n # Each attribute/column of the table will need two parameters:\n # (name, data type)\n primary_key = ('time', 'TIME')\n attributes = [ ('time', 'TIME'),\n ('current','FLOAT'),\n ('voltage','FLOAT'),\n ('phasor','FLOAT'),\n ('power','FLOAT')]\n db.createTable(table_name='energy_use', primary_key=primary_key, opt_args=attributes)\n elif user_input == 2:\n db.deleteTable('energy_use')\n elif user_input == 3:\n db.execute('insert into energy_use values (now(), 2.0, 120, 0.73, 240)')\n time.sleep(5)\n db.execute('insert into energy_use values (now(), 2.0, 120, 1, 120)')\n elif user_input == 4:\n db.deleteColumn('energy_use','current')\n elif user_input == 5:\n data = db.getQuery('select * from energy_use')\n print(type(data))\n for d in data:\n print(type(d))\n elif user_input == 6:\n done = True\n db.close()\n print('Good bye!')\n else:\n print('ERROR: You have entered an invalid number, please try again.\\n')\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"165013205","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n# 2 May 2020 - Modified by urw7rs\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport collections\nimport logging\nimport os\nimport signal\nimport subprocess\nimport threading\nimport time\nimport timeit\nimport traceback\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\" # noqa Necessary for multithreading.\n\nimport nest\nimport torch\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nfrom queue import Queue\nfrom libtorchbeast import actorpool\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision.datasets import CelebA, Omniglot, MNIST\n\nfrom torch.utils.data import DataLoader\nfrom torchbeast.core import file_writer\nfrom torchbeast.core import vtrace\nfrom torchbeast.core import models\nfrom torchbeast.core import datasets\n\nfrom torchbeast import env_wrapper\n\n\n# yapf: disable\nparser = argparse.ArgumentParser(description=\"PyTorch Scalable Agent\")\n\nparser.add_argument(\"--pipes_basename\", default=\"unix:/tmp/polybeast\",\n help=\"Basename for the pipes for inter-process communication. \"\n \"Has to be of the type unix:/some/path.\")\nparser.add_argument(\"--mode\", default=\"train\",\n choices=[\"train\", \"test\", \"test_render\"],\n help=\"Training or test mode.\")\nparser.add_argument(\"--xpid\", default=None,\n help=\"Experiment id (default: None).\")\nparser.add_argument(\"--start_servers\", dest=\"start_servers\", action=\"store_true\",\n help=\"Spawn polybeast_env servers automatically.\")\nparser.add_argument(\"--no_start_servers\", dest=\"start_servers\", action=\"store_false\",\n help=\"Don't spawn polybeast_env servers automatically.\")\nparser.set_defaults(start_servers=True)\n\n# Environment settings\nparser.add_argument(\"--env_type\", type=str, default=\"libmypaint\",\n help=\"Environment. 
Ignored if --no_start_servers is passed.\")\nparser.add_argument(\"--episode_length\", type=int, default=20,\n help=\"Set epiosde length\")\nparser.add_argument(\"--canvas_width\", type=int, default=256,\n help=\"Set canvas render width\")\nparser.add_argument(\"--brush_type\", type=str, default=\"classic/dry_brush\",\n help=\"Set brush type from brush dir\")\nparser.add_argument(\"--brush_sizes\", nargs='+', type=int,\n default=[1, 2, 4, 8, 12, 24],\n help=\"Set brush_sizes float is allowed\")\nparser.add_argument(\"--use_pressure\", action=\"store_true\",\n help=\"use_pressure flag\")\nparser.add_argument(\"--use_compound\", action=\"store_true\",\n help=\"use compound action space\")\nparser.add_argument(\"--new_stroke_penalty\", type=float, default=0.0,\n help=\"penalty for new stroke\")\nparser.add_argument(\"--stroke_length_penalty\", type=float, default=0.0,\n help=\"penalty for stroke length\")\n\n# Training settings.\nparser.add_argument(\"--disable_checkpoint\", action=\"store_true\",\n help=\"Disable saving checkpoint.\")\nparser.add_argument(\"--savedir\", default=\"~/logs/torchbeast\",\n help=\"Root dir where experiment data will be saved.\")\nparser.add_argument(\"--num_actors\", default=4, type=int, metavar=\"N\",\n help=\"Number of actors.\")\nparser.add_argument(\"--total_steps\", default=100000, type=int, metavar=\"T\",\n help=\"Total environment steps to train for.\")\nparser.add_argument(\"--batch_size\", default=64, type=int, metavar=\"B\",\n help=\"Learner batch size.\")\nparser.add_argument(\"--num_learner_threads\", default=2, type=int,\n metavar=\"N\", help=\"Number learner threads.\")\nparser.add_argument(\"--num_inference_threads\", default=2, type=int,\n metavar=\"N\", help=\"Number learner threads.\")\nparser.add_argument(\"--disable_cuda\", action=\"store_true\",\n help=\"Disable CUDA.\")\nparser.add_argument(\"--max_learner_queue_size\", default=None, type=int, metavar=\"N\",\n help=\"Optional maximum learner queue size. Defaults to batch_size.\")\nparser.add_argument(\"--unroll_length\", default=20, type=int, metavar=\"T\",\n help=\"The unroll length (time dimension).\")\nparser.add_argument(\"--condition\", action=\"store_true\",\n help='condition flag')\nparser.add_argument(\"--use_tca\", action=\"store_true\",\n help=\"temporal credit assignment flag\")\nparser.add_argument(\"--power_iters\", default=20, type=int,\n help=\"Spectral normalization power iterations\")\nparser.add_argument(\"--dataset\", default=\"celeba-hq\",\n help=\"Dataset name. 
MNIST, Omniglot, CelebA, CelebA-HQ is supported\")\n\n# Loss settings.\nparser.add_argument(\"--entropy_cost\", default=0.01, type=float,\n help=\"Entropy cost/multiplier.\")\nparser.add_argument(\"--baseline_cost\", default=0.5, type=float,\n help=\"Baseline cost/multiplier.\")\nparser.add_argument(\"--discounting\", default=0.99, type=float,\n help=\"Discounting factor.\")\n\n# Optimizer settings.\nparser.add_argument(\"--policy_learning_rate\", default=0.0003, type=float,\n metavar=\"LRP\", help=\"Policy learning rate.\")\nparser.add_argument(\"--discriminator_learning_rate\", default=0.0001, type=float,\n metavar=\"LRD\", help=\"Discriminator learning rate.\")\nparser.add_argument(\"--grad_norm_clipping\", default=40.0, type=float,\n help=\"Global gradient norm clip.\")\n\n# Misc settings.\nparser.add_argument(\"--write_profiler_trace\", action=\"store_true\",\n help=\"Collect and write a profiler trace \"\n \"for chrome://tracing/.\")\n\n# yapf: enable\n\n\nlogging.basicConfig(\n format=(\n \"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] \" \"%(message)s\"\n ),\n level=0,\n)\n\npil_logger = logging.getLogger(\"PIL\")\npil_logger.setLevel(logging.INFO)\n\nframe_width = 64\ngrid_width = 32\n\n\ndef compute_baseline_loss(advantages):\n return 0.5 * torch.sum(advantages ** 2)\n\n\ndef compute_entropy_loss(logits):\n \"\"\"Return the entropy loss, i.e., the negative entropy of the policy.\"\"\"\n entropy = 0\n for logit in logits:\n policy = F.softmax(logit, dim=-1)\n log_policy = F.log_softmax(logit, dim=-1)\n entropy += torch.sum(policy * log_policy)\n return entropy\n\n\ndef compute_policy_gradient_loss(logits, actions, advantages):\n cross_entropy = 0\n for logit, action in zip(logits, actions):\n cross_entropy += F.nll_loss(\n F.log_softmax(torch.flatten(logit, 0, 1), dim=-1),\n target=torch.flatten(action.long(), 0, 1).squeeze(dim=-1),\n reduction=\"none\",\n )\n cross_entropy = cross_entropy.view_as(advantages)\n return torch.sum(cross_entropy * advantages.detach())\n\n\ndef inference(flags, inference_batcher, model, image_queue, lock=threading.Lock()):\n with torch.no_grad():\n for batch in inference_batcher:\n batched_env_outputs, action, agent_state, image = batch.get_inputs()\n action = action.to(flags.actor_device, non_blocking=True)\n\n frame, _, done, *_ = batched_env_outputs\n frame = frame.to(flags.actor_device, non_blocking=True)\n done = done.to(flags.actor_device, non_blocking=True)\n\n if done.any().item():\n image_list = []\n for i in range(done.shape[1]):\n image_list.append(image_queue.get())\n image = torch.stack(image_list, dim=1)\n\n if flags.condition:\n image = image.to(flags.actor_device)\n condition = image\n else:\n condition = None\n\n agent_state = nest.map(\n lambda t: t.to(flags.actor_device, non_blocking=True), agent_state,\n )\n\n with lock:\n T, B, *_ = frame.shape\n noise = torch.randn(T, B, 10).to(flags.actor_device, non_blocking=True)\n model = model.eval()\n outputs = model(\n dict(\n obs=frame,\n condition=condition,\n action=action,\n noise=noise,\n done=done,\n ),\n agent_state,\n )\n\n outputs = nest.map(lambda t: t.cpu(), outputs)\n core_output, core_state = outputs\n\n batch.set_outputs((core_output, core_state, noise, image.cpu()))\n\n\nEnvOutput = collections.namedtuple(\n \"EnvOutput\", \"frame, reward, done, episode_step episode_return\"\n)\nAgentOutput = collections.namedtuple(\"AgentOutput\", \"action policy_logits baseline\")\nBatch = collections.namedtuple(\"Batch\", \"env agent\")\n\n\ndef learn(\n flags,\n 
learner_queue,\n d_queue,\n model,\n actor_model,\n D,\n optimizer,\n scheduler,\n stats,\n plogger,\n lock=threading.Lock(),\n):\n for tensors in learner_queue:\n tensors = nest.map(\n lambda t: t.to(flags.learner_device, non_blocking=True), tensors\n )\n\n batch, agent_state, image = tensors\n\n env_outputs, actor_outputs, noise = batch\n batch = (env_outputs, actor_outputs)\n frame, reward, done, *_ = env_outputs\n\n d_queue.put((frame, image.squeeze(0)))\n\n lock.acquire() # Only one thread learning at a time.\n optimizer.zero_grad()\n\n actor_outputs = AgentOutput._make(actor_outputs)\n\n if flags.condition:\n condition = image\n else:\n condition = None\n\n model = model.train()\n learner_outputs, agent_state = model(\n dict(\n obs=frame,\n condition=condition,\n action=actor_outputs.action,\n noise=noise,\n done=done,\n ),\n agent_state,\n )\n\n if flags.use_tca:\n frame = torch.flatten(frame, 0, 1)\n if flags.condition:\n condition = torch.flatten(condition, 0, 1)\n else:\n frame = frame[-1]\n if flags.condition:\n condition = condition[-1]\n\n D = D.eval()\n with torch.no_grad():\n if flags.condition:\n p = D(frame, condition).view(-1, flags.batch_size)\n else:\n p = D(frame).view(-1, flags.batch_size)\n\n if flags.use_tca:\n d_reward = p[1:] - p[:-1]\n reward = reward[1:] + d_reward\n else:\n reward[-1] = reward[-1] + p\n reward = reward[1:]\n\n # empty condition\n condition = None\n\n # Take final value function slice for bootstrapping.\n learner_outputs = AgentOutput._make(learner_outputs)\n bootstrap_value = learner_outputs.baseline[-1]\n\n # Move from obs[t] -> action[t] to action[t] -> obs[t].\n batch = nest.map(lambda t: t[1:], batch)\n learner_outputs = nest.map(lambda t: t[:-1], learner_outputs)\n\n # Turn into namedtuples again.\n env_outputs, actor_outputs = batch\n\n env_outputs = EnvOutput._make(env_outputs)\n actor_outputs = AgentOutput._make(actor_outputs)\n learner_outputs = AgentOutput._make(learner_outputs)\n\n discounts = (~env_outputs.done).float() * flags.discounting\n\n action = actor_outputs.action.unbind(dim=2)\n\n vtrace_returns = vtrace.from_logits(\n behavior_policy_logits=actor_outputs.policy_logits,\n target_policy_logits=learner_outputs.policy_logits,\n actions=action,\n discounts=discounts,\n rewards=reward,\n values=learner_outputs.baseline,\n bootstrap_value=bootstrap_value,\n )\n\n pg_loss = compute_policy_gradient_loss(\n learner_outputs.policy_logits, action, vtrace_returns.pg_advantages,\n )\n baseline_loss = flags.baseline_cost * compute_baseline_loss(\n vtrace_returns.vs - learner_outputs.baseline\n )\n entropy_loss = flags.entropy_cost * compute_entropy_loss(\n learner_outputs.policy_logits\n )\n\n total_loss = pg_loss + baseline_loss + entropy_loss\n\n total_loss.backward()\n\n nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)\n\n optimizer.step()\n scheduler.step()\n\n actor_model.load_state_dict(model.state_dict())\n\n stats[\"step\"] = stats.get(\"step\", 0) + flags.unroll_length * flags.batch_size\n stats[\"total_loss\"] = total_loss.item()\n stats[\"pg_loss\"] = pg_loss.item()\n stats[\"baseline_loss\"] = baseline_loss.item()\n stats[\"entropy_loss\"] = entropy_loss.item()\n stats[\"final_reward\"] = reward[-1].mean().item()\n stats[\"episode_reward\"] = reward.mean(dim=1).sum().item()\n stats[\"learner_queue_size\"] = learner_queue.size()\n\n if flags.condition:\n if flags.use_tca:\n _, C, H, W = frame.shape\n frame = frame.view(flags.unroll_length, flags.batch_size, C, H, W)\n frame = frame[-1]\n 
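A toy sketch of the temporal-credit-assignment shaping applied above in learn(): with --use_tca each step is rewarded by the change in discriminator score, so intermediate strokes get credit for improving the canvas (the scores below are made up):

import torch

env_reward = torch.zeros(5, 1)  # T x B, no shaping from the environment
d_scores = torch.tensor([[0.1], [0.2], [0.15], [0.4], [0.9]])  # D per step

d_reward = d_scores[1:] - d_scores[:-1]  # improvement between steps
shaped = env_reward[1:] + d_reward
print(shaped.squeeze(1))  # tensor([ 0.1000, -0.0500,  0.2500,  0.5000])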
stats[\"l2_loss\"] = F.mse_loss(frame, image.squeeze(0)).item()\n\n plogger.log(stats)\n lock.release()\n\n\nreal_label = 1.0\nfake_label = 0.0\n\n\ndef learn_D(\n flags,\n queue,\n D,\n D_eval,\n optimizer,\n scheduler,\n stats,\n plogger,\n lock=threading.Lock(),\n):\n while True:\n fake, real = nest.map(\n lambda t: t.to(flags.learner_device, non_blocking=True), queue.get()\n )\n\n if flags.condition:\n condition = real\n else:\n condition = None\n\n lock.acquire()\n optimizer.zero_grad()\n\n D = D.train()\n if flags.condition:\n p_real = D(real, condition).view(-1)\n else:\n p_real = D(real).view(-1)\n\n label = torch.full((flags.batch_size,), real_label, device=flags.learner_device)\n real_loss = F.binary_cross_entropy_with_logits(p_real, label)\n\n real_loss.backward()\n D_x = torch.sigmoid(p_real).mean()\n\n nn.utils.clip_grad_norm_(D.parameters(), flags.grad_norm_clipping)\n\n if flags.condition:\n T, *_ = fake.shape\n condition = condition.repeat(T, 1, 1, 1)\n\n fake = torch.flatten(fake, 0, 1)\n\n D = D.train()\n if flags.condition:\n p_fake = D(fake, condition).view(-1)\n else:\n p_fake = D(fake).view(-1)\n\n label.fill_(fake_label)\n fake_loss = F.binary_cross_entropy_with_logits(\n p_fake, label.repeat(flags.unroll_length + 1)\n )\n\n fake_loss.backward()\n D_G_z1 = torch.sigmoid(p_fake).mean()\n\n loss = real_loss + fake_loss\n\n nn.utils.clip_grad_norm_(D.parameters(), flags.grad_norm_clipping)\n\n optimizer.step()\n scheduler.step()\n\n D_eval.load_state_dict(D.state_dict())\n\n stats[\"D_loss\"] = loss.item()\n stats[\"fake_loss\"] = fake_loss.item()\n stats[\"real_loss\"] = real_loss.item()\n stats[\"D_x\"] = D_x.item()\n stats[\"D_G_z1\"] = D_G_z1.item()\n\n lock.release()\n\n\ndef data_loader(\n flags, dataloader, image_queue,\n):\n while True:\n for tensors in dataloader:\n if len(tensors) == 1:\n image = tensors\n elif len(tensors) <= 2:\n image = tensors[0]\n image_queue.put(image)\n\n\nBRUSHES_BASEDIR = os.path.join(os.getcwd(), \"third_party/mypaint-brushes-1.3.0\")\nBRUSHES_BASEDIR = os.path.abspath(BRUSHES_BASEDIR)\n\nSHADERS_BASEDIR = os.path.join(os.getcwd(), \"third_party/paint/shaders\")\nSHADERS_BASEDIR = os.path.abspath(SHADERS_BASEDIR)\n\n\ndef train(flags):\n if flags.xpid is None:\n flags.xpid = \"torchbeast-%s\" % time.strftime(\"%Y%m%d-%H%M%S\")\n plogger = file_writer.FileWriter(\n xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir\n )\n checkpointpath = os.path.expandvars(\n os.path.expanduser(\"%s/%s/%s\" % (flags.savedir, flags.xpid, \"model.tar\"))\n )\n\n if not flags.disable_cuda and torch.cuda.is_available():\n logging.info(\"Using CUDA.\")\n flags.learner_device = torch.device(\"cuda\")\n flags.actor_device = torch.device(\"cuda\")\n else:\n logging.info(\"Not using CUDA.\")\n flags.learner_device = torch.device(\"cpu\")\n flags.actor_device = torch.device(\"cpu\")\n\n if flags.max_learner_queue_size is None:\n flags.max_learner_queue_size = flags.batch_size\n\n # The queue the learner threads will get their data from.\n # Setting `minimum_batch_size == maximum_batch_size`\n # makes the batch size static.\n learner_queue = actorpool.BatchingQueue(\n batch_dim=1,\n minimum_batch_size=flags.batch_size,\n maximum_batch_size=flags.batch_size,\n check_inputs=True,\n maximum_queue_size=flags.max_learner_queue_size,\n )\n\n d_queue = Queue(maxsize=flags.max_learner_queue_size // flags.batch_size)\n image_queue = Queue(maxsize=flags.max_learner_queue_size)\n\n # The \"batcher\", a queue for the inference call. 
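actorpool.DynamicBatcher, set up just below, is implemented in C++, but the underlying idea, many actor threads enqueueing requests that are served in groups, can be sketched with the standard library (a loose analogy, not the real API):

import queue
import threading

requests = queue.Queue()

def actor(actor_id):
    requests.put(actor_id)  # stand-in for an observation needing inference

for i in range(8):
    threading.Thread(target=actor, args=(i,)).start()

# the "inference thread" drains up to a maximum batch size at a time
batch = [requests.get() for _ in range(4)]
print(len(batch))  # 4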
Will yield\n # \"batch\" objects with `get_inputs` and `set_outputs` methods.\n # The batch size of the tensors will be dynamic.\n inference_batcher = actorpool.DynamicBatcher(\n batch_dim=1,\n minimum_batch_size=1,\n maximum_batch_size=512,\n timeout_ms=100,\n check_outputs=True,\n )\n\n addresses = []\n connections_per_server = 1\n pipe_id = 0\n while len(addresses) < flags.num_actors:\n for _ in range(connections_per_server):\n addresses.append(f\"{flags.pipes_basename}.{pipe_id}\")\n if len(addresses) == flags.num_actors:\n break\n pipe_id += 1\n\n config = dict(\n episode_length=flags.episode_length,\n canvas_width=flags.canvas_width,\n grid_width=grid_width,\n brush_sizes=flags.brush_sizes,\n )\n\n if flags.dataset == \"celeba\" or flags.dataset == \"celeba-hq\":\n use_color = True\n else:\n use_color = False\n\n if flags.env_type == \"fluid\":\n env_name = \"Fluid\"\n config[\"shaders_basedir\"] = SHADERS_BASEDIR\n elif flags.env_type == \"libmypaint\":\n env_name = \"Libmypaint\"\n config.update(\n dict(\n brush_type=flags.brush_type,\n use_color=use_color,\n use_pressure=flags.use_pressure,\n use_alpha=False,\n background=\"white\",\n brushes_basedir=BRUSHES_BASEDIR,\n )\n )\n\n if flags.use_compound:\n env_name += \"-v1\"\n else:\n env_name += \"-v0\"\n\n env = env_wrapper.make_raw(env_name, config)\n if frame_width != flags.canvas_width:\n env = env_wrapper.WarpFrame(env, height=frame_width, width=frame_width)\n env = env_wrapper.wrap_pytorch(env)\n\n obs_shape = env.observation_space.shape\n if flags.condition:\n c, h, w = obs_shape\n c *= 2\n obs_shape = (c, h, w)\n\n action_shape = env.action_space.nvec.tolist()\n order = env.order\n env.close()\n\n model = models.Net(\n obs_shape=obs_shape,\n action_shape=action_shape,\n grid_shape=(grid_width, grid_width),\n order=order,\n )\n if flags.condition:\n model = models.Condition(model)\n model = model.to(device=flags.learner_device)\n\n actor_model = models.Net(\n obs_shape=obs_shape,\n action_shape=action_shape,\n grid_shape=(grid_width, grid_width),\n order=order,\n )\n if flags.condition:\n actor_model = models.Condition(actor_model)\n actor_model.to(device=flags.actor_device)\n\n D = models.Discriminator(obs_shape, flags.power_iters)\n if flags.condition:\n D = models.Conditional(D)\n D.to(device=flags.learner_device)\n\n D_eval = models.Discriminator(obs_shape, flags.power_iters)\n if flags.condition:\n D_eval = models.Conditional(D_eval)\n D_eval = D_eval.to(device=flags.learner_device)\n\n optimizer = optim.Adam(model.parameters(), lr=flags.policy_learning_rate)\n D_optimizer = optim.Adam(\n D.parameters(), lr=flags.discriminator_learning_rate, betas=(0.5, 0.999)\n )\n\n def lr_lambda(epoch):\n return (\n 1\n - min(epoch * flags.unroll_length * flags.batch_size, flags.total_steps)\n / flags.total_steps\n )\n\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)\n D_scheduler = torch.optim.lr_scheduler.LambdaLR(D_optimizer, lr_lambda)\n\n C, H, W = obs_shape\n if flags.condition:\n C //= 2\n # The ActorPool that will run `flags.num_actors` many loops.\n actors = actorpool.ActorPool(\n unroll_length=flags.unroll_length,\n learner_queue=learner_queue,\n inference_batcher=inference_batcher,\n env_server_addresses=addresses,\n initial_action=actor_model.initial_action(),\n initial_agent_state=actor_model.initial_state(),\n image=torch.zeros(1, 1, C, H, W),\n )\n\n def run():\n try:\n actors.run()\n print(\"actors are running\")\n except Exception as e:\n logging.error(\"Exception in actorpool thread!\")\n 
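A standalone check of the linear decay encoded by lr_lambda() above; step counts are shortened here so the multiplier is easy to follow:

import torch

total_steps = 100
steps_per_update = 10  # stands in for unroll_length * batch_size

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.LambdaLR(
    opt, lambda n: 1 - min(n * steps_per_update, total_steps) / total_steps
)

for _ in range(5):
    opt.step()
    sched.step()
print(sched.get_last_lr())  # approximately [0.05]: halfway through the decay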
traceback.print_exc()\n print()\n raise e\n\n actorpool_thread = threading.Thread(target=run, name=\"actorpool-thread\")\n\n c, h, w = obs_shape\n tsfm = transforms.Compose([transforms.Resize((h, w)), transforms.ToTensor()])\n\n dataset = flags.dataset\n\n if dataset == \"mnist\":\n dataset = MNIST(root=\"./\", train=True, transform=tsfm, download=True)\n elif dataset == \"omniglot\":\n dataset = Omniglot(root=\"./\", background=True, transform=tsfm, download=True)\n elif dataset == \"celeba\":\n dataset = CelebA(\n root=\"./\", split=\"train\", target_type=None, transform=tsfm, download=True\n )\n elif dataset == \"celeba-hq\":\n dataset = datasets.CelebAHQ(\n root=\"./\", split=\"train\", transform=tsfm, download=True\n )\n else:\n raise NotImplementedError\n\n dataloader = DataLoader(\n dataset, batch_size=1, shuffle=True, drop_last=True, pin_memory=True\n )\n\n stats = {}\n\n # Load state from a checkpoint, if possible.\n if os.path.exists(checkpointpath):\n checkpoint_states = torch.load(\n checkpointpath, map_location=flags.learner_device\n )\n model.load_state_dict(checkpoint_states[\"model_state_dict\"])\n D.load_state_dict(checkpoint_states[\"D_state_dict\"])\n optimizer.load_state_dict(checkpoint_states[\"optimizer_state_dict\"])\n D_optimizer.load_state_dict(checkpoint_states[\"D_optimizer_state_dict\"])\n scheduler.load_state_dict(checkpoint_states[\"D_scheduler_state_dict\"])\n D_scheduler.load_state_dict(checkpoint_states[\"scheduler_state_dict\"])\n stats = checkpoint_states[\"stats\"]\n logging.info(f\"Resuming preempted job, current stats:\\n{stats}\")\n\n # Initialize actor model like learner model.\n actor_model.load_state_dict(model.state_dict())\n D_eval.load_state_dict(D.state_dict())\n\n learner_threads = [\n threading.Thread(\n target=learn,\n name=\"learner-thread-%i\" % i,\n args=(\n flags,\n learner_queue,\n d_queue,\n model,\n actor_model,\n D_eval,\n optimizer,\n scheduler,\n stats,\n plogger,\n ),\n )\n for i in range(flags.num_learner_threads)\n ]\n inference_threads = [\n threading.Thread(\n target=inference,\n name=\"inference-thread-%i\" % i,\n args=(flags, inference_batcher, actor_model, image_queue,),\n )\n for i in range(flags.num_inference_threads)\n ]\n\n d_learner = [\n threading.Thread(\n target=learn_D,\n name=\"d_learner-thread-%i\" % i,\n args=(flags, d_queue, D, D_eval, D_optimizer, D_scheduler, stats, plogger,),\n )\n for i in range(flags.num_learner_threads)\n ]\n for thread in d_learner:\n thread.daemon = True\n\n dataloader_thread = threading.Thread(\n target=data_loader, args=(flags, dataloader, image_queue,)\n )\n dataloader_thread.daemon = True\n\n actorpool_thread.start()\n\n threads = learner_threads + inference_threads\n daemons = d_learner + [dataloader_thread]\n\n for t in threads + daemons:\n t.start()\n\n def checkpoint():\n if flags.disable_checkpoint:\n return\n logging.info(\"Saving checkpoint to %s\", checkpointpath)\n torch.save(\n {\n \"model_state_dict\": model.state_dict(),\n \"D_state_dict\": D.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"D_optimizer_state_dict\": D_optimizer.state_dict(),\n \"scheduler_state_dict\": scheduler.state_dict(),\n \"D_scheduler_state_dict\": D_scheduler.state_dict(),\n \"stats\": stats,\n \"flags\": vars(flags),\n },\n checkpointpath,\n )\n\n def format_value(x):\n return f\"{x:1.5}\" if isinstance(x, float) else str(x)\n\n try:\n last_checkpoint_time = timeit.default_timer()\n while True:\n start_time = timeit.default_timer()\n start_step = stats.get(\"step\", 0)\n if 
start_step >= flags.total_steps:\n break\n time.sleep(5)\n end_step = stats.get(\"step\", 0)\n\n if timeit.default_timer() - last_checkpoint_time > 10 * 60:\n # Save every 10 min.\n checkpoint()\n last_checkpoint_time = timeit.default_timer()\n\n logging.info(\n \"Step %i @ %.1f SPS. Inference batcher size: %i.\"\n \" Learner queue size: %i.\"\n \" Other stats: (%s)\",\n end_step,\n (end_step - start_step) / (timeit.default_timer() - start_time),\n inference_batcher.size(),\n learner_queue.size(),\n \", \".join(\n f\"{key} = {format_value(value)}\" for key, value in stats.items()\n ),\n )\n except KeyboardInterrupt:\n pass # Close properly.\n else:\n logging.info(\"Learning finished after %i steps.\", stats[\"step\"])\n checkpoint()\n\n # Done with learning. Stop all the ongoing work.\n inference_batcher.close()\n learner_queue.close()\n\n actorpool_thread.join()\n\n for t in threads:\n t.join()\n\n\ndef test(flags):\n if flags.xpid is None:\n checkpointpath = \"./latest/model.tar\"\n else:\n checkpointpath = os.path.expandvars(\n os.path.expanduser(\"%s/%s/%s\" % (flags.savedir, flags.xpid, \"model.tar\"))\n )\n\n config = dict(\n episode_length=flags.episode_length,\n canvas_width=flags.canvas_width,\n grid_width=grid_width,\n brush_sizes=flags.brush_sizes,\n )\n\n if flags.dataset == \"celeba\" or flags.dataset == \"celeba-hq\":\n use_color = True\n else:\n use_color = False\n\n if flags.env_type == \"fluid\":\n env_name = \"Fluid\"\n config[\"shaders_basedir\"] = SHADERS_BASEDIR\n elif flags.env_type == \"libmypaint\":\n env_name = \"Libmypaint\"\n config.update(\n dict(\n brush_type=flags.brush_type,\n use_color=use_color,\n use_pressure=flags.use_pressure,\n use_alpha=False,\n background=\"white\",\n brushes_basedir=BRUSHES_BASEDIR,\n )\n )\n\n if flags.use_compound:\n env_name += \"-v1\"\n config.update(\n dict(\n new_stroke_penalty=flags.new_stroke_penalty,\n stroke_length_penalty=flags.stroke_length_penalty,\n )\n )\n else:\n env_name += \"-v0\"\n\n env = env_wrapper.make_raw(env_name, config)\n if frame_width != flags.canvas_width:\n env = env_wrapper.WarpFrame(env, height=frame_width, width=frame_width)\n env = env_wrapper.wrap_pytorch(env)\n env = env_wrapper.AddDim(env)\n\n obs_shape = env.observation_space.shape\n if flags.condition:\n c, h, w = obs_shape\n c *= 2\n obs_shape = (c, h, w)\n\n action_shape = env.action_space.nvec.tolist()\n order = env.order\n\n model = models.Net(\n obs_shape=obs_shape,\n action_shape=action_shape,\n grid_shape=(grid_width, grid_width),\n order=order,\n )\n if flags.condition:\n model = models.Condition(model)\n model.eval()\n\n D = models.Discriminator(obs_shape, flags.power_iters)\n if flags.condition:\n D = models.Conditional(D)\n D.eval()\n\n checkpoint = torch.load(checkpointpath, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n D.load_state_dict(checkpoint[\"D_state_dict\"])\n\n if flags.condition:\n from random import randrange\n\n c, h, w = obs_shape\n tsfm = transforms.Compose([transforms.Resize((h, w)), transforms.ToTensor()])\n dataset = flags.dataset\n\n if dataset == \"mnist\":\n dataset = MNIST(root=\"./\", train=True, transform=tsfm, download=True)\n elif dataset == \"omniglot\":\n dataset = Omniglot(\n root=\"./\", background=True, transform=tsfm, download=True\n )\n elif dataset == \"celeba\":\n dataset = CelebA(\n root=\"./\",\n split=\"train\",\n target_type=None,\n transform=tsfm,\n download=True,\n )\n elif dataset == \"celeba-hq\":\n dataset = datasets.CelebAHQ(\n root=\"./\", 
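The monitoring loop above computes steps-per-second from the change in the shared stats dict across a fixed sleep, and checkpoints every ten minutes. A self-contained sketch of that pattern; fake_learner and the short sleep are stand-ins for the record's learner threads and its 5-second interval:

import time
import timeit

stats = {"step": 0}  # in the record this dict is updated by learner threads

def fake_learner():
    stats["step"] += 100  # stand-in for real learner progress

last_checkpoint_time = timeit.default_timer()
for _ in range(3):  # the record instead loops until flags.total_steps
    start_time = timeit.default_timer()
    start_step = stats.get("step", 0)
    time.sleep(0.5)
    fake_learner()
    end_step = stats.get("step", 0)
    sps = (end_step - start_step) / (timeit.default_timer() - start_time)
    print(f"Step {end_step} @ {sps:.1f} SPS")
    if timeit.default_timer() - last_checkpoint_time > 10 * 60:
        # checkpoint() would run here, then the timer is reset
        last_checkpoint_time = timeit.default_timer()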
split=\"train\", transform=tsfm, download=True\n )\n else:\n raise NotImplementedError\n\n condition = dataset[randrange(len(dataset))].view((1, 1) + obs_shape)\n else:\n condition = None\n\n frame = env.reset()\n action = model.initial_action()\n agent_state = model.initial_state()\n done = torch.tensor(False).view(1, 1)\n rewards = []\n frames = [frame]\n\n for i in range(flags.episode_length - 1):\n if flags.mode == \"test_render\":\n env.render()\n noise = torch.randn(1, 1, 10)\n agent_outputs, agent_state = model(\n dict(\n obs=frame, condition=condition, action=action, noise=noise, done=done,\n ),\n agent_state,\n )\n action, *_ = agent_outputs\n frame, reward, done, _ = env.step(action)\n\n rewards.append(reward)\n frames.append(frame)\n\n reward = torch.cat(rewards)\n frame = torch.cat(frames)\n\n if flags.use_tca:\n frame = torch.flatten(frame, 0, 1)\n if flags.condition:\n condition = torch.flatten(condition, 0, 1)\n else:\n frame = frame[-1]\n if flags.condition:\n condition = condition[-1]\n\n D = D.eval()\n with torch.no_grad():\n if flags.condition:\n p = D(frame, condition).view(-1, 1)\n else:\n p = D(frame).view(-1, 1)\n\n if flags.use_tca:\n d_reward = p[1:] - p[:-1]\n reward = reward[1:] + d_reward\n else:\n reward[-1] = reward[-1] + p\n reward = reward[1:]\n\n # empty condition\n condition = None\n\n logging.info(\n \"Episode ended after %d steps. Final reward: %.4f. Episode reward: %.4f,\",\n flags.episode_length,\n reward[-1].item(),\n reward.sum(),\n )\n env.close()\n\n\ndef main(flags):\n if not flags.pipes_basename.startswith(\"unix:\"):\n raise Exception(\"--pipes_basename has to be of the form unix:/some/path.\")\n\n if flags.mode != \"train\":\n flags.start_servers = False\n\n if flags.start_servers:\n\n if flags.env_type == \"fluid\":\n env_name = \"Fluid\"\n elif flags.env_type == \"libmypaint\":\n env_name = \"Libmypaint\"\n\n if flags.use_compound:\n env_name += \"-v1\"\n else:\n env_name += \"-v0\"\n\n command = [\n \"python\",\n \"-m\",\n \"torchbeast.polybeast_env\",\n f\"--num_servers={flags.num_actors}\",\n f\"--pipes_basename={flags.pipes_basename}\",\n f\"--env={env_name}\",\n f\"--env_type={flags.env_type}\",\n f\"--episode_length={flags.episode_length}\",\n f\"--canvas_width={flags.canvas_width}\",\n f\"--brush_sizes={flags.brush_sizes}\",\n f\"--new_stroke_penalty={flags.new_stroke_penalty}\",\n f\"--stroke_length_penalty={flags.stroke_length_penalty}\",\n ]\n\n if flags.env_type == \"fluid\":\n assert flags.dataset != \"omniglot\" and flags.dataset != \"mnist\"\n\n command.extend(\n [f\"--env={env_name}\", f\"--shaders_basedir={SHADERS_BASEDIR}\"]\n )\n elif flags.env_type == \"libmypaint\":\n if flags.dataset == \"celeba\" or flags.dataset == \"celeba-hq\":\n command.append(\"--use_color\")\n\n command.extend(\n [\n f\"--env={env_name}\",\n f\"--brush_type={flags.brush_type}\",\n f\"--background=white\",\n f\"--brushes_basedir={BRUSHES_BASEDIR}\",\n ]\n )\n\n if flags.use_pressure:\n command.append(\"--use_pressure\")\n\n logging.info(\"Starting servers with command: \" + \" \".join(command))\n server_proc = subprocess.Popen(command)\n\n if flags.mode == \"train\":\n if flags.write_profiler_trace:\n logging.info(\"Running with profiler.\")\n with torch.autograd.profiler.profile() as prof:\n train(flags)\n filename = \"chrome-%s.trace\" % time.strftime(\"%Y%m%d-%H%M%S\")\n logging.info(\"Writing profiler trace to '%s.gz'\", filename)\n prof.export_chrome_trace(filename)\n os.system(\"gzip %s\" % filename)\n else:\n train(flags)\n else:\n 
test(flags)\n\n if flags.start_servers:\n # Send Ctrl-c to servers.\n server_proc.send_signal(signal.SIGINT)\n\n\nif __name__ == \"__main__\":\n torch.backends.cudnn.benchmark = True\n\n flags = parser.parse_args()\n main(flags)\n","sub_path":"torchbeast/polybeast.py","file_name":"polybeast.py","file_ext":"py","file_size_in_byte":34935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"481601335","text":"from setuptools import setup, find_packages\r\nimport subprocess\r\nimport os\r\nfrom pathlib import Path\r\n\r\ndir_ = Path(__file__).parent\r\n\r\ndef subprocess_cmd(*commands, decode=True):\r\n cmdstr = ''\r\n for c in commands:\r\n cmdstr += (c + ' && ')\r\n cmdstr = cmdstr.strip(' &&')\r\n process = subprocess.Popen(cmdstr, stdout=subprocess.PIPE, shell=True)\r\n ret = process.communicate()[0]\r\n if (decode):\r\n ret = ret.decode('utf-8')\r\n return ret\r\n\r\nsubprocess_cmd('pip install -r {}'.format(dir_ / \"requirements.txt\"))\r\n\r\nsetup(\r\n name='noaahistory',\r\n description='dev package',\r\n author='rlyon14',\r\n author_email='rlyon14@yahoo.com',\r\n version='0.1.1',\r\n packages=['noaahistory',],\r\n url=\"https://github.com/rlyon14/noaahistory\",\r\n install_requires=(\r\n ),\r\n entry_points='''\r\n [console_scripts]\r\n weather=noaahistory.cli:cli\r\n ''',\r\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"279062424","text":"import requests\nimport re\nB = '\\033[1;3;94m' #blue\nW = '\\033[1;97m' # white\nheaders = {\n 'Host' : 'bing.com',\n 'User-Agent' : 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'Accept': 'text/html,application/html+xml,q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Connection': 'keep-alive',\n }\n\n\nwpdorks = {\n 'wysija' : 'inurl\":/wp-admin/admin-post.php?page=wysija_campaigns\"',\n 'blaze' : 'inurl:\"/wp-content/plugins/blaze-slide-show-for-wordpress/\"',\n 'catpro' : 'inurl:\"/wp-content/plugins/wp-catpro/\"',\n 'cherry' : 'inurl:\"/wp-content/plugins/cherry-plugin/\"',\n 'dm' : 'inurl:\"/wp-content/plugins/downloads-manager/\"',\n 'fromcraft' : 'inurl:\"/wp-content/plugins/formcraft/file-upload/\"',\n 'jobmanager' : 'inurl:\"/jm-ajax/upload_file/\"',\n 'showbiz' : 'inurl:\"/wp-admin/admin-ajax.php\"',\n 'synoptic' : 'inurl:\"/wp-content/themes/synoptic/lib/avatarupload\"',\n 'shop' : 'inurl:\"/wp-content/plugins/wpshop/includes/\"',\n 'injection' : 'inurl:\"/index.php/wp-json/wp/\"',\n 'powerzoomer' : 'inurl:\"/wp-admin/admin.php?page=powerzoomer_manage\"',\n 'revslider' : 'inurl \"/wp-content/plugins/revslider/\"',\n 'adsmanager' : 'inurl:\"/wp-content/plugins/simple-ads-manager/\"',\n 'inboundiomarketing': 'inurl:\"/wp-content/plugins/inboundio-marketing/\"',\n}\njoomla = {\n 'comjce' : 'inurl\":index.php?option=com_jce\"'\n}\n\ndef getdorksbyname(xname):\n if xname in wpdorks:\n return wpdorks[xname]\n\ndef searchengine(xname):\n webs = []\n bingquery = 'https://www.google.com/search?q=' + getdorksbyname(xname)\n res = requests.get(bingquery,headers).text\n dorks = re.findall(re.compile(r'\\w+/wp-content'),res)\n for plug in dorks:\n if plug not in webs:\n webs.append(plug)\n print ('%s [*] Plugins : %s %s' %(B,\" \\n [*] Plugins : 
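The setup.py record above installs its requirements by shelling out to pip from inside the setup script, which runs on every import of setup.py and bypasses pip's dependency resolver. A sketch of the conventional alternative, declaring them in install_requires; the package metadata is copied from the record, while the dependency list itself is a hypothetical placeholder, since the record reads it from requirements.txt:

from setuptools import setup

setup(
    name="noaahistory",
    version="0.1.1",
    packages=["noaahistory"],
    # Let pip resolve dependencies instead of subprocess-ing
    # "pip install -r requirements.txt" from inside setup.py.
    install_requires=[
        "requests",  # hypothetical: list the real requirements.txt entries here
    ],
    entry_points={
        "console_scripts": ["weather=noaahistory.cli:cli"],
    },
)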
\".join(webs),W))\n\n\nsearchengine('adsmanager')","sub_path":"common/vx_dorks.py","file_name":"vx_dorks.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"70881633","text":"import pgzrun # 导入游戏库\n\nWIDTH = 600 # 设置窗口的宽度\nHEIGHT = 800 # 设置窗口的高度\nplayerSpeed = 5 # 玩家水平移动速度\n\nalien = Actor('alien') # 导入玩家图片\nalien.x = WIDTH/2 # 设置玩家的x坐标\nalien.y = HEIGHT/5 # 设置玩家的y坐标\n\nbricks = [] # 存储所有砖块的列表,开始为空\nfor i in range(5):\n brick = Actor('brick') # 导入砖块图片\n brick.pos = 100*(i+1), 150*(i+1) # 设置砖块的(x,y)坐标\n bricks.append(brick) # 把当前砖块加入列表中\n\ndef draw(): # 绘制模块,每帧重复执行\n screen.clear() # 清空游戏画面\n alien.draw() # 绘制玩家\n for brick in bricks: # 绘制列表中每个砖块\n brick.draw() # 绘制砖块\n\ndef update(): # 更新模块,每帧重复操作\n isPlayerOnGround = False # 开始假设角色没有站在砖块上\n for brick in bricks: # 对列表中所有砖块遍历\n # 玩家正好站在砖块上面,在方块左右之间,可以左右移动\n if abs(alien.bottom-brick.top) < 5 \\\n and brick.left - alien.left < alien.width*2/3 \\\n and alien.right - brick.right < alien.width*2/3:\n \n isPlayerOnGround = True # 玩家在一块砖上\n alien.bottom = brick.top # 玩家跟着砖块一直向上移动\n\n if keyboard.left: # 如果按下键盘左键\n alien.x = alien.x - playerSpeed # 玩家左移\n if keyboard.right: # 如果按下键盘右键\n alien.x = alien.x + playerSpeed # 玩家右移\n \n if not isPlayerOnGround:\n alien.y += 5 # 玩家不在任何一块砖上,就下落\n\n for birck in bricks: # 所有砖块缓慢上移\n birck.y -= 1\n\n\npgzrun.go() # 开始执行游戏\n","sub_path":"PygameZero/python游戏趣味编程代码/第8章/8-5-1.py","file_name":"8-5-1.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"37948614","text":"# 自定义标签\nimport time\nfrom django import template\nimport platform\n\nregister=template.Library() # 实例化\n\n#实现功能:自定义一个标签,获取系统当前时间\n@register.simple_tag(name='current_time')\ndef get_current_time():\n timestr=time.strftime('%Y-%m-%d %H:%M:%S')\n return timestr\n#实现功能:自定义一个标签,获取当前操作系统\n@register.simple_tag(name='get_system')\ndef get_system():\n return platform.system()\n\n\n","sub_path":"MyDjango/business/templatetags/mytags.py","file_name":"mytags.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"276255874","text":"\"\"\"\nThis is the same function as in 18.py\nThe problem is the same, except 67 uses a larger triangle\n\"\"\"\n\ndef solve_18(file_name):\n\tf = open(file_name)\n\ttriangle = []\n\tfor line in f:\n\t\ttriangle.append([int(s) for s in line.split(' ')])\n\tf.close()\n\n\n\tbest_paths = []\n\n\tfor row in triangle:\n\t\tif not best_paths:\n\t\t\tbest_paths.append([(row[0], None)])\n\t\telse:\n\t\t\tbest_paths.append([])\n\t\t\tfor i,value in enumerate(row):\n\t\t\t\tn = len(row)\n\t\t\t\tif i == 0:\n\t\t\t\t\tprev = best_paths[-2][0][0]\n\t\t\t\t\tbest_paths[-1].append((value+prev, 0))\n\t\t\t\telif i == n-1:\n\t\t\t\t\tprev = best_paths[-2][i-1][0]\n\t\t\t\t\tbest_paths[-1].append((value+prev,i-1))\n\t\t\t\telse:\n\t\t\t\t\tpath1 = best_paths[-2][i-1][0] + value\n\t\t\t\t\tpath2 = best_paths[-2][i][0] + value\n\t\t\t\t\tif path1 > path2:\n\t\t\t\t\t\tbest_paths[-1].append((path1, i-1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tbest_paths[-1].append((path2, i))\n\n\tmax_val = float('-Inf')\n\tidx = 0\n\tfor i, tup in enumerate(best_paths[-1]):\n\t\tif tup[0] > max_val:\n\t\t\tmax_val = tup[0]\n\t\t\tprev = i\n\n\tpath = [prev]\n\ti = -1\n\twhile prev is not None:\n\t\tprev = best_paths[i][prev][1]\n\t\ti -= 
1\n\t\tpath.append(prev)\n\tpath.pop()\n\tpath.reverse()\n\n\treturn max_val, path\n\n\n\n\n\n\n\n","sub_path":"euler/67/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361112054","text":"from math import *\r\nimport random\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n\r\ndef f(x):\r\n try:\r\n return 1/x**3\r\n except:\r\n return inf\r\n\r\ndef integr(a,b):\r\n sum = 0\r\n N = 8000\r\n h = (b-a) / N\r\n for i in range (N):\r\n try:\r\n sum += h*(f(a+h*(i+1/2))+f(a+h*(i-1/2)))/2\r\n except:\r\n sum+= inf\r\n return sum\r\n\r\nprint(integr(-1, 1))","sub_path":"Task_4_13.py","file_name":"Task_4_13.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408395882","text":"from testmodule import *\nimport time\n\nclass TestWrites(TestRunner):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\tdef mthd(self):\n\n\t\timport pysharkbite\n\n\t\ttableOperations = super().getTableOperations()\n\n\t\tif not tableOperations.exists(False):\n\t\t print (\"Creating table\")\n\t\t if not tableOperations.create(False):\n\t\t \tprint(\"Could not create table\")\n\t\telse:\n\t\t\ttableOperations.remove()\n\t\t\ttime.sleep(2)\n\t\t\ttableOperations.create(False)\n\t\t\tprint (\"Table already exists, so not creating it\")\n\n\t\ttableOperations = super().newtableOperations()\n\t\tsplits = {\"row\",\"row5\",\"row9\", \"sow\"}\n\t\ttableOperations.addSplits(splits)\n\n\t\tauths = pysharkbite.Authorizations()\n\n\t\t\"\"\" Add authorizations \"\"\"\n\t\t\"\"\" mutation.put(\"cf\",\"cq\",\"cv\",1569786960) \"\"\"\n\n\t\twriter = tableOperations.createWriter(auths, 10)\n\n\t\trng = range(0,500)\n\t\tfor i in rng:\n\t\t\trow = (\"row%i\" % (i+5))\n\t\t\tmutation = pysharkbite.Mutation(row);\n\t\t\trng2 = range(0,100)\n\t\t\tfor j in rng:\n\t\t\t\tmutation.put(\"cf%i\" % j ,\"cq\",\"\",1569786960, \"value\")\n\t\t\twriter.addMutation(mutation)\n\t\t\tprint(\"wrote %i entries\" % ((i+1)*(j+1)))\n\n\t\twriter.close()\n\n\t\ttableOperations.compact(\"a\",\"s\",True)\n\n\t\tprint(\"written\")\n\n\t\tscanner = tableOperations.createScanner(auths, 2)\n\n\t\ttime.sleep(1)\n\n\t\taccumuloRange = pysharkbite.Range(\"a\",True,\"sow\",False)\n\n\t\tscanner.addRange( accumuloRange )\n\n\t\tresultset = scanner.getResultSet()\n\t\tcount=0\n\t\tfor keyvalue in resultset:\n\t\t\tkey = keyvalue.getKey()\n\t\t\tcount=count+1\n\t\tprint(\"count is \", count)\n\n\t\t\"\"\" delete your table if user did not create temp \"\"\"\n\n#\t\ttableOperations.remove()\n\n\nrunner = TestWrites()\nrunner.mthd()\n","sub_path":"test/performance/QuarterMillionWrites.py","file_name":"QuarterMillionWrites.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36508709","text":"import read\n\ndef remove_subdomains(domain):\n domain_to_string = str(domain)\n domain_ = domain_to_string.split('.')\n #if __name__ == '__main__': print(domain_)\n if len(domain_) > 2:\n return domain_[-2] + '.' 
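The 18.py record above tracks, for every row, both the best partial sum and a back-pointer for path recovery. When only the maximum path sum is needed (as in Project Euler #67 itself), folding the triangle from the bottom row upward is a common compact alternative:

def max_path_sum(triangle):
    # Work on a copy of the bottom row; each pass folds one row upward,
    # so the apex ends up holding the maximum path sum.
    best = list(triangle[-1])
    for row in reversed(triangle[:-1]):
        best = [v + max(best[i], best[i + 1]) for i, v in enumerate(row)]
    return best[0]

triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
print(max_path_sum(triangle))  # 23, the Project Euler #18 example answer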
+ domain_[-1]\n\ndf = read.load_data()\ndf['url'] = df['url'].apply(remove_subdomains)\ndomains = df['url'].value_counts()\n\nif __name__ == '__main__':\n for name,row in domains.items():\n print(\"{0}:{1}\".format(name,row))","sub_path":"Step_3_The_Command_Line/Guided_Project-_Transforming_data_with_Python/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475580429","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\nfrom geopy.geocoders import Here as Geolocator\nfrom geopy.exc import GeocoderTimedOut, GeocoderQuotaExceeded\nimport win_unicode_console\nimport math\nimport time\nimport os\n#I need import this because I have s problem with encoding in my notebook\nwin_unicode_console.enable()\n#I need import this because I have s problem with encoding in my notebook\n\npage_count = 91\ndate_column = []\nplace_column = []\nname_column = []\nlatitude_column = []\nlongitude_column = []\napp_id = os.environ[\"API_KEY_MAPS\"]\napp_code = os.environ[\"API_CODE_MAPS\"]\ngeolocator = Geolocator(app_id=app_id, app_code=app_code)\n\ndef do_geocode(place, attempts=0):\n if attempts > 3:\n print('Too many failed attempts.')\n return None\n elif attempts == 2:\n print('Good night!')\n time.sleep(3*60)\n return do_geocode(place, attempts + 1)\n\n try:\n return geolocator.geocode(place)\n #we need to handle connection problems to the server.\n except GeocoderTimedOut:\n print('GeocoderTimedOut!!!!')\n time.sleep(2)\n #recursion is a new attempt, it calls the the function itself\n return do_geocode(place, attempts + 1)\n #we need to handle quota of maximum requests per some time period\n except GeocoderQuotaExceeded:\n print('Quota exceeded!!!')\n time.sleep(10)\n #recursion is a new attempt, it calls the function itself\n return do_geocode(place, attempts + 1)\n\n\nfor i in range(0, math.ceil(page_count/10) + 1):\n first_page = i * 10\n last_page = first_page + 10\n for p in range(first_page,last_page):\n page = str(p)\n print('scrapuju stranku '+ page)\n my_url = 'https://ceskybeh.cz/terminovka/?region=0&dfrom=01.%2001.%202019&dto=31.%2003.%202021&rlength=0&rtype=0&advanced=1&search=&page=' + page\n uClient = uReq(my_url)\n page_html = uClient.read()\n uClient.close()\n page_soup = soup(page_html, \"html.parser\")\n\n\n # This function find list with 35 items, which have selector \"class = row\".\n # Unfortunately I am not able to find more relevant selector in the web page to find a specific container with get_races.\n containers = page_soup.findAll(\"div\", { \"class\" : \"row\"})\n #print(len(containers))\n # I found by trial-error method that container with relevant content is the 13th one.\n # So I need to select container with index 12.\n # print(container)\n container = containers[12]\n # This function will find a list of items, which contains two kinds of span with \"class = text-muted iframe-hidden\".\n # First is for date of the race, the second for the place of the race.\n date_place = container.findAll(\"span\", {\"class\" : \"text-muted iframe-hidden\"})\n # I decided to take a place of the race from another (for me more suitable part) of the webpage, so right now\n # I need only each second item from the list (starting from the first item).\n # I decided delete every second item from the list (starting from the second item).\n del date_place[1::2]\n dates = date_place\n for d in dates:\n d = dates[0]\n date_with_days = 
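remove_subdomains in the domains.py record above returns a value only when the name has more than two labels; bare domains like example.com fall through and return None, so pandas drops them from the value_counts. A sketch that keeps them:

def remove_subdomains(domain):
    parts = str(domain).split(".")
    if len(parts) > 2:
        return parts[-2] + "." + parts[-1]
    # Bare domains fall through to None in the record, which excludes
    # them from value_counts(); return them unchanged instead.
    return domain

print(remove_subdomains("blog.example.com"))  # example.com
print(remove_subdomains("example.com"))       # example.com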
d.text\n sliced_date = date_with_days.split(' ')\n day_with_one_digit = int(sliced_date[1].strip('.'))\n if day_with_one_digit < 10:\n day = '0' + str(day_with_one_digit)\n else:\n day = str(day_with_one_digit)\n #Because of our database I need to change names of months to numbers,\n #starting with 0, if the number of the month is smaller than 10. At first I made it with a condition,\n #but it is a quite long and same code. So I tried to make it through dictionary.\n #Keys of dictionary are names of months in the form of the word the same as at the page.\n names_of_months = { 'ledna': '01', 'února': '02', 'března': '03', 'dubna': '04', 'května': '05', 'června':'06', 'července': '07', 'srpna': '08', 'září': '09', 'října': '10', 'listopadu': '11', 'prosince': '12' }\n name_of_month = sliced_date[2]\n if name_of_month in names_of_months:\n month = names_of_months[name_of_month]\n else:\n raise RuntimeError(\"U závodu není uvedeno datum v potřebném formátu.\")\n year = sliced_date[3].replace(',', ' ').strip()\n sliced_date[1] = year\n sliced_date[2] = month\n sliced_date[3] = day\n # '-'.join(only_date) - previous used, not so understadable?\n #only_date = sliced_date[1:4]\n # the name of the column in our database in PostgreSQL should be the same -> \"date_of_race\" instead of \"datum_zavodu\"\n date_of_race = year + '-' + month + '-' + day\n date_column.append(date_of_race)\n\n place = container.findAll(\"p\", {\"class\" : \"iframe-visible cb-iframe-place\"})\n for p in place:\n # the name of the column in our database in PostgreSQL should be the same -> \"place_of_race\" instead of \"misto_zavodu\"\n place_of_race = p.text.strip('()')\n location = do_geocode(place_of_race)\n if location is None:\n latitude_column.append(.0)\n longitude_column.append(.0)\n else:\n latitude_column.append(location.latitude)\n longitude_column.append(location.longitude)\n place_column.append(place_of_race)\n\n name = container.findAll(\"h4\", {\"class\": \"mt-0 mb-0\"})\n for n in name:\n #the name of the column in our database in PostgreSQL should be the same -> \"name_of_race\" instead of \"nazev_zavodu\"\n #some of the names have \"DOPORUČUJEME\" in the name co we need to delete this word\n name_of_race = n.text.strip()\n if name_of_race.startswith('DOPORUČUJEME'):\n name_of_race = name_of_race[12:]\n name_column.append(name_of_race)\n print('Finished ten page, going to sleep for 10 seconds.')\n time.sleep(10)\n\nfilename = \"C:/Users/fischerova/Desktop/bezcisobe/get_races.csv\"\nf = open(filename, \"w\", encoding='UTF-8')\nheaders=\"datum_zavodu,misto_zavodu,latitude,longitude,nazev\\n\"\nf.write(headers)\n\nall_races = zip(date_column, place_column, latitude_column, longitude_column, name_column)\n\nfor race in all_races:\n #f.write(race[0] + ';\\'' + race[1] + '\\';\\'' + race[2] + '\\'' + '\\n')\n #f.write(race[0] + \";'\" + race[1] + \"';'\" + str(race[2]).strip('') + '\\';\\'' + str(race[3]).strip('') + '\\';\\'' + race[4] + '\\'' + '\\n')\n f.write(\"{};'{}';{};{};'{}'\\n\".format(race[0], race[1], str(race[2]), str(race[3]), race[4]))\nf.close()\n","sub_path":"races.py","file_name":"races.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"356138801","text":"import subprocess\n\ndef exec_string(cmdString):\n stdout = \"\"\n err = False\n\n try:\n stdout, _ = subprocess.Popen(cmdString, stdout=subprocess.PIPE, shell=True).communicate()\n\n except subprocess.CalledProcessError as e:\n stdout = \"error: 
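The do_geocode helper in the races.py record retries via recursion with fixed sleeps and a long nap on the third attempt. An iterative sketch of the same retry logic with exponential backoff, assuming geolocator is any configured geopy geocoder:

import time
from geopy.exc import GeocoderQuotaExceeded, GeocoderTimedOut

def do_geocode(geolocator, place, max_attempts=4):
    # Iterative variant of the record's recursive retry helper.
    for attempt in range(max_attempts):
        try:
            return geolocator.geocode(place)
        except GeocoderTimedOut:
            time.sleep(2 ** attempt)        # exponential backoff on timeouts
        except GeocoderQuotaExceeded:
            time.sleep(60 * (attempt + 1))  # wait longer when over quota
    print("Too many failed attempts.")
    return None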
{0}\".format(e)\n err = True\n\n return (stdout, err)\n\ndef get_first(arr):\n return arr[0]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18903700","text":"# More Printing\nprint(\"Mary had a little lamb.\")\nprint(\"Its fleece was white as {}\".format('snow')) # What happens here\nprint(\"And everywhere that Mary went.\")\nprint('.'*10) # What happens here\n\n# Whats your favorite food. Then spell it out\n# C U R L Y F R I E S\nfood1 = 'C'\nfood2 = 'U'\nfood3 = 'R'\nfood4 = 'L'\nfood5 = 'Y'\nfood6 = 'F'\nfood7 = 'R'\nfood8 = 'I'\nfood9 = 'E'\nfood10 = 'S'\n# Using String concatenation print out your favorite food?\nprint(food1 + food2 + food3 + food4 + food5 + ' '+ food6 + food7 + food8 + food9 + food10)","sub_path":"More Printing.py","file_name":"More Printing.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335468669","text":"\"\"\"added date of birth to scholarship info\n\nRevision ID: 8eeb833af62b\nRevises: 98af32e2c860\nCreate Date: 2019-04-25 09:21:30.243550\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8eeb833af62b'\ndown_revision = '98af32e2c860'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('scholarship_info', sa.Column('dob', sa.Date(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('scholarship_info', 'dob')\n # ### end Alembic commands ###\n","sub_path":"web/code/tm/utils/keywordsdw/versions/8eeb833af62b_added_date_of_birth_to_scholarship_info.py","file_name":"8eeb833af62b_added_date_of_birth_to_scholarship_info.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"236152744","text":"import gc\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\nIMG_WIDTH, IMG_HEIGHT = 150, 150\nTRAIN_DATA_DIR = 'train'\nVALIDATION_DATA_DIR = 'validation'\nNB_TRAIN_SAMPLES = 20\nNB_VALIDATION_SAMPLES = 20\nEPOCHS = 50\nBATCH_SIZE = 5\n\ndef build_model():\n if K.image_data_format() == 'channels_first':\n input_shape = (3, IMG_WIDTH, IMG_HEIGHT)\n else:\n input_shape = (IMG_WIDTH, IMG_HEIGHT, 3)\n \n model = Sequential()\n model.add(Conv2D(32, (3,3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n\n model.add(Conv2D(32, (3,3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n\n model.add(Conv2D(64, (3,3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n\n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy']\n )\n \n\n return model\n\ndef train_model(model):\n # This adds extra versions of the photos to improve the training\n train_datagen = 
ImageDataGenerator(\n rescale = 1 / 255,\n shear_range = 0.2,\n )\n\n # This is the augmentation configuration we will use for testing:\n # only rescaling\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_datagen.flow_from_directory(\n TRAIN_DATA_DIR,\n target_size=(IMG_WIDTH, IMG_HEIGHT),\n batch_size=BATCH_SIZE,\n class_mode='binary'\n )\n\n validation_generator = train_datagen.flow_from_directory(\n TRAIN_DATA_DIR,\n target_size=(IMG_WIDTH, IMG_HEIGHT),\n batch_size=BATCH_SIZE,\n class_mode='binary'\n )\n\n model.fit_generator(\n train_generator,\n steps_per_epoch = NB_VALIDATION_SAMPLES // BATCH_SIZE,\n epochs = EPOCHS,\n validation_data = validation_generator,\n validation_steps = NB_VALIDATION_SAMPLES // BATCH_SIZE\n )\n\n return model\n\ndef save_model(model):\n model.save('saved_model.h5')\n\n\n\n\n\n\n\ndef main():\n myModel = None\n K.clear_session()\n gc.collect()\n myModel = build_model()\n myModel = train_model(myModel)\n save_model(myModel)\n\n\n\nmain()","sub_path":"trainTheBrain.py","file_name":"trainTheBrain.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"492131770","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sylvain.hareng1/Documents/Github/gitlab_stats/tests/test_cli.py\n# Compiled at: 2018-08-07 12:27:32\n# Size of source mod 2**32: 2346 bytes\nimport os, tests, unittest\nfrom gitlab_stats.cli import *\nfrom unittest.mock import patch\n\nclass CLITest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n @tests.api_call\n def test_100_parse_args(self):\n if tests.PROXY:\n testargs = [\n '', str(tests.PROJECT_ID), '-p', tests.PROXY]\n else:\n testargs = [\n '', str(tests.PROJECT_ID)]\n with patch.object(sys, 'argv', testargs):\n result = main()\n self.assertEqual(result, 0)\n\n @tests.api_call\n def test_101_print_report(self):\n if tests.PROXY:\n testargs = [\n '', str(tests.PROJECT_ID), '-p', tests.PROXY, '-r']\n else:\n testargs = [\n '', str(tests.PROJECT_ID), '-r']\n with patch.object(sys, 'argv', testargs):\n main()\n self.assertTrue(os.path.isfile('output.csv'))\n\n def test_102_run_main(self):\n result = os.system('python ' + tests.ROOT_PATH + '/gitlab_stats/cli.py -h')\n self.assertEqual(result, 0)\n\n def test_200_parser_id(self):\n parser = parse_args(['123'])\n self.assertEqual(parser.id, '123')\n self.assertFalse(parser.report)\n self.assertEqual(parser.url, ['https://gitlab.com'])\n self.assertEqual(parser.proxy, [''])\n\n def test_201_parser_id_with_url(self):\n parser = parse_args(['123', '-u', 'https://myurl.com'])\n self.assertEqual(parser.url, ['https://myurl.com'])\n\n def test_202_parser_id_with_report(self):\n parser = parse_args(['123', '-r'])\n self.assertTrue(parser.report)\n\n def test_203_parser_id_with_proxy(self):\n parser = parse_args(['123', '-p', 'https://myproxy.com'])\n self.assertEqual(parser.proxy, ['https://myproxy.com'])\n\n def test_210_parser_id_with_url_and_report(self):\n parser = parse_args(['123', '-r', '-u', 'https://myurl.com'])\n self.assertTrue(parser.report)\n self.assertEqual(parser.url, ['https://myurl.com'])\n\n def test_211_parser_id_with_url_and_report_and_proxy(self):\n parser = parse_args(['123', '-r', '-u', 'https://myurl.com', '-p', 'https://myproxy.com'])\n self.assertTrue(parser.report)\n self.assertEqual(parser.url, ['https://myurl.com'])\n 
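In the trainTheBrain.py record above, the validation generator is built from train_datagen and pointed at TRAIN_DATA_DIR, so the model validates on augmented training images while the prepared test_datagen and VALIDATION_DATA_DIR go unused. A corrected sketch of that piece; it assumes a validation/ directory with one subfolder per class:

from keras.preprocessing.image import ImageDataGenerator

IMG_WIDTH, IMG_HEIGHT = 150, 150
VALIDATION_DATA_DIR = "validation"
BATCH_SIZE = 5

# Rescale-only generator for validation; augmentation belongs to training.
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

# The record points this at TRAIN_DATA_DIR via train_datagen; validation
# should read the held-out directory through the rescale-only generator.
validation_generator = test_datagen.flow_from_directory(
    VALIDATION_DATA_DIR,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode="binary",
)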
self.assertEqual(parser.proxy, ['https://myproxy.com'])\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/gitlab_stats-1.2.1-py3-none-any/test_cli.cpython-36.py","file_name":"test_cli.cpython-36.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611742970","text":"import octoprint.plugin\r\nimport octoprint.events\r\nfrom octoprint.server import printer\r\n\r\nfrom flask import jsonify, make_response\r\nimport os.path\r\nimport os\r\nimport datetime\r\nimport calendar\r\nimport sqlite3\r\nfrom tinydb import TinyDB, Query, where\r\nimport pandas as pd\r\nimport shutil\r\n\r\n'''\r\n Should probably transition this back over to an SQL based solution.\r\n If the database migration fails (migrate_v1), then the database can never be read until the user MANUALLY\r\n edits the file to remove the second JSON that apears in the JSON. As TinyDB relies on JSON.loads() internally,\r\n as can be seen from the stack trace, this causes the plugin to bge unable to read the database or write to it.\r\n Game plan: \r\n First:\r\n Call migrate_v1 if needed. If it fails, undo the damage by looking for the following stirng: u'}{'. This \r\n is garaunteed to be part of the problem. Use the file reader class prpared in JSON_READER and create a file\r\n writer class that will read and write the files character by character. Essentially, we will keep track of the\r\n last character (u'}') and the current character (u'{'). If it matches the termination string, we will stop\r\n writing. All of this is to increase performance and decrease memory cost. \r\n Second:\r\n use tinydb to read the new (or old) json file, and attempt to convert it to a database file compatible\r\n with sqlite. 
Again, we need to do this because a) sql is easier to manage than a json 'database', and b)\r\n we are trying to avoid the bug described in the 'preamble' above.\r\n'''\r\n\r\nclass StatsDB:\r\n def __init__(self, plugin):\r\n original_db=plugin._settings.global_get_basefolder(\"logs\") + \"/octoprint_stats.db\"\r\n new_db = plugin.get_plugin_data_folder() + \"/octoprint_stats.db\"\r\n original_json=plugin._settings.global_get_basefolder(\"logs\") + \"/octoprint_stats.json\"\r\n new_json = plugin.get_plugin_data_folder() + \"/octoprint_stats.json\"\r\n old_backup = original_db + \".bak\"\r\n new_db_backup = new_db + \".bak\"\r\n\r\n # Move old file to the data folder instead of the Octoprint\r\n # Log folder, as was originally designed\r\n if os.path.exists(original_db):\r\n shutil.move(original_db, new_db)\r\n # /if os.path.exists(original_db)\r\n if os.path.exists(original_json):\r\n shutil.move(original_json, new_json)\r\n # /if os.path.exists(original_json)\r\n if (os.path.exists(old_backup)):\r\n shutil.move(old_backup, new_db_backup)\r\n # /if (os.path.exists(old_backup))\r\n\r\n self.DB_NAME_V1 = new_db\r\n self.DB_NAME = new_json\r\n\r\n if os.path.exists(self.DB_NAME_V1) == True:\r\n self.migrate_v1()\r\n # /if os.path.exists(self.DB_NAME_V1)\r\n # /def __init__(self, plugin)\r\n\r\n def migrate_v1(self):\r\n conn = sqlite3.connect(self.DB_NAME_V1)\r\n db = conn.cursor()\r\n\r\n dbnew = TinyDB(self.DB_NAME)\r\n events = dbnew.table('events')\r\n\r\n # Migrate connected\r\n sql = \"SELECT event_time, port, baudrate FROM connected\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n port = row[1]\r\n baudrate = row[2]\r\n\r\n eventData = {'event_type': 'CONNECTED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'port': port,\r\n 'baudrate': baudrate\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate disconnected\r\n sql = \"SELECT event_time FROM disconnected\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n\r\n eventData = {'event_type': 'DISCONNECTED',\r\n 'data': {'event_time': event_time.__str__()}}\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate upload\r\n sql = \"SELECT event_time, file, target FROM upload\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n target = row[2]\r\n\r\n eventData = {'event_type': 'UPLOAD',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'target': target\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate print_started\r\n sql = \"SELECT event_time, file, origin, bed_target, tool0_target, tool1_target FROM print_started\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n origin = row[2]\r\n bed_target = row[3]\r\n tool0_target = row[4]\r\n tool1_target = row[5]\r\n\r\n eventData = {'event_type': 'PRINT_STARTED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'target': target\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate print_done\r\n sql = \"SELECT event_time, file, ptime, origin, bed_actual, tool0_actual, tool1_actual, tool0_volume, tool1_volume, tool0_length, tool1_length FROM print_done\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n ptime = row[2]\r\n origin = row[3]\r\n 
bed_actual = row[4]\r\n tool0_actual = row[5]\r\n tool1_actual = row[6]\r\n tool0_volume = row[7]\r\n tool1_volume = row[8]\r\n tool0_length = row[9]\r\n tool1_length = row[10]\r\n\r\n eventData = {'event_type': 'PRINT_DONE',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'ptime': ptime,\r\n 'origin': origin,\r\n 'bed_actual': bed_actual,\r\n 'tool0_actual': tool0_actual,\r\n 'tool1_actual': tool1_actual,\r\n 'tool0_volume': tool0_volume,\r\n 'tool1_volume': tool1_volume,\r\n 'tool0_length': tool0_length,\r\n 'tool1_length': tool1_length\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n #/for row in rows\r\n\r\n # Migrate print_failed\r\n sql = \"SELECT event_time, file, origin FROM print_failed\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n origin = row[2]\r\n\r\n eventData = {'event_type': 'PRINT_FAILED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'origin': origin\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate print_cancelled\r\n sql = \"SELECT event_time, file, origin FROM print_cancelled\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n origin = row[2]\r\n\r\n eventData = {'event_type': 'PRINT_CANCELLED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'origin': origin\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate print_paused\r\n sql = \"SELECT event_time, file, origin FROM print_paused\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n origin = row[2]\r\n\r\n eventData = {'event_type': 'PRINT_PAUSED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'origin': origin\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate print_resumed\r\n sql = \"SELECT event_time, file, origin FROM print_resumed\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n file = row[1]\r\n origin = row[2]\r\n\r\n eventData = {'event_type': 'PRINT_RESUMED',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'file': file,\r\n 'origin': origin\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n # Migrate error\r\n sql = \"SELECT event_time, perror FROM error\"\r\n db.execute(sql)\r\n rows = db.fetchall()\r\n for row in rows:\r\n event_time = row[0]\r\n perror = row[1]\r\n\r\n eventData = {'event_type': 'ERROR',\r\n 'data': {\r\n 'event_time': event_time.__str__(),\r\n 'error': perror\r\n }\r\n }\r\n\r\n events.insert(eventData)\r\n # /for row in rows\r\n\r\n os.rename(self.DB_NAME_V1, self.DB_NAME_V1 + \".bak\")\r\n # /def migrate_v1(self)\r\n\r\n def execute(self, data, document):\r\n # DB\r\n db = TinyDB(self.DB_NAME)\r\n doc = db.table(document)\r\n doc.insert(data)\r\n # /def execute(self, data, document)\r\n\r\n def query(self, search_data, document):\r\n # DB\r\n db = TinyDB(self.DB_NAME)\r\n doc = db.table(document)\r\n return doc.search(search_data)\r\n # /def query(self, search_data, document)\r\n\r\n def count(self, search_data, document):\r\n # DB\r\n db = TinyDB(self.DB_NAME)\r\n doc = db.table(document)\r\n return doc.count(search_data)\r\n # /def count(self, search_data, document)\r\n\r\n def getData(self, data):\r\n resData = []\r\n\r\n for row in data:\r\n resData.append(row[\"data\"])\r\n # /for row in 
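migrate_v1 repeats an almost identical fetch-and-insert loop for each of its ten event tables, and the PRINT_STARTED block even reuses the previous loop's target variable instead of its own origin and temperature columns. A hypothetical table-driven refactor; the SPECS entries shown are taken from the record's SQL, with the remaining tables following the same pattern:

import sqlite3
from tinydb import TinyDB

SPECS = [
    ("connected", ["event_time", "port", "baudrate"], "CONNECTED"),
    ("disconnected", ["event_time"], "DISCONNECTED"),
    ("upload", ["event_time", "file", "target"], "UPLOAD"),
    ("print_failed", ["event_time", "file", "origin"], "PRINT_FAILED"),
    # ... one (table, columns, event_type) entry per remaining table
]

def migrate(sqlite_path, json_path):
    conn = sqlite3.connect(sqlite_path)
    events = TinyDB(json_path).table("events")
    for table, columns, event_type in SPECS:
        cur = conn.execute("SELECT %s FROM %s" % (", ".join(columns), table))
        for row in cur.fetchall():
            # str() mirrors the record's event_time.__str__() conversion
            data = {col: str(val) for col, val in zip(columns, row)}
            events.insert({"event_type": event_type, "data": data})
    conn.close()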
data\r\n\r\n return resData\r\n # /def getData(self, data)\r\n# /class StatsDB","sub_path":"octoprint_stats/StatsDB.py","file_name":"StatsDB.py","file_ext":"py","file_size_in_byte":11084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536619452","text":"from django.urls import path\nfrom . import views\n\n\n\nurlpatterns = [\n path(\"\", views.project_index, name=\"project_index\"),\n path(\"/\", views.project_detail, name=\"project_detail\"),\n path(\"\"+\"categories//\", views.project_category, name=\"project_category\"),\n]\n","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"528408126","text":"#!/c/Python25 python\n#---------------------------------------------------------------------------------------\n\"\"\"\n BioDIM - Biologically scaled Dispersal Model\n Version 1.05b.1 (com pastas)\n \n Milton C. Ribeiro - mcr@rc.unesp.br\n Bernardo B. S. Niebuhr - bernardo_brandaum@yahoo.com.br\n John W. Ribeiro - jw.ribeiro.rc@gmail.com\n \n Laboratorio de Ecologia Espacial e Conservacao\n Universidade Estadual Paulista - UNESP\n Rio Claro - SP - Brasil\n \n BioDim description...\n\"\"\"\n#---------------------------------------------------------------------------------------\n\nimport grass.script as grass\nfrom PIL import Image\nimport wx\nimport random\nimport re\nimport time\nimport math\nimport os\n#from rpy2 import robjects\nfrom datetime import datetime\n\nID_ABOUT=101\nID_IBMCFG=102\nID_EXIT=110\n\n\n#---------------------------------------------------\n# Auxiliary modules\n#---------------------------------------------------\nfrom read_table import read_table\n#---------------------------------------------------\n# from head_split_up_line import head_split_up_line\n# this function is used in the read_landscape_head_ascii_standard module\n#---------------------------------------------------\nfrom distance_between_indiv import distance_between_indiv # leads with spatial resolution/distance\n#---------------------------------------------------\nfrom estimate_distedgePix import estimate_distedgePix # leads with spatial resolution/distance\n#---------------------------------------------------\nfrom estimate_effectivedistance import estimate_effectivedistance\n#---------------------------------------------------\n\n#---------------------------------------------------\n# Landscape modules\n#---------------------------------------------------\n#from select_landscape_grassnames import * \n#---------------------------------------------------\n#from export_raster_from_grass import * ### GRASS FUNCTIONS\n#---------------------------------------------------\n#from read_landscape_head_ascii_standard import read_landscape_head_ascii_standard\n# the three function above are used by pickup_one_landscape\n#---------------------------------------------------\nfrom pickup_one_landscape import pickup_one_landscape\n#---------------------------------------------------\nfrom color_pallete import color_pallete\n#---------------------------------------------------\nfrom getForest import * # leads with spatial resolution - PIXELS - nao precisa mexer\n#---------------------------------------------------\nfrom identify_patchid import identify_patchid # leads with spatial resolution - PIXELS\n#---------------------------------------------------\nfrom identify_habarea import identify_habarea # leads with spatial 
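The StatsDB preamble sketches a repair plan for the corruption it describes: scan the broken JSON file character by character and stop copying at the first '}{' boundary, where the stray second document begins. A minimal sketch of that scanner; the path names in the commented usage are hypothetical:

def truncate_corrupt_json(src_path, dst_path):
    # Copy src to dst one character at a time, tracking the previous
    # character; stop at the first "}{" junction so only the leading,
    # parseable JSON document survives.
    with open(src_path) as src, open(dst_path, "w") as dst:
        prev = ""
        while True:
            ch = src.read(1)
            if not ch:
                break
            if prev == "}" and ch == "{":
                break
            dst.write(ch)
            prev = ch

# truncate_corrupt_json("octoprint_stats.json", "octoprint_stats.fixed.json")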
resolution - PIXELS\n#---------------------------------------------------\nfrom check_landscaperange import check_landscaperange # leads with spatial resolution - PIXELS and distance\n#---------------------------------------------------\n\n#---------------------------------------------------\n# Genetic modules\n#---------------------------------------------------\nfrom gene_exchange import gene_exchange \n#---------------------------------------------------\nfrom LOCI_start import LOCI_start\n#---------------------------------------------------\n\n#---------------------------------------------------\n# Population modules\n#---------------------------------------------------\nfrom estimate_start_popsize import estimate_start_popsize # leads with spatial resolution - PIXELS AND AREA\n#---------------------------------------------------\nfrom populate import populate # leads with spatial resolution - PIXELS\n#---------------------------------------------------\nfrom check_overpopulation_onpatch import check_overpopulation_onpatch # leads with spatial resolution - PIXELS AND AREA\n#---------------------------------------------------\nfrom reset_isdispersing import reset_isdispersing # leads with spatial resolution - PIXELS AND AREA\n#---------------------------------------------------\n\n#---------------------------------------------------\n# Mortality modules\n#---------------------------------------------------\nfrom mortality import get_safetyness_mortality, kill_individual_new # leads with spatial resolution - PIXELS AND DISTANCE\n#---------------------------------------------------\ndef estimate_movement_cost(actualcost, distfromedgePix, aux_xy):\n protecdness = get_safetyness_mortality(tab_in=Form1.tab_safetyness, spatialresolution=Form1.spatialresolution, species_profile=Form1.species_profile, distPix=distfromedgePix)\n \n aux=[aux_xy]\n aux, changed_quadrante = check_landscaperange(aux, Form1.landscape_matrix)\n YY=aux[0][0]\n XX=aux[0][1] # leads with spatial resolution - PIXELS\n row=int(YY)\n col=int(XX)\n \n if Form1.UserBaseMap:\n habqualyOnPosition=1.0\n else:\n habqualyOnPosition=Form1.landscape_hqmqlq_quality[row][col]\n\n if protecdness<0.05:\n protecdness=0.05\n if protecdness>1:\n protecdness=1.0\n if habqualyOnPosition<0.05:\n habqualyOnPosition=0.05\n if habqualyOnPosition>1:\n habqualyOnPosition=1.0\n \n cost=actualcost+1.0/(protecdness*habqualyOnPosition)\n \n return cost\n\n#---------------------------------------------------\n\n#---------------------------------------------------\n# Movement modules\n#---------------------------------------------------\nfrom disperse_random_walk import disperse_random_walk\n#---------------------------------------------------\nfrom choose_dispersaldirection import choose_dispersaldirection #### WHY IS THAT!?\n#---------------------------------------------------\n\ndef get_listofposition(modified_indiv_xy_startpos):\n n_positions=20\n listofpositions=[]\n for pos in range(n_positions):\n deltaX=random.uniform(-Form1.movement_dist_sigma_pixel, Form1.movement_dist_sigma_pixel)\n deltaY=random.uniform(-Form1.movement_dist_sigma_pixel, Form1.movement_dist_sigma_pixel)\n listofpositions.append([modified_indiv_xy_startpos[0]+deltaX, modified_indiv_xy_startpos[1]+deltaY])\n\n listofpositions, changed_quadrante_psicologic = check_landscaperange(listofpositions, Form1.landscape_matrix)\n return listofpositions\n\n#----------------------------------------------------------------------\ndef OnHabitat(listposition): # leads with spatial resolution 
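estimate_movement_cost, defined above, accumulates a per-step cost equal to the reciprocal of the product of edge-distance safety and habitat quality, with both factors clamped to [0.05, 1]. A standalone sketch of that arithmetic, showing the resulting per-step range of 1 to 400:

def step_cost(actual_cost, protectedness, habitat_quality):
    # Clamp both factors to [0.05, 1] as the record does, so a single
    # step adds between 1 (safe, high quality) and 400 (worst case).
    p = min(max(protectedness, 0.05), 1.0)
    q = min(max(habitat_quality, 0.05), 1.0)
    return actual_cost + 1.0 / (p * q)

print(step_cost(0.0, 1.0, 1.0))    # 1.0: safe, high-quality habitat
print(step_cost(0.0, 0.05, 0.05))  # 400.0: maximum-risk matrix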
- PIXELS AND DISTANCE\n aux=[]\n distfromedgePix=[]\n for i in range(len(listposition)):\n aux.append([listposition[i][0],listposition[i][1]])\n \n OnHabitatList=[] #X, Y\n OnHabitatEdgedistPixList=[] #DIST from edge\n \n for position in range(len(aux)):\n aux, changed_quadrante=check_landscaperange(aux, Form1.landscape_matrix)\n YY=aux[position][0]\n XX=aux[position][1] \n row=int(YY)\n col=int(XX)\n distfromedgePix.append(Form1.landscape_habdist[row][col])\n\n\n ##'Random walk','Core dependent','Frag. dependent', 'Habitat dependent', 'Moderately generalist'\n \n if distfromedgePix[position]*Form1.spatialresolution<=0 and Form1.species_profile==\"Habitat dependent\":\n # <=0 above means the the full patch is considered.. \n # corridor (<60m) IS INCLUDED as \"habitat patch\"\n OnHabitatList.append([YY,XX])\n OnHabitatEdgedistPixList.append(Form1.landscape_habdist[row][col])\n \n if distfromedgePix[position]*Form1.spatialresolution<(-30) and Form1.species_profile==\"Frag. dependent\":\n \n # < (-30) means 30 meters from edge\n # so only the fragment is considered.. corridor (<60m)\n # is NOT INCLUDED as \"habitat patch\"\n OnHabitatList.append([YY,XX])\n OnHabitatEdgedistPixList.append(Form1.landscape_habdist[row][col])\n\n if distfromedgePix[position]*Form1.spatialresolution<(-30) and Form1.species_profile==\"Core dependent\":\n ### I COPYED THIS PART FROM ABOVE ON FEV2010 - CHECK CHECK CHECK m\n ##### check -30 !!!!\n # < (-30) means 30 meters from edge\n # so only the fragment is considered.. corridor (<60m)\n # is NOT INCLUDED as \"habitat patch\"\n OnHabitatList.append([YY,XX])\n OnHabitatEdgedistPixList.append(Form1.landscape_habdist[row][col])\n \n if distfromedgePix[position]*Form1.spatialresolution<=(+30) and Form1.species_profile==\"Moderately generalist\":\n # <=+30 above means the the full patch is considered.. \n # corridor (<60m) IS INCLUDED as \"habitat patch\"\n # Positions <30 of ANY HABITAT PATCH is considered within habitat patch\"\n \n OnHabitatList.append([YY,XX])\n OnHabitatEdgedistPixList.append(Form1.landscape_habdist[row][col])\n \n if distfromedgePix[position]*Form1.spatialresolution<=(+60) and Form1.species_profile==\"Highly generalist\":\n # <=+60 above means the the full patch is considered.. \n # corridor (<60m) IS INCLUDED as \"habitat patch\"\n # Positions <60 of ANY HABITAT PATCH is considered within habitat patch\"\n \n OnHabitatList.append([YY,XX])\n OnHabitatEdgedistPixList.append(Form1.landscape_habdist[row][col])\n\n return OnHabitatList, OnHabitatEdgedistPixList\n\n#----------------------------------------------------------------------\ndef disperse_habitat_dependent(indiv_xy, indiv_isdispersing,indiv_totaldistance,indiv_dispdirectionX,indiv_dispdirectionY):\n modified_indiv_xy=[]\n for i in range(len(indiv_xy)):\n modified_indiv_xy.append([indiv_xy[i][0],indiv_xy[i][1]])\n \n for indiv in range(len(modified_indiv_xy)):\n x1=modified_indiv_xy[indiv][0]\n y1=modified_indiv_xy[indiv][1]\n if indiv_isdispersing[indiv]==0:\n modified_indiv_xy_listposition=get_listofposition(modified_indiv_xy_startpos=modified_indiv_xy[indiv])\n modified_indiv_xy_listposition, distfromedgePix=OnHabitat(modified_indiv_xy_listposition)\n \n if len(modified_indiv_xy_listposition)>0:\n PROB_go_core_region=0\n\n if Form1.species_profile==\"Highly generalist\":\n PROB_go_core_region=0.05 \n if Form1.species_profile==\"Moderately generalist\":\n PROB_go_core_region=0.05\n if Form1.species_profile==\"Habitat dependent\":\n PROB_go_core_region=0.1\n if Form1.species_profile==\"Frag. 
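OnHabitat, above, tests the signed distance to the nearest habitat edge (negative values lie inside habitat) against a different threshold for each species profile. A hypothetical table-driven restatement of those rules, with distances in meters:

# Rules transcribed from OnHabitat's if-blocks; d is the signed
# edge distance in meters (distfromedgePix * spatialresolution).
PROFILE_RULES = {
    "Habitat dependent":     lambda d: d <= 0,
    "Frag. dependent":       lambda d: d < -30,
    "Core dependent":        lambda d: d < -30,
    "Moderately generalist": lambda d: d <= 30,
    "Highly generalist":     lambda d: d <= 60,
}

def on_habitat(dist_m, profile):
    return PROFILE_RULES[profile](dist_m)

print(on_habitat(-40, "Core dependent"))    # True: deep inside a fragment
print(on_habitat(20, "Habitat dependent"))  # False: 20 m outside habitat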
dependent\":\n PROB_go_core_region=0.3 \n if Form1.species_profile==\"Core dependent\":\n PROB_go_core_region=0.7\n \n if distfromedgePix[0]*Form1.spatialresolution< (-90):\n #when position in relation to edge is > (-) 80 m \n #then the individual can move freely\n #Based on Hansbauer et al 2008 - for C.caudata species\n PROB_go_core_region=0.05\n \n if random.uniform(0,1)500 or dist<0: #CHECK - I need to check when the distance is computed wrongly\n #print \"error on Total Distance estimation\\n\"\n dist=0\n indiv_totaldistance[indiv]+=dist\n ### faz sentido corrigir a distancia aqui?!?!\n \n modified_indiv_xy, changed_quadrant = check_landscaperange(modified_indiv_xy, Form1.landscape_matrix)\n return modified_indiv_xy, indiv_totaldistance, changed_quadrant\n\n\n#---------------------------------------------------\n# Output models\n#---------------------------------------------------\nfrom create_synthesis import create_synthesis\n#---------------------------------------------------\ndef organize_output(moment, grassname_habmat, isdispersing, isfemale, islive, totaldistance, effectivedistance, experiment_info, actualrun, actual_step, actual_movementcost, timestep_waslive, number_of_meetings, LOCI_start, LOCI_end, initpos, xy):\n \"\"\"\n Is there a problem (output name) if nruns > 9999? # Is \"myzeros\"really used??\n # !!!!!!!!!! a virgula esta correta????\n # da pra verificar se habqual ja existe, acho... se foi definida ali em cima, nao precisa recalcular\n \n This function writes the output - what piece of information is going to be saved,\n how files are going to look like etc.\n Input (Information to be saved):\n - moment:\n - grassname_habmat:\n - isdispersing:\n - isfemale:\n - islive:\n - totaldistance:\n - effectivedistance:\n - experiment_info:\n - actualrun:\n - actual_step:\n - actual_movementcost:\n - timestep_waslive:\n - number_of_meetings:\n - LOCI_start:\n - LOCI_end:\n - initpos: list of pairs (x,y) indicating the position of each animal in the beginning of a simulation\n - xy: list of pairs (x,y) indicating the position of each animal in the current step\n Output (files):\n - output_prefix_indiv_step.txt: summary of individual information during the simulation\n - output_prefix_landscape_step.txt: summary of landscape information during the simulation\n - output_prefix_indiv.txt: summary of individual information at the end of the simulation\n - output_prefix_landscape.txt: summary of landscape information at the end of the simulation\n \"\"\" \n\n if actualrun<9:\n myzeros=\"000\"\n elif actualrun<99:\n myzeros=\"00\"\n elif actualrun<999: \n myzeros=\"0\"\n else:\n myzeros=\"\"\n\n# for indiv in range(len(timestep_waslive)):\n# if timestep_waslive[indiv]==0:\n# timestep_waslive[indiv]=actual_step+1\n\n if moment==\"ongoingstep\":\n if Form1.output_store_ongoingsteps_indiv==1:\n output_filename_indiv=Form1.output_prefix+\"_indiv_step\"+\".txt\"\n file_output_indiv=open(output_filename_indiv,\"a\")\n if actual_step==0 and actualrun==0:\n file_output_indiv.write('experiment_info;actualrun;nruns;grassname_habmat;PLAND;CONFIG;HABQUAL;species_profile;include_quality;start_popsize;actual_step;timesteps;indiv;homerangesize;movdistpix;dispfactor;x0;y0;x;y;isdispersing;isfemale;islive;totaldistance;effectivedistance;actual_movementcost;timestep_waslivem;number_of_meetings;LOCI_start;LOCI_end\\n')\n for indiv in range(len(totaldistance)):\n file_output_indiv.write('%s;' % experiment_info)\n file_output_indiv.write('%s;' % str(actualrun+1))\n file_output_indiv.write('%s;' % 
Form1.numberruns)\n file_output_indiv.write('%s;' % grassname_habmat)\n file_output_indiv.write('%s;' % Form1.landscape_grassname_habmat[19:22])\n file_output_indiv.write('%s;' % Form1.landscape_grassname_habmat[24:27])\n \n HABQUAL=0\n for row in range(len(Form1.landscape_hqmqlq_quality)):\n for col in range(len(Form1.landscape_hqmqlq_quality[0])):\n HABQUAL+=Form1.landscape_hqmqlq_quality[row][col]\n HABQUAL=float(HABQUAL)/100.0\n HABQUAL=HABQUAL/(len(Form1.landscape_matrix)*len(Form1.landscape_matrix[0]))\n HABQUAL=round(HABQUAL*100,0)\n \n file_output_indiv.write('%s;' % str(HABQUAL))\n file_output_indiv.write('%s;' % Form1.species_profile)\n file_output_indiv.write('%s;' % Form1.include_habitatquality)\n file_output_indiv.write('%s;' % str(Form1.start_popsize))\n file_output_indiv.write('%s;' % str(actual_step+1))\n file_output_indiv.write('%s;' % str(Form1.timesteps))\n file_output_indiv.write('%s;' % str(indiv+1))\n file_output_indiv.write('%s;' % str(Form1.homerangesize))\n file_output_indiv.write('%s;' % str(Form1.movement_dist_sigma_pixel))\n file_output_indiv.write('%s;' % str(Form1.when_dispersing_distance_factor))\n file_output_indiv.write('%s;' % str(initpos[indiv][0]))\n file_output_indiv.write('%s;' % str(initpos[indiv][1]))\n file_output_indiv.write('%s;' % str(xy[indiv][0]))\n file_output_indiv.write('%s;' % str(xy[indiv][1])) \n file_output_indiv.write('%s;' % str(isdispersing[indiv]))\n file_output_indiv.write('%s;' % str(isfemale[indiv]))\n file_output_indiv.write('%s;' % str(islive[indiv])) \n file_output_indiv.write('%s;' % str(totaldistance[indiv]))\n file_output_indiv.write('%s;' % str(effectivedistance[indiv]))\n file_output_indiv.write('%s;' % str(actual_movementcost[indiv]))\n file_output_indiv.write('%s;' % str(timestep_waslive[indiv]))\n file_output_indiv.write('%s;' % str(number_of_meetings[indiv]))\n file_output_indiv.write('%s;' % str(LOCI_start[indiv]))\n file_output_indiv.write('%s' % str(LOCI_end[indiv]))\n file_output_indiv.write('\\n')\n file_output_indiv.close()\n \n if Form1.output_store_ongoingsteps_landscape==1:\n output_filename_landscape=Form1.output_prefix+\"_landscape_step\"+\".txt\"\n file_output_landscape=open(output_filename_landscape,\"a\")\n if actual_step==0 and actualrun==0:\n file_output_landscape.write('experiment_info;actualrun;nruns;grassname_habmat;PLAND;CONFIG;HABQUAL;species_profile;include_quality;start_popsize;actual_step;timesteps;homerangesize;movdistpix;dispfactor;isdispersing;isfemale;islive;totaldistance;effectivedistance;actual_movementcost;timestep_waslive;number_of_meetings\\n')\n file_output_landscape.write('%s;' % experiment_info)\n file_output_landscape.write('%s;' % str(actualrun+1))\n file_output_landscape.write('%s;' % Form1.numberruns)\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat)\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat[19:22]) ######### nessas linhas pode dar problema com os novos mapas\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat[24:27])\n \n HABQUAL=0\n for row in range(len(Form1.landscape_hqmqlq_quality)):\n for col in range(len(Form1.landscape_hqmqlq_quality[0])):\n HABQUAL+=Form1.landscape_hqmqlq_quality[row][col]\n HABQUAL=float(HABQUAL)/100.0\n HABQUAL=HABQUAL/(len(Form1.landscape_matrix)*len(Form1.landscape_matrix[0]))\n HABQUAL=round(HABQUAL*100,0)\n \n file_output_landscape.write('%s;' % str(HABQUAL))\n file_output_landscape.write('%s;' % Form1.species_profile)\n file_output_landscape.write('%s;' % 
Form1.include_habitatquality)\n file_output_landscape.write('%s;' % str(Form1.start_popsize))\n file_output_landscape.write('%s;' % str(actual_step+1)) \n file_output_landscape.write('%s;' % str(Form1.timesteps))\n file_output_landscape.write('%s;' % str(Form1.homerangesize))\n file_output_landscape.write('%s;' % str(Form1.movement_dist_sigma_pixel))\n file_output_landscape.write('%s;' % str(Form1.when_dispersing_distance_factor))\n file_output_landscape.write('%s;' % str(sum(isdispersing)))\n file_output_landscape.write('%s;' % str(sum(isfemale)))\n file_output_landscape.write('%s;' % str(sum(islive)))\n file_output_landscape.write('%s;' % str( round(float(sum(totaldistance)),2) ))\n file_output_landscape.write('%s;' % str( round(float(sum(effectivedistance)),2) ))\n file_output_landscape.write('%s;' % str( max(actual_movementcost) ))\n file_output_landscape.write('%s;' % str(sum(timestep_waslive)))\n file_output_landscape.write('%s' % str(sum(number_of_meetings)))\n file_output_landscape.write('\\n')\n file_output_landscape.close()\n\n \n \n \n if moment==\"summary_of_a_run\":\n if Form1.output_store_summary_indiv==1:\n output_filename_indiv=Form1.output_prefix+\"_indiv.txt\"\n file_output_indiv=open(output_filename_indiv,\"a\")\n for indiv in range(len(totaldistance)):\n if indiv==0:\n file_output_indiv.write('experiment_info;actualrun;nruns;grassname_habmat;PLAND;CONFIG;HABQUAL;species_profile;include_quality;start_popsize;timesteps;indiv;homerangesize;movdistpix;dispfactor;isdispersing;isfemale;islive;totaldistance;effectivedistance;actual_movementcost;timestep_waslive;number_of_meetings;LOCI_start;LOCI_end\\n')\n file_output_indiv.write('%s;' % experiment_info)\n file_output_indiv.write('%s;' % str(actualrun+1))\n file_output_indiv.write('%s;' % Form1.numberruns)\n file_output_indiv.write('%s;' % grassname_habmat)\n file_output_indiv.write('%s;' % Form1.landscape_grassname_habmat[19:22])\n file_output_indiv.write('%s;' % Form1.landscape_grassname_habmat[24:27])\n\n HABQUAL=0\n for row in range(len(Form1.landscape_hqmqlq_quality)):\n for col in range(len(Form1.landscape_hqmqlq_quality[0])):\n HABQUAL+=Form1.landscape_hqmqlq_quality[row][col]\n HABQUAL=float(HABQUAL)/100.0\n HABQUAL=HABQUAL/(len(Form1.landscape_matrix)*len(Form1.landscape_matrix[0]))\n HABQUAL=round(HABQUAL*100,0)\n \n file_output_indiv.write('%s;' % str(HABQUAL)) \n file_output_indiv.write('%s;' % Form1.species_profile)\n file_output_indiv.write('%s;' % Form1.include_habitatquality)\n file_output_indiv.write('%s;' % str(Form1.start_popsize))\n file_output_indiv.write('%s;' % str(Form1.timesteps))\n file_output_indiv.write('%s;' % str(indiv+1))\n file_output_indiv.write('%s;' % str(Form1.homerangesize))\n file_output_indiv.write('%s;' % str(Form1.movement_dist_sigma_pixel))\n file_output_indiv.write('%s;' % str(Form1.when_dispersing_distance_factor))\n file_output_indiv.write('%s;' % str(isdispersing[indiv]))\n file_output_indiv.write('%s;' % str(isfemale[indiv]))\n file_output_indiv.write('%s;' % str(islive[indiv]))\n file_output_indiv.write('%s;' % str(totaldistance[indiv]))\n file_output_indiv.write('%s;' % str(effectivedistance[indiv]))\n file_output_indiv.write('%s;' % str(actual_movementcost[indiv]))\n file_output_indiv.write('%s;' % str(timestep_waslive[indiv]))\n file_output_indiv.write('%s;' % str(number_of_meetings[indiv]))\n file_output_indiv.write('%s;' % str(LOCI_start[indiv]))\n file_output_indiv.write('%s' % str(LOCI_end[indiv]))\n file_output_indiv.write('\\n')\n file_output_indiv.close()\n \n 
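# --- A minimal sketch, not from the original BioDIM source: the HABQUAL computation
# above is repeated verbatim in four output branches. A helper computed once per run
# would remove the duplication; `quality` and `matrix` stand in for
# Form1.landscape_hqmqlq_quality and Form1.landscape_matrix.
def calc_habqual(quality, matrix):
    # Mean habitat quality over the whole landscape, rescaled to a 0-100 score.
    total = sum(sum(row) for row in quality)
    mean = (float(total) / 100.0) / (len(matrix) * len(matrix[0]))
    return round(mean * 100, 0)
# The ~25 sequential write('%s;' % ...) calls per record could likewise be collapsed
# into one join over a field list, e.g.:
#     file_output_indiv.write(';'.join(str(v) for v in fields) + '\n')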
create_synthesis(output_filename_indiv)\n \n if Form1.output_store_summary_landscape==1:\n output_filename_landscape=Form1.output_prefix+\"_landscape.txt\"\n file_output_landscape=open(output_filename_landscape,\"a\")\n if actualrun==0:\n file_output_landscape.write('experiment_info;actualrun;nruns;grassname_habmat;PLAND;CONFIG;HABQUAL;species_profile;include_quality;start_popsize;timesteps;homerangesize;movdistpix;dispfactor;isdispersing;isfemale;islive;totaldistance;effectivedistance;actual_movementcost;timestep_waslive;number_of_meetings\\n')\n file_output_landscape.write('%s;' % experiment_info)\n file_output_landscape.write('%s;' % str(actualrun+1))\n file_output_landscape.write('%s;' % Form1.numberruns)\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat)\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat[19:22])\n file_output_landscape.write('%s;' % Form1.landscape_grassname_habmat[24:27])\n \n HABQUAL=0\n for row in range(len(Form1.landscape_hqmqlq_quality)):\n for col in range(len(Form1.landscape_hqmqlq_quality[0])):\n HABQUAL+=Form1.landscape_hqmqlq_quality[row][col]\n HABQUAL=float(HABQUAL)/100.0\n HABQUAL=HABQUAL/(len(Form1.landscape_matrix)*len(Form1.landscape_matrix[0]))\n HABQUAL=round(HABQUAL*100,0)\n \n file_output_landscape.write('%s;' % str(HABQUAL))\n file_output_landscape.write('%s;' % Form1.species_profile)\n file_output_landscape.write('%s;' % Form1.include_habitatquality)\n file_output_landscape.write('%s;' % str(Form1.start_popsize))\n file_output_landscape.write('%s;' % str(Form1.timesteps))\n file_output_landscape.write('%s;' % str(Form1.homerangesize))\n file_output_landscape.write('%s;' % str(Form1.movement_dist_sigma_pixel))\n file_output_landscape.write('%s;' % str(Form1.when_dispersing_distance_factor)) \n file_output_landscape.write('%s;' % str(sum(isdispersing)))\n file_output_landscape.write('%s;' % str(sum(isfemale)))\n file_output_landscape.write('%s;' % str(sum(islive)))\n file_output_landscape.write('%s;' % str( round(float(sum(totaldistance)),2) ))\n file_output_landscape.write('%s;' % str( round(float(sum(effectivedistance)),2) ))\n file_output_landscape.write('%s;' % str(max(actual_movementcost)))\n file_output_landscape.write('%s;' % str(sum(timestep_waslive)))\n file_output_landscape.write('%s' % str(sum(number_of_meetings)))\n file_output_landscape.write('\\n')\n file_output_landscape.close()\n \n \n\n\n#---------------------------------------\ndef plot_walk(landscape_matrix, indiv_xy, aux_isdispersing, aux_islive, nruns, aux_isdispersingRESET, timestep):\n '''....'''\n\n #random.seed(123) #to force every individual have the same color\n #on each movement\n \n # Checking if there is a dir moves\n cur_dir = os.getcwd()\n moves_dir = cur_dir+'/moves'\n if not os.path.exists(moves_dir):\n os.makedirs(moves_dir)\n else:\n # precisa limpar as imagens que tem la??\n pass\n \n landscape_matrix_temp=[]\n for row in range(len(landscape_matrix)):\n landscape_matrix_temp.append(landscape_matrix[row])\n\n for num_of_indiv in range(len(indiv_xy)):\n if aux_islive[num_of_indiv]==0:\n numbpix=Form1.indivpixels_isNOTlive\n elif aux_isdispersing[num_of_indiv]==1:\n numbpix=Form1.indivpixels_isdispersing\n else:\n numbpix=Form1.indivpixels_whenmoving\n \n for pixelsX in range(-(int(numbpix/2)),(int(numbpix/2)+1)):\n for pixelsY in range(-(int(numbpix/2)),(int(numbpix/2)+1)):\n xp=int(indiv_xy[num_of_indiv][0]+pixelsX)\n yp=int(indiv_xy[num_of_indiv][1]+pixelsY)\n if xp>(len(landscape_matrix)-1):\n 
xp=(len(landscape_matrix)-1)\n if xp<0:\n xp=0\n if yp>(len(landscape_matrix)-1):\n yp=(len(landscape_matrix)-1)\n if yp<0:\n yp=0\n num_of_indiv_color=num_of_indiv\n while num_of_indiv_color>255:\n num_of_indiv_color=num_of_indiv_color-230\n \n landscape_matrix_temp[xp][yp]=10+num_of_indiv_color\n #this 10 is just to shift starting color\n #landscape_matrix_temp[xp][yp]=random.sample(range(10,255),1 )\n \n im = Image.new('P', (len(landscape_matrix),len(landscape_matrix))) # 'P' for palettized\n data = sum(landscape_matrix_temp, []) # flatten data\n im.putdata(data)\n\n pal = color_pallete(userbase = Form1.UserBaseMap)\n\n im.putpalette(pal)\n\n #im.save(Form1.background_filename[0])\n \n if nruns<9:\n myzeros=\"000\"\n elif nruns<99:\n myzeros=\"00\"\n elif nruns<999: \n myzeros=\"0\"\n else:\n myzeros=\"\"\n \n saverun=Form1.output_prefix+\"_run_\"+myzeros+str(nruns+1)+\".png\"\n #im.save(saverun)\n\n if timestep<9:\n myzerosTS=\"000\"\n elif timestep<99:\n myzerosTS=\"00\"\n elif timestep<999: \n myzerosTS=\"0\"\n else:\n myzerosTS=\"\"\n saverunTS=\"moves/\"+Form1.output_prefix+\"_run_\"+myzeros+str(nruns+1)+\"_TS_\"+myzerosTS+str(timestep+1)+\".png\"\n im2=im.copy()\n im2.save(saverunTS)\n \n random.seed() #to release the random.seed()\n\nclass Form1(wx.Panel):\n \n def __init__(self, parent, id):\n wx.Panel.__init__(self, parent, id)\n \n Form1.biodim_version = 'BioDIM v. 1.05b.1'\n #------------------------------------------------\n Form1.UserBaseMap=False\n \n Form1.defaultDIR = os.getcwd()\n Form1.tempDir='../temp'\n Form1.inputDir='../input'\n Form1.outputDir='../output'\n Form1.pyDir='../py'\n Form1.auxDir='../auxfiles'\n \n #------------------------------------------------\n # Initializing parameters\n \n Form1.include_habitatquality=\"HabitatQuality_NO\"\n Form1.plotmovements=0\n Form1.include_probdeath=0\n Form1.exportPNG=False\n \n # Mortality parameters - in the future we will not need that anymore...\n os.chdir(Form1.defaultDIR)\n os.chdir(Form1.inputDir)\n \n Form1.tab_safetyness=read_table(\"_models_safetyness.txt\")\n Form1.tab_mortality=read_table(\"_models_mortality.txt\")\n \n # Output parameters\n Form1.output_store_ongoingsteps_indiv=1\n Form1.output_store_ongoingsteps_landscape=1\n Form1.output_store_summary_indiv=1\n Form1.output_store_summary_landscape=1\n \n self.speciesList = ['Random walk','Core dependent','Frag. 
dependent', 'Habitat dependent', 'Moderately generalist', 'Highly generalist']\n\n Form1.species_profile=self.speciesList[3]\n \n Form1.start_popsize=5\n Form1.numberruns=100\n Form1.timesteps=200\n #***********************************************\n # list of maps\n if Form1.UserBaseMap:\n if Form1.exportPNG:\n Form1.background_filename=[\"random_landscape_habmat.png\",\"random_landscape_habdist.png\",\"random_landscape_habmat_pid.png\",\"random_landscape_habmat_areapix.png\",\"random_landscape_frag_pid.png\",\"random_landscape_frag_AREApix.png\"]\n else:\n Form1.background_filename=[\"random_landscape_habmat.png\"]\n else:\n if Form1.exportPNG:\n Form1.background_filename=[\"random_landscape_hqmqlq.png\",\"random_landscape_habmat.png\",\"random_landscape_habdist.png\",\"random_landscape_habmat_pid.png\",\"random_landscape_habmat_areapix.png\",\"random_landscape_hqmqlq_quality.png\",\"random_landscape_hqmqlq_AREAqual.png\",\"random_landscape_frag_pid.png\",\"random_landscape_frag_AREApix.png\",\"random_landscape_frag_AREAqual.png\"]\n else:\n Form1.background_filename=[\"random_landscape_hqmqlq.png\",\"random_landscape_habmat.png\"]\n \n Form1.background_filename_start=Form1.background_filename\n\n #LOCI Informations -----------------------------\n Form1.LOCI_structure=[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0],[0,0],[0,0,0,0,0,0]] # here we define the structure of the loci\n #####..........LOCI_structure=list of locus with its alleles \n Form1.proximity_between_indiv_meters_threshold=100\n Form1.LOCI_gene_exchange_rate=0.1\n\n #HOME RANGE Informations -----------------------------\n Form1.homerangesize=10\n Form1.changehomerangesize=0 #0=not change; 1=uniform distr; 2=normaldist\n ####--- if changehomerangesize=1\n ####............ P1=min; P2=max\n ####--- if changehomerangesize=2\n ####............ 
P1=mean; P2=sd\n Form1.changehomerangesize_P1=20\n Form1.changehomerangesize_P2=10\n #(END) HOME RANGE Informations -----------------------------\n \n # Plot walk settings\n Form1.indivpixels_whenmoving=1\n Form1.indivpixels_isdispersing=1\n Form1.indivpixels_isNOTlive=1\n \n # Movement settings\n Form1.movement_dist_sigma_pixel=2.0\n Form1.when_dispersing_distance_factor=3.0\n \n Form1.indiv_agemean = 100\n Form1.indiv_agestd = 20\n Form1.indiv_female_rate = 0.5\n \n #*************************************************\n Form1.spatialresolution=30 #resolution in meters\n Form1.output_prefix=\"_explandgen01_mca_t02\"\n \n #-------------------------------------------------\n # Initializing GUI\n \n self.quote = wx.StaticText(self, id=-1, label=Form1.biodim_version+\" - landscape genetic embedded\", pos=wx.Point(20, 30))\n \n font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)\n self.quote.SetForegroundColour(\"blue\")\n self.quote.SetFont(font)\n\n # ------------------------\n # Selecting a landscape and calculating initial population\n if Form1.UserBaseMap:\n Form1.landscape_head, Form1.landscape_matrix, Form1.landscape_grassname_habmat, Form1.landscape_habdist, Form1.landscape_habmat_pid, Form1.landscape_habmat_areapix, Form1.landscape_frag_pid, Form1.landscape_frag_AREApix, Form1.landscape_dila01clean_pid, Form1.landscape_dila01clean_AREApix, Form1.landscape_dila02clean_pid, Form1.landscape_dila02clean_AREApix=pickup_one_landscape(Form1.defaultDIR, Form1.inputDir, Form1.tempDir, userbasemap=Form1.UserBaseMap, exportPNG=Form1.exportPNG)\n else:\n Form1.landscape_head, Form1.landscape_matrix, Form1.landscape_grassname_habmat, Form1.landscape_habdist,Form1.landscape_habmat_pid,Form1.landscape_habmat_areapix,Form1.landscape_hqmqlq_quality,Form1.landscape_hqmqlq_AREAqual,Form1.landscape_frag_pid,Form1.landscape_frag_AREApix,Form1.landscape_frag_AREAqual,Form1.landscape_dila01clean_pid,Form1.landscape_dila01clean_AREApix,Form1.landscape_dila01clean_AREAqual,Form1.landscape_dila02clean_pid,Form1.landscape_dila02clean_AREApix,Form1.landscape_dila02clean_AREAqual=pickup_one_landscape(Form1.defaultDIR, Form1.inputDir, Form1.tempDir, userbasemap=Form1.UserBaseMap, exportPNG=Form1.exportPNG)\n \n if Form1.UserBaseMap:\n pland, forest=getForest_habmat(landscape_matrix = Form1.landscape_matrix)\n else:\n pland, forest=getForest(landscape_matrix = Form1.landscape_matrix) \n \n Form1.start_popsize=estimate_start_popsize(Form1.landscape_matrix, pland, Form1.homerangesize, Form1.spatialresolution)\n \n # ------------------------\n # Initializing GUI\n\n # A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it\n self.logger = wx.TextCtrl(self,5, \"\",wx.Point(20,380), wx.Size(320,100),wx.TE_MULTILINE | wx.TE_READONLY)\n # A button\n self.button =wx.Button(self, 10, \"START SIMULATION\", wx.Point(10, 500))\n wx.EVT_BUTTON(self, 10, self.OnClick)\n \n self.button =wx.Button(self, 9, \"change Background\", wx.Point(140, 500))\n wx.EVT_BUTTON(self, 9, self.OnClick)\n\n self.button =wx.Button(self, 11, \"change Landscape\", wx.Point(140, 530))\n wx.EVT_BUTTON(self, 11, self.OnClick)\n\n self.button =wx.Button(self, 8, \"EXIT\", wx.Point(260, 500))\n wx.EVT_BUTTON(self, 8, self.OnExit)\n\n ##------------ plot landscape image on wx.Panel\n os.chdir(Form1.defaultDIR)\n os.chdir(Form1.tempDir) #acho que vai aqui isso.. 
ele vai mudar as imagens que mostra no programa\n im = Image.new('P', (len(Form1.landscape_matrix),len(Form1.landscape_matrix[0]))) # 'P' for palettized\n data = sum(Form1.landscape_matrix, []) # flatten data\n im.putdata(data)\n pal = color_pallete(userbase = Form1.UserBaseMap)\n im.putpalette(pal)\n im.save(Form1.background_filename[0])\n \n imageFile=Form1.background_filename[0]\n im1 = Image.open(imageFile)\n jpg1 = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n wx.StaticBitmap(self, -1, jpg1, (450,30), (jpg1.GetWidth(), jpg1.GetHeight()), style=wx.SIMPLE_BORDER)\n \n ##------------ LElab_logo\n #setinput\n os.chdir(Form1.defaultDIR)\n os.chdir(Form1.auxDir) # os logos estao na pasta input\n imageFile = 'LeLab05.gif'\n im1 = Image.open(imageFile)\n jpg1 = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n wx.StaticBitmap(self, -1, jpg1, (180,205), (jpg1.GetWidth(), jpg1.GetHeight()), style=wx.SUNKEN_BORDER)\n \n # the edit control - one line version.\n self.lblname = wx.StaticText(self, -1, \"Output file name :\",wx.Point(20,60))\n self.editname = wx.TextCtrl(self, 20, Form1.output_prefix, wx.Point(150, 60), wx.Size(140,-1))\n wx.EVT_TEXT(self, 20, self.EvtText)\n wx.EVT_CHAR(self.editname, self.EvtChar)\n \n Form1.lblstart_popsize = wx.StaticText(self, -1, \"Pop Size :\",wx.Point(20,120))\n Form1.edtstart_popsize = wx.TextCtrl(self, 30, str(Form1.start_popsize), wx.Point(82, 120), wx.Size(35,-1))\n wx.EVT_TEXT(self, 30, self.EvtText)\n wx.EVT_CHAR(Form1.edtstart_popsize, self.EvtChar)\n \n Form1.lblmovement_dist_sigma_pixel = wx.StaticText(self, -1, \"Mov.Dist.Pix:\",wx.Point(125,120))\n Form1.edtmovement_dist_sigma_pixel = wx.TextCtrl(self, 80, str(Form1.movement_dist_sigma_pixel), wx.Point(195, 120), wx.Size(30,-1))\n wx.EVT_TEXT(self, 80, self.EvtText)\n wx.EVT_CHAR(Form1.edtmovement_dist_sigma_pixel, self.EvtChar)\n \n Form1.lblwhen_dispersing_distance_factor = wx.StaticText(self, -1, \"Dispers.Fact:\",wx.Point(125,150))\n Form1.edtwhen_dispersing_distance_factor = wx.TextCtrl(self, 81, str(Form1.when_dispersing_distance_factor), wx.Point(195, 150), wx.Size(30,-1))\n wx.EVT_TEXT(self, 81, self.EvtText)\n wx.EVT_CHAR(Form1.edtwhen_dispersing_distance_factor, self.EvtChar)\n \n Form1.lblnumberruns = wx.StaticText(self, -1, \"N. 
of Runs :\",wx.Point(235,120))\n Form1.edtnumberruns = wx.TextCtrl(self, 50, str(Form1.numberruns), wx.Point(305, 120), wx.Size(35,-1))\n wx.EVT_TEXT(self, 50, self.EvtText)\n wx.EVT_CHAR(Form1.edtnumberruns, self.EvtChar)\n \n Form1.lbltimesteps = wx.StaticText(self, -1, \"Time Steps :\",wx.Point(20,150))\n Form1.edttimesteps = wx.TextCtrl(self, 40, str(Form1.timesteps), wx.Point(82, 150), wx.Size(35,-1))\n wx.EVT_TEXT(self, 40, self.EvtText)\n wx.EVT_CHAR(Form1.edttimesteps, self.EvtChar)\n\n Form1.lblhomerangesize = wx.StaticText(self, -1, \"HRange(ha) :\",wx.Point(235,150))\n Form1.edthomerangesize = wx.TextCtrl(self, 60, str(Form1.homerangesize), wx.Point(305, 150), wx.Size(35,-1))\n wx.EVT_TEXT(self, 60, self.EvtText)\n wx.EVT_CHAR(Form1.edthomerangesize, self.EvtChar)\n\n Form1.lblindivpixels = wx.StaticText(self, -1, \"Indiv.Size (pix):\",wx.Point(225,90))\n Form1.edtindivpixels = wx.TextCtrl(self, 70, str(Form1.indivpixels_whenmoving), wx.Point(305, 90), wx.Size(35,-1))\n wx.EVT_TEXT(self, 70, self.EvtText)\n wx.EVT_CHAR(Form1.edtindivpixels, self.EvtChar)\n \n # the combobox Control\n\n self.lblspeciesList = wx.StaticText(self,-1,\"Species Profile:\",wx.Point(20, 90))\n self.editspeciesList=wx.ComboBox(self, 93, Form1.species_profile, wx.Point(100, 90), wx.Size(120, -1),\n self.speciesList, wx.CB_DROPDOWN)\n wx.EVT_COMBOBOX(self, 93, self.EvtComboBox)\n wx.EVT_TEXT(self, 93, self.EvtText)\n \n # Checkbox\n self.insure = wx.CheckBox(self, 91, \"Habitat quality on model\",wx.Point(20,180))\n wx.EVT_CHECKBOX(self, 91, self.EvtCheckBox)\n\n self.insure = wx.CheckBox(self, 94, \"Plot movements\",wx.Point(160,180))\n wx.EVT_CHECKBOX(self, 94, self.EvtCheckBox)\n\n self.insure = wx.CheckBox(self, 95, \"Prob.Death\",wx.Point(260,180))\n wx.EVT_CHECKBOX(self, 95, self.EvtCheckBox) \n \n ##################################################\n # Isso nao eh usado em lugar algum!!!\n # Radio Boxes\n #self.dispersiveList = ['1', '2', '3', '4', '5', '6',\n #'7', '8', '9', '10']\n #rb = wx.RadioBox(self, 92, \"Choose dispersive level\", wx.Point(20, 210), wx.DefaultSize,\n #self.dispersiveList, 2, wx.RA_SPECIFY_COLS)\n #wx.EVT_RADIOBOX(self, 92, self.EvtRadioBox)\n\n\n def EvtRadioBox(self, event):\n self.logger.AppendText('Dispersive behaviour: %d\\n' % (event.GetInt()+1))\n \n def EvtComboBox(self, event):\n if event.GetId()==93: #93==Species Profile Combo box\n Form1.species_profile=event.GetString()\n self.logger.AppendText('Species Profile: %s\\n' % event.GetString())\n else:\n self.logger.AppendText('EvtComboBox: NEED TO BE SPECIFIED' )\n \n def OnClick(self,event):\n self.logger.AppendText(\" Click on object with Id %d\\n\" %event.GetId())\n \n ###### Se formos tirar os pngs, precisamos mudar aqui\n if event.GetId()==9: #9==CHANGE BACKGROUND\n background_filename_list_aux=[]\n for i in range(len(Form1.background_filename)-1):\n background_filename_list_aux.append(Form1.background_filename[i+1])\n background_filename_list_aux.append(Form1.background_filename[0])\n Form1.background_filename=background_filename_list_aux\n \n self.logger.AppendText(\" New background ==> %s\\n\" % Form1.background_filename[0] )\n \n os.chdir(Form1.defaultDIR)\n os.chdir(Form1.tempDir)\n imageFile=Form1.background_filename[0]\n im1 = Image.open(imageFile)\n jpg1 = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n wx.StaticBitmap(self, -1, jpg1, (450,30), (jpg1.GetWidth(), jpg1.GetHeight()), style=wx.SIMPLE_BORDER)\n self.Refresh()\n \n ###### Se formos tirar os pngs, precisamos mudar aqui\n if 
event.GetId()==11: #11==CHANGE LANDSCAPE\n self.logger.AppendText(\" Picking up new landscape ... please wait\\n\")\n \n #pickupland scape\n if Form1.UserBaseMap:\n Form1.landscape_head, Form1.landscape_matrix, Form1.landscape_grassname_habmat, Form1.landscape_habdist, Form1.landscape_habmat_pid, Form1.landscape_habmat_areapix, Form1.landscape_frag_pid, Form1.landscape_frag_AREApix, Form1.landscape_dila01clean_pid, Form1.landscape_dila01clean_AREApix, Form1.landscape_dila02clean_pid, Form1.landscape_dila02clean_AREApix=pickup_one_landscape(Form1.defaultDIR, Form1.inputDir, Form1.tempDir, userbasemap=Form1.UserBaseMap, exportPNG=Form1.exportPNG)\n else:\n Form1.landscape_head, Form1.landscape_matrix, Form1.landscape_grassname_habmat, Form1.landscape_habdist, Form1.landscape_habmat_pid, Form1.landscape_habmat_areapix,Form1.landscape_hqmqlq_quality,Form1.landscape_hqmqlq_AREAqual,Form1.landscape_frag_pid,Form1.landscape_frag_AREApix,Form1.landscape_frag_AREAqual,Form1.landscape_dila01clean_pid,Form1.landscape_dila01clean_AREApix,Form1.landscape_dila01clean_AREAqual,Form1.landscape_dila02clean_pid,Form1.landscape_dila02clean_AREApix,Form1.landscape_dila02clean_AREAqual=pickup_one_landscape(Form1.defaultDIR, Form1.inputDir, Form1.tempDir, userbasemap=Form1.UserBaseMap, exportPNG=Form1.exportPNG)\n \n # background\n os.chdir(Form1.defaultDIR)\n os.chdir(Form1.tempDir) \n imageFile=Form1.background_filename_start[0]\n \n im = Image.new('P', (len(Form1.landscape_matrix),len(Form1.landscape_matrix[0]))) # 'P' for palettized\n data = sum(Form1.landscape_matrix, []) # flatten data\n im.putdata(data)\n pal = color_pallete(userbase = Form1.UserBaseMap)\n im.putpalette(pal)\n im.save(Form1.background_filename_start[0])\n \n imageFile=Form1.background_filename_start[0]\n #if Form1.plotmovements==1:\n im1 = Image.open(imageFile)\n jpg1 = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n wx.StaticBitmap(self, -1, jpg1, (450,30), (jpg1.GetWidth(), jpg1.GetHeight()), style=wx.SIMPLE_BORDER)\n \n Form1.background_filename=Form1.background_filename_start\n \n self.logger.AppendText(\" New landscape: %s\\n\" % Form1.landscape_grassname_habmat )\n self.Refresh()\n\n #-----------------------------------------------------\n # Here is the beginning of a run\n \n if event.GetId()==10: #10==START\n Form1.experiment_info=datetime.now()\n for nruns in range(Form1.numberruns):\n if nruns>=0:\n self.logger.AppendText(\" ...........\\n Simulation started...\\n\")\n self.logger.AppendText(\" ...Landscape: %s\\n\" % Form1.landscape_grassname_habmat)\n self.logger.AppendText(\" ...PopSize : %d\\n\" % Form1.start_popsize)\n self.logger.AppendText(\" ...NumSteps : %d\\n\" % Form1.timesteps)\n self.logger.AppendText(\" ...MovDstPix: %0.1f\\n\" % Form1.movement_dist_sigma_pixel)\n self.logger.AppendText(\" ...OnDispFact: %0.1f\\n\" % Form1.when_dispersing_distance_factor)\n self.logger.AppendText(\" ...HoRa.size: %d\\n\" % Form1.homerangesize)\n self.logger.AppendText(\" ...SpProfile: %s\\n\" % Form1.species_profile)\n #END if nruns==0:\n \n self.logger.AppendText(\".................................................\\n\")\n self.logger.AppendText(\"[RUN %s] :::\" % str(nruns+1))\n time_starting = time.clock()\n \n #----------------------\n # Initializing variables within a run\n if Form1.UserBaseMap:\n pland, forest=getForest_habmat(landscape_matrix = Form1.landscape_matrix)\n else:\n pland, forest=getForest(landscape_matrix = Form1.landscape_matrix)\n \n indiv_xy = populate(forest, Form1.start_popsize) # rows and cols 
in which animals are initially\n            \n            indiv_xy_initpos=[]\n            indiv_xy_quadrant=[]\n            indiv_totaldistance=[]\n            indiv_age = []\n            indiv_isfemale = []\n            \n            indiv_islive = []\n            indiv_islive_timestep_waslive = []\n            indiv_isdispersing = []\n            indiv_isdispersingRESET = []\n            indiv_distedgePix = []\n            indiv_whichpatchid = []\n            indiv_habareapix = []\n            indiv_dispdirectionX = []\n            indiv_dispdirectionY = []\n            indiv_movementcost = []\n            indiv_LOCI = []\n            indiv_LOCI_START = []\n            indiv_number_of_meetings= []\n            \n            \n            for num_of_indiv in range(len(indiv_xy)):\n                indiv_xy_initpos.append([indiv_xy[num_of_indiv][0],indiv_xy[num_of_indiv][1]])\n                indiv_age.append(abs(int(random.normalvariate(mu=Form1.indiv_agemean,sigma=Form1.indiv_agestd)))+1)\n                if (random.uniform(0,1)


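# --- A minimal sketch, not from the original source: organize_output and plot_walk
# above both rebuild the zero-padding prefix ("myzeros") with an if/elif ladder.
# Assuming run counts below 10000, as the docstring already questions, str.zfill
# produces the same file tag in one line:
def run_tag(actualrun):
    # 0 -> "0001", 41 -> "0042", 998 -> "0999"
    return str(actualrun + 1).zfill(4)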
\"\n\n    # Controller for incoming calls from the switchboard\n    @http.route('/portalalterno/central-calls/entrycalls', type='http', auth='none', website=False)\n    def get_call_data(self, *args, **kw):\n\n        inicio = datetime.datetime.fromtimestamp((int(\n            kw['inicio'])/1e3))\n\n        fin = datetime.datetime.fromtimestamp((int(\n            kw['finaliz'])/1e3))\n\n        if str(kw['callee'])[0] == '0':\n            llamado = str(kw['callee'])[1:]\n\n        else:\n            llamado = str(kw['callee'])\n        \"\"\"\n        if kw['duracion']:\n            duracion = datetime.datetime.fromtimestamp(float(\n                kw['duracion'])/1e3).strftime('%H:%M:%S')\n        \"\"\"\n\n        duracion = float((fin - inicio).seconds)\n        duracion = (duracion)/60\n\n        call = {\n            'caller': 'caller' in kw and kw['caller'].capitalize() or '',\n            'llamado': llamado,\n            'user': 'user' in kw and kw['user'].capitalize() or '',\n            'extension': 'extension' in kw and kw['extension'] or '',\n            'inicio': inicio.strftime('%Y-%m-%d %H:%M:%S'),\n            'duracion': duracion,\n            'fin': fin.strftime('%Y-%m-%d %H:%M:%S'),\n            'entidad': 'entidad' in kw and kw['entidad'].capitalize() or '',\n            'other': kw,\n            'name': llamado + ' - ' + kw['extension']\n        }\n\n        tmp_obj = http.request.env['crm.temp.call']\n        if tmp_obj.sudo().create(call):\n            return '{\"response\": \"OK\"}'","sub_path":"modules/isep_custom/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"644982806","text":"import numpy as np\nimport surprise as env\nfrom scipy import sparse\n\n\nclass MF3(env.AlgoBase):\n    \"\"\"\n    batch gradient boosting matrix factorization\n    \"\"\"\n    def __init__(\n            self,\n            factor_num=10,\n            max_iter=500,\n            learning_rate=0.001,\n            reg=0.1,\n            batch_size=100,\n            batch_factor=1,\n            sgd=False,\n            bias=True):\n\n        env.AlgoBase.__init__(self)\n        self.k = factor_num\n        self.maxiter = max_iter\n        self.eta = learning_rate\n        self.ifbias = bias\n        self.reg = reg\n        self.batch = batch_size\n        self.withsgd = sgd\n        self.step = batch_factor\n        self.est = None\n        self.mu = None\n        self.bu = None\n        self.bi = None\n\n    def train(self, trainset):\n\n        env.AlgoBase.train(self, trainset)\n        user_num = self.trainset.n_users\n        item_num = self.trainset.n_items\n        mu = self.trainset.global_mean\n        bu = np.random.random([user_num, 1])\n        bi = np.random.random([item_num, 1])\n        P = np.random.random((user_num, self.k)) / 10\n        Q = np.random.random((item_num, self.k)) / 10\n\n        lil_rating = sparse.lil_matrix((user_num, item_num))\n\n        for u, i, r in self.trainset.all_ratings():\n            lil_rating[u, i] = r\n\n        dok_rating = sparse.dok_matrix(lil_rating)\n        rating_num = dok_rating.nnz\n        uir_list = list(dok_rating.items())\n\n        for f in range(0, self.k, self.step):\n            print(\"-\" * 12 + str(f) + \"-\" * 12)\n            for iter_i in range(self.maxiter):\n                square_loss = 0\n\n                if self.withsgd:\n                    # Stochastic Gradient Descent\n                    batch_index = np.random.choice(rating_num, self.batch)\n                else:\n                    # Gradient Descent for all data\n                    batch_index = range(rating_num)\n\n                for index in batch_index:\n                    (u, i), r = uir_list[index]\n\n                    hat = mu + bu[u] + bi[i] + \\\n                        np.dot(P[u, :f + 1], Q[i, :f + 1])\n                    err = r - hat\n\n                    if self.ifbias:\n                        bu[u] += self.eta * (err - self.reg * bu[u])\n                        bi[i] += self.eta * (err - self.reg * bi[i])\n\n                    P[u, f - self.step:f + 1] += self.eta * (\n                        err * Q[i, f - self.step:f + 1] - self.reg * P[u, f - self.step:f + 1])\n                    Q[i, f - self.step:f + 1] += self.eta * (\n                        err * P[u, f - self.step:f + 1] - self.reg * Q[i, f - self.step:f + 1])\n                    square_loss += (r - hat) ** 2\n                loss = 0.5 * 
square_loss + self.reg * (\n np.sum(bu ** 2) + np.sum(bi ** 2) + np.sum(P ** 2) + np.sum(Q ** 2))\n print(\"iteration at \" + str(iter_i + 1) + \" loss: \" + str(loss))\n\n estimator = np.dot(P, Q.T)\n self.est = estimator\n self.mu = mu\n self.bu = bu\n self.bi = bi\n\n def estimate(self, u, i):\n\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):\n print('unknown input: u-->' + str(u) + ' i-->' + str(i))\n raise env.PredictionImpossible('User and/or item is unkown.')\n\n bias = self.mu + self.bu[u] + self.bi[i] if self.ifbias else 0\n return self.est[u, i] + bias\n\n\nif __name__ == '__main__':\n # builtin dataset\n # data = env.Dataset.load_builtin('ml-100k')\n\n # =============================== load data ============================\n # ml-latest-small\n # file_path = 'input/ml-latest-small/ratings.csv'\n # reader = env.Reader(line_format='user item rating timestamp', sep=',', skip_lines=1)\n # ------------------------------------------------------------------------------\n # ml-100k\n file_path = 'input/ml-100k/u.data'\n reader = env.Reader(\n line_format='user item rating timestamp',\n sep='\\t',\n skip_lines=1)\n # ------------------------------------------------------------------------------\n # ml-20m\n # file_path = 'input/ml-20m/ratings.csv'\n # reader = env.Reader(line_format='user item rating timestamp', sep=',', skip_lines=1)\n # ==============================================================================\n\n data = env.Dataset.load_from_file(file_path, reader=reader)\n data.split(n_folds=5)\n\n # define algorithm\n algo = MF3(factor_num=20,\n max_iter=100,\n learning_rate=0.001,\n reg=0.1,\n batch_size=100,\n batch_factor=2,\n sgd=False,\n bias=True)\n\n # evaluate\n env.evaluate(algo, data, measures=['rmse', 'mae', 'fcp'])\n","sub_path":"RecLab/mf_v3.py","file_name":"mf_v3.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"386051338","text":"\"\"\"\nBeginning Python From Novice to Professional\n\"\"\"\n\ndef check_index(key):\n \"\"\"\n 指定的键位是否可接受的索引\n \"\"\"\n if not isinstance(key, int):\n raise TypeError\n if key < 0:\n raise IndexError\n\nclass ArithmeticsSequence:\n \"\"\"\n 基本的序列和映射协议\n __len__ __getitem__ __setitem__ __delitem__\n \"\"\"\n def __init__(self, start=0, step=1):\n \"\"\"\n 初始化算数队列\n \"\"\"\n self.start = start\n self.step = step\n self.changed = {}\n \n def __getitem__(self, key):\n check_index(key)\n try:\n return self.changed[key]\n except KeyError:\n return self.start + key * self.step\n \n def __setitem__(self, key, value):\n check_index(key)\n self.changed[key] = value\n\ns = ArithmeticsSequence(1, 2)\ns[9] = 10\nprint(s[4], s[9])\n\n# del s[9]\n# AttributeError: __delitem__\n","sub_path":"魔法方法、特性和迭代器/example/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"288002791","text":"import logging\n\nfrom src import download_info\nfrom src import downloader\nfrom src import finder\nfrom src import itemizator\n\ngreen = \"\\033[92m\"\nend = \"\\033[0m\"\n\nlogger = logging.getLogger('myApp')\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n\ndef find_artist_albums(artist_name):\n try:\n items = itemizator.list(finder.find(artist_name))\n except ValueError:\n return {'Error': 'Albums not found'}\n albums = []\n for i in range(len(items[\"albums\"])):\n albums.append(\"{0}) 
Album: {1}, Artist: {2}, Id: {3}\".format(\n i + 1, green + items[\"albums\"][i][\"title\"] + end,\n items[\"albums\"][i][\"artist\"], items[\"albums\"][i][\"id\"]))\n\n tracks = []\n for item in items[\"tracks\"]:\n tracks.append(\"Track: {0}, Album: {1}, Artist: {2}, Year: {3}\"\n .format(item[\"title\"], item[\"album\"], item[\"artist\"], item[\"year\"]))\n return {\n 'albums': albums,\n 'tracks': tracks\n }\n\n\ndef get_album(artist_and_album, lang):\n if lang == \"ru\":\n items = itemizator.list_rus(finder.find(artist_and_album))\n else:\n items = itemizator.list(finder.find(artist_and_album))\n logger.info(items)\n tracks_info = download_info.tracks_from_album_info(items[\"albums\"][0]['id'])\n downloader.download_album(items[\"albums\"][0], tracks_info, lang)\n print('Downloaded {} files'.format(len(tracks_info)))\n return {'Status': 'start download {}'.format(artist_and_album)}\n\n\ndef get_track(track, lang):\n if lang == \"ru\":\n items = itemizator.list_rus(finder.find(track))\n else:\n items = itemizator.list(finder.find(track))\n tracks_info = download_info.tracks_from_album_info(items[\"albums\"][0]['id'])\n downloader.download_album(items[\"albums\"][0], [tracks_info[0], ], lang)\n return {'Status': 'start download {}'.format(track)}\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"573299802","text":"#!/usr/local/bin/python3\n\na = \"21-22,49-51,60-63\" # 2 + 3 + 4 = 9 \nb = \"1-9,10,11,15-16\" # 9 + 1 + 1 + 2 = 13\n\ndef countRangesInString(s):\n t = 0\n for i in s.split(','):\n if \"-\" in i:\n beginning = int(i.split('-')[0])\n end = int(i.split('-')[1])\n t = t + len(range(beginning, end+1))\n else:\n t = t + 1\n return t\n\nprint(countRangesInString(a))\nprint(countRangesInString(b))\n","sub_path":"countRangesInString.py","file_name":"countRangesInString.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34705206","text":"import json\nimport sys\n\ndef get_mrp(id, flavor, framework, version, time, input, spans, edges):\n used_nodes = []\n nodes = []\n mrp_edges = []\n mrp = {}\n for span in spans.keys():\n node_dict = {'id' : spans[span], 'anchors':[{'from':int(span.split(':')[0]), 'to':int(span.split(':')[1])}]}\n nodes.append(node_dict)\n used_nodes.append(spans[span])\n for (u, v) in edges.keys():\n if not u in used_nodes:\n node_dict = {'id':u}\n used_nodes.append(node_dict)\n if not v in used_nodes:\n node_dict = {'id':v}\n used_nodes.append(node_dict)\n mrp_edges.append({'source':u, 'target':v, 'label':edges[(u,v)]})\n mrp['id'] = id\n mrp['flavor'] = flavor\n mrp['framework'] = framework\n mrp['version'] = version\n mrp['time'] = time\n mrp['input'] = input\n mrp['nodes'] = nodes\n mrp['edges'] = mrp_edges\n return mrp\n","sub_path":"ucca/get_mrp_from_intermediate.py","file_name":"get_mrp_from_intermediate.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78106480","text":"#!/usr/bin/env python\n\n\"\"\"\nsetup.py to build code with cython\n\"\"\"\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy # to get includes\nimport sys # to work around windows not having libdl\n\nsysdeps = [\"cy_retro/stdio.pxd\", \"cy_retro/stdlib.pxd\", 
\"cy_retro/dlfcn.pxd\" ]\npgdeps = [\"cy_retro/SDL.pxd\", \"cy_retro/pygame.pxd\"]\n\nmods = [\n\tExtension(\"cy_retro.core\",\n\t\tsources = [\"cy_retro/core.pyx\", \"cy_retro/core.pxd\"] + sysdeps,\n\t\tlibraries = [] if sys.platform == 'win32' else [\"dl\"],\n\t\tlanguage = 'C++',\n\t),\n\tExtension(\"cy_retro.pygame_video\",\n\t\tsources = [\"cy_retro/pygame_video.pyx\"] + pgdeps,\n\t\tlibraries = [\"SDL\"],\n\t\tlanguage = 'C++',\n\t),\n\tExtension(\"cy_retro.pygame_audio\",\n\t\tsources = [\"cy_retro/pygame_audio.pyx\"] + pgdeps,\n\t\tlibraries = [\"SDL_mixer\", \"SDL\"],\n\t\tlanguage = 'C++',\n\t),\n\tExtension(\"cy_retro.pygame_input\",\n\t\tsources = [\"cy_retro/pygame_input.pyx\"] + pgdeps,\n\t\tlibraries = [\"SDL\"],\n\t\tlanguage = 'C++',\n\t),\n\tExtension(\"cy_retro.simple_input\",\n\t\tsources = [\"cy_retro/simple_input.pyx\"],\n\t\tlanguage = 'C++',\n\t),\n]\n\nif __name__ == \"__main__\":\n\tsetup(\n\t\tname='cy_retro',\n\t\tpackages=['cy_retro'],\n\t\tcmdclass = {'build_ext': build_ext},\n\t\text_modules = mods,\n\t\tinclude_dirs = ['.', '/usr/include/SDL', numpy.get_include()],\n\t)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"543434905","text":"# Forecasting ts data with DNN\n# python-2.7.13 , numpy-1.13.0, pandas-0.20.2, scipy-0.19.0, theano-0.9.0, keras-2.0.5\n\n\nimport pandas as pd\nimport numpy as np\n#import random\nimport matplotlib.pyplot as plt\n#import neuralpy\nimport urllib\nimport os\n\n# Getting the data from the internet\nurl = 'https://goo.gl/WymYzd'\npath = os.getcwd() \nloc = path + '/COE.xls'\nurllib.urlretrieve(url, loc)\n\n\n\n# Cleaning up downloaded spreadsheet files\nExcel_file = pd.ExcelFile(loc)\n#print 'Excel_file.sheet_names = ', Excel_file.sheet_names # [u'COE data']\nspreadsheet = Excel_file.parse('COE data')\n#print spreadsheet.info()\ndata = spreadsheet['COE$']\n#print data.head()\n\n\n\n# Adjusting dates\n#print spreadsheet['DATE'][193:204]\nspreadsheet.set_value(194, 'DATE', '2004-02-15')\nspreadsheet.set_value(198, 'DATE', '2004-04-15')\nspreadsheet.set_value(202, 'DATE', '2004-06-15')\n#print spreadsheet['DATE'][193:204]\nspreadsheet.to_csv('COE.csv')\n\n\n\n# Scale the input attributes\nx = data\nfrom sklearn import preprocessing\nscaler = preprocessing.MinMaxScaler(feature_range = (0, 1))\n#print scaler # MinMaxScaler(copy=True, feature_range=(0, 1))\n#print type(x) # \nx = np.array(x).reshape((len(x), ))\n#print type(x) # \nx = np.log(x)\n\n\n\n# Data shape\n#print x.shape # (265,)\nx = x.reshape(-1, 1)\n#print x.shape # (265, 1)\n\n\n\n# Scale x\nx = scaler.fit_transform(x)\nx = x.reshape(-1)\n#print x.shape # (265,)\n#print round(x.min(), 2) # 0.0\n#print round(x.max(), 2) # 1.0\n\n\n\n# statsmodels library: partial autocorrelation\nfrom statsmodels.tsa.stattools import pacf\nx_pacf = pacf(x, nlags = 40, method = 'ols')\n#print x_pacf\n\nplt.plot(np.arange(41), x_pacf)\n#plt.savefig('partial_autocorrelations.png')\n#plt.show()\n\n\n# Install theano and keras before nnet_ts\nfrom nnet_ts import *\n\nfit1 = TimeSeriesNnet(hidden_layers = [7, 3], activation_functions = ['tanh', 'tanh'])\n\n'''\ncount = 0\nahead = 12\npred = []\n'''\n\n\n\n'''\nwhile (count < ahead):\n\tend = len(x) - ahead + count\n\tnp.random.seed(2016)\n\tfit1 = TimeSeriesNnet(hidden_layers = [7, 3], activation_functions = ['tanh', 'tanh'])\n\tfit1.fit(x[0:end], lag = 1, epochs = 100)\n\tout = 
fit1.predict_ahead(n_ahead = 1)\n\tprint 'Obs: ', count + 1, ' x = ', round(x[count], 4), ' prediction = ', round(pd.Series(out), 4)\n\tpred.append(out)\n\tcount = count + 1\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DNN/nnet_ts.py","file_name":"nnet_ts.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39698714","text":"\r\n\r\nimport glob\r\nimport logging\r\nimport os\r\nimport sys\r\nfrom logging import Logger, RootLogger\r\nfrom typing import Any, Union\r\n\r\n# import sympy as sp\r\nimport numpy as np\r\nimport math\r\nimport pandas as pd\r\nimport scipy.io as sio\r\nfrom configparser import ConfigParser\r\nimport scipy.optimize\r\nimport warnings\r\nwarnings.filterwarnings('ignore', 'The iteration is not making good progress')\r\nfrom matplotlib import pyplot as plt\r\n\r\nconfig = ConfigParser()\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nlogger: Union[Union[Logger, RootLogger], Any] = logging.getLogger('GPSHeading')\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nconfig.read('constantsMAT.config', encoding='UTF-8')\r\n\r\n# ego vehicle parameters\r\nvehicle_halfWide = 1 # unit - meter\r\nvehicle_long = 2.5 # unit - meter\r\nvehicle_halfWide_po = 1 # unit - meter\r\nvehicle_long_po = 2.5 # unit - meter\r\nvehicle_length = 4 # unit - meter\r\n\r\n# Evaluation threshold\r\nego_speedLimit = 60 # unit - km/h\r\ndy_limit = 3.5 # unit - meter\r\nay_target = 2 # unit - m/s2\r\nday_p = 3 # unit - m/s3\r\ndx_farLimit = 50 # unit - meter\r\ndx_nearLimit = 20 # unit - meter\r\n\r\ndef getMatSignal( matStruct, matFile_dict):\r\n # NOTE you could use the MatFileVO class to read and use your mat file\r\n matStruct = matStruct.split(\";\")\r\n try:\r\n for i, elem in enumerate(matStruct):\r\n if i == 0:\r\n signal = matFile_dict[elem]\r\n else:\r\n signal = signal[elem]\r\n except KeyError:\r\n signal = None\r\n return signal\r\n\r\ndef buildDict(mat, matDict=None):\r\n if matDict is None:\r\n matDict = {}\r\n for key in mat:\r\n elem = mat[key]\r\n if isinstance(elem, sio.matlab.mio5_params.mat_struct):\r\n matDict[key] = {}\r\n mat2dict(elem, matDict[key])\r\n elif isinstance(elem, np.ndarray):\r\n matDict[key] = elem\r\n return matDict\r\n\r\ndef mat2dict(mat, matDict=None):\r\n if matDict is None:\r\n matDict = {}\r\n for key in mat._fieldnames:\r\n elem = mat.__dict__[key]\r\n if isinstance(elem, sio.matlab.mio5_params.mat_struct):\r\n matDict[key] = {}\r\n mat2dict(elem, matDict[key])\r\n elif isinstance(elem, np.ndarray):\r\n matDict[key] = elem\r\n return matDict\r\n\r\ndef getPOvy(time, po_index, vy_po):\r\n po_vy = []\r\n for cyc in range(len(time)):\r\n if po_index[cyc] <= 31:\r\n po_vyv = vy_po[po_index[cyc], cyc]\r\n else:\r\n po_vyv = 0\r\n po_vy.append(po_vyv)\r\n\r\n po_vy = np.array(po_vy)\r\n return po_vy\r\n\r\ndef getPOvx(time, po_index, vx_po):\r\n po_vx = []\r\n for cyc in range(len(time)):\r\n if po_index[cyc] <= 31:\r\n po_vxv = vx_po[po_index[cyc], cyc]\r\n else:\r\n po_vxv = 0\r\n po_vx.append(po_vxv)\r\n\r\n po_vx = np.array(po_vx)\r\n return po_vx\r\n\r\ndef calcDistance(time, vel):\r\n vel = vel * 0.0039 - 0.0119\r\n time_diff = list(np.append(np.array(0), np.diff(time)))\r\n if len(time_diff) > len(vel):\r\n time_diff.pop()\r\n\r\n distance = np.cumsum(np.multiply(time_diff, vel))\r\n total_dist = distance[-1]\r\n total_dist = abs(total_dist)\r\n\r\n return total_dist\r\n\r\n\r\ndef collisionRisk_long(ttc, dx_po, vx_po):\r\n dx_po = 
dx_po * 0.0625\r\n vx_po = vx_po * 0.0312 - 128\r\n moving_distance = ttc*vx_po\r\n distance_po = dx_po + moving_distance\r\n if (ttc == 0) | (distance_po > (dx_nearLimit + 4)) | (distance_po < (dx_nearLimit - 4)):\r\n return False\r\n else:\r\n return True\r\n\r\ndef collisionRisk_rd(po_exist, rd_exist, dy_rd):\r\n if rd_exist > 60 and dy_rd < 3:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# def predict_right_dtlc(self, measurement_time, line_right, line_right_aplPsi, line_right_kapHor, line_right_kapDtHor,\r\n# ego_velocity):\r\n# \"\"\"\r\n# predict right DTLC\r\n# dy = c0 + c1*dx + 1/2*c2sqr(dx) + 1/6*c3cube(dx)\r\n# \"\"\"\r\n# predict_time = self.config['PREDICTION_TIME']\r\n# dx_right = predict_time * ego_velocity\r\n# y_right_predict = line_right + line_right_aplPsi * dx_right + 0.5 * line_right_kapHor * pow(dx_right, 2) + 0.166667 * line_right_kapDtHor * pow(dx_right, 3)\r\n# return y_right_predict\r\n\r\ndef calcDTC(dy_po):\r\n dy_po = dy_po * 0.0625 - 64\r\n if dy_po == 0:\r\n dtc = 0\r\n elif dy_po > 0:\r\n dtc = dy_po - vehicle_halfWide - vehicle_halfWide_po\r\n else:\r\n dtc = -dy_po - vehicle_halfWide - vehicle_halfWide_po\r\n return dtc\r\n\r\ndef calcDTC_rd(dy_rd):\r\n if dy_rd == 0:\r\n dtc = 0\r\n elif dy_rd > 0:\r\n dtc = dy_rd - vehicle_halfWide\r\n else:\r\n dtc = -dy_rd - vehicle_halfWide\r\n return dtc\r\n\r\n\r\ndef calcTTC(dtc, ayv):\r\n coe = 6*dtc/ayv\r\n if dtc != 0:\r\n ttc = pow(abs(coe), 1/3)\r\n else:\r\n ttc = 0\r\n\r\n return ttc\r\n # coe = (1/6)*ayv\r\n # t = sp.Symbol('t')\r\n # d = coe*t**3 + vy*t\r\n # t = sp.solve(d)\r\n # if max(t) > 0:\r\n # return max(t)\r\n # else:\r\n # return 0\r\n\r\ndef calcTTA():\r\n t = ay_target/day_p\r\n\r\n return t\r\n\r\ndef ttc_final(dtc,ttc):\r\n\r\n v2 = 0.5*pow(calcTTA(), 2)*day_p\r\n d_po2_rest = dtc - day_p * pow(calcTTA(), 3) / 6\r\n if d_po2_rest <= 0:\r\n ttc_f = ttc\r\n elif d_po2_rest > 0:\r\n ttc_f = ttc + calcT2(ay_target / 2, v2, -d_po2_rest)\r\n\r\n return ttc_f\r\n\r\ndef calcT2(a,b,c):\r\n delta = pow((b*b - 4*a*c), 1/2)\r\n if delta >= 0:\r\n x1 = ((-b + delta)/2*a)\r\n x2 = ((-b - delta)/2*a)\r\n if x1 >= x2:\r\n x = x1\r\n elif x2 > x1:\r\n x = x2\r\n else:\r\n x = 0\r\n\r\n return x\r\n\r\ndef calcTTC_left(ay,vy,dy):\r\n # a: acceleration\r\n # b: vy\r\n # d: dy\r\n a = 0.5*ay\r\n b = -vy\r\n c = -dy\r\n d = b*b - 4*a*c\r\n if d < 0:\r\n return 0\r\n elif d == 0:\r\n x1 = -b/2*a\r\n x2 = x1\r\n else:\r\n x1 = (-b + math.sqrt(d))/(2*a)\r\n x2 = (-b - math.sqrt(d))/(2*a)\r\n\r\n if x1 > 0:\r\n x = x1\r\n elif x2 > 0:\r\n x = x2\r\n else:\r\n x = 0\r\n return x\r\n\r\ndef calcTTC_right(ay,vy,dy):\r\n # a: acceleration\r\n # b: vy\r\n # d: dy\r\n a = 0.5*ay\r\n b = vy\r\n c = dy\r\n d = b*b - 4*a*c\r\n if d < 0:\r\n return 0\r\n elif d == 0:\r\n x1 = -b / 2 * a\r\n x2 = x1\r\n else:\r\n x1 = (-b + math.sqrt(d))/(2*a)\r\n x2 = (-b - math.sqrt(d))/(2*a)\r\n\r\n if x1 > 0:\r\n x = x1\r\n elif x2 > 0:\r\n x = x2\r\n else:\r\n x = 0\r\n return x\r\n\r\ndef calcDistanceKPI():\r\n near_array = []\r\n near_array_total = []\r\n near_array_po2 = []\r\n near_array_po3 = []\r\n ttc_po2_array = []\r\n ttc_po3_array = []\r\n\r\n mat_po_data = sio.loadmat(mat_po, struct_as_record=False, squeeze_me=True)\r\n mat_roadedgele = sio.loadmat(mat_roadedge_le, struct_as_record=False, squeeze_me=True)\r\n mat_roadedgeri = sio.loadmat(mat_roadedge_ri, struct_as_record=False, squeeze_me=True)\r\n mat_ego_speed = sio.loadmat(mat_speed, struct_as_record=False, squeeze_me=True)\r\n # po and spped ts are same\r\n 
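# --- A minimal sketch, not from the original source: calcTTC_left and calcTTC_right
# above duplicate the same quadratic-root logic and, in their d == 0 branch, compute
# -b/2*a, which Python evaluates as (-b/2)*a rather than -b/(2*a). One helper for the
# largest positive root of a*t^2 + b*t + c = 0 (math is already imported) fixes both:
def largest_positive_root(a, b, c):
    d = b * b - 4 * a * c
    if d < 0:
        return 0
    x1 = (-b + math.sqrt(d)) / (2 * a)  # x1 >= x2 whenever a > 0
    x2 = (-b - math.sqrt(d)) / (2 * a)
    if x1 > 0:
        return x1
    if x2 > 0:
        return x2
    return 0
# calcTTC_left(ay, vy, dy) is then largest_positive_root(0.5 * ay, -vy, -dy), and
# calcTTC_right(ay, vy, dy) is largest_positive_root(0.5 * ay, vy, dy).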
matDictFus_po = buildDict(mat_po_data)\r\n matDictFus_speed = buildDict(mat_ego_speed)\r\n ts_po = matDictFus_po[\"timestamp\"]\r\n #road edge\r\n matDictFus_roadedge_ri = buildDict(mat_roadedgeri)\r\n matDictFus_roadedge_le = buildDict(mat_roadedgele)\r\n ts_roadedge = matDictFus_roadedge_ri[\"timestamp\"]\r\n\r\n\r\n # get information from mat file\r\n velEgo = getMatSignal(\"ego;speed\", matDictFus_speed)\r\n # po2_index = getMatSignal(\"PO;i2;index\", matDictFus)\r\n po2_dx = getMatSignal(\"PO;Le;dx\", matDictFus_po)\r\n po2_dy = getMatSignal(\"PO;Le;dy\", matDictFus_po)\r\n po2_vx = getMatSignal(\"PO;Le;vx\", matDictFus_po)\r\n po2_vy = getMatSignal(\"PO;Le;vy\", matDictFus_po)\r\n po2_existprob = getMatSignal(\"PO;Le;ExistProb\", matDictFus_po)\r\n # po3_index = getMatSignal(\"PO;i3;index\", matDictFus)\r\n po3_dx = getMatSignal(\"PO;Ri;dx\", matDictFus_po)\r\n po3_dy = getMatSignal(\"PO;Ri;dy\", matDictFus_po)\r\n po3_vx = getMatSignal(\"PO;Ri;vx\", matDictFus_po)\r\n po3_vy = getMatSignal(\"PO;Ri;vy\", matDictFus_po)\r\n po3_existprob = getMatSignal(\"PO;Ri;ExistProb\", matDictFus_po)\r\n # po_vy = getMatSignal(\"PO;vyv\", matDictFus) # 0-32 row\r\n # po_vx = getMatSignal(\"PO;vxv\", matDictFus) # 0-32 row\r\n\r\n # road edge information\r\n roadedge_ri_dy = getMatSignal(\"Line;Dy\", matDictFus_roadedge_ri)\r\n roadedge_ri_existprob = getMatSignal(\"Line;ExistProb\", matDictFus_roadedge_ri)\r\n\r\n roadedge_le_dy = getMatSignal(\"Line;Dy\", matDictFus_roadedge_le)\r\n roadedge_le_existprob = getMatSignal(\"Line;ExistProb\", matDictFus_roadedge_le)\r\n # --------------------------------------------------------------------------------------------------------------\r\n # get v > 60 kph (16.67 m/s) and dy < 2.5m\r\n velEgo_limit = ego_speedLimit /3.6 # (config[\"ego_vehicle\"].getint(\"ego_speedLimit\"))\r\n\r\n # test\r\n # for cyc in range(len(ts_po)):\r\n # # calculate dlc\r\n # dlc_po2 = calcDLC(po2_dy[cyc])\r\n # dlc_po3 = calcDLC(po3_dy[cyc])\r\n #\r\n # if velEgo[cyc] > velEgo_limit:\r\n # if (dlc_po2 < 2.5) and (dlc_po2 > 0):\r\n # near_array_po2.append(po2_dy[cyc])\r\n # print(near_array_po2)\r\n #\r\n # for cyc in range(len(ts_po)):\r\n # # calculate dlc\r\n # dlc_po2 = calcDLC(po2_dy[cyc])\r\n # dlc_po3 = calcDLC(po3_dy[cyc])\r\n #\r\n # if velEgo[cyc] > velEgo_limit:\r\n # if (dlc_po3 < 2.5) and (dlc_po3 > 0):\r\n # near_array_po3.append(po3_dy[cyc])\r\n # print(near_array_po3)\r\n for cyc in range(len(ts_po)-4):\r\n\r\n cyc_rd = 0\r\n for index, ts in enumerate(ts_roadedge):\r\n if ts > ts_po[cyc]:\r\n cyc_rd = index-1\r\n break\r\n\r\n # calculate dlc\r\n if (po2_dy[cyc] != 0) and (po2_dy[cyc+1] != 0) and (po2_dy[cyc+2] != 0) and (po2_dy[cyc+3] != 0) and (po2_dy[cyc+4] != 0):\r\n dtc_po2 = calcDTC(po2_dy[cyc])\r\n else:\r\n dtc_po2 = 0\r\n\r\n if (po3_dy[cyc] != 0) and (po3_dy[cyc+1] != 0) and (po3_dy[cyc+2] != 0) and (po3_dy[cyc+3] != 0) and (po3_dy[cyc+4] != 0):\r\n dtc_po3 = calcDTC(po3_dy[cyc])\r\n else:\r\n dtc_po3 = 0\r\n\r\n dtc_rd_le = calcDTC_rd(roadedge_le_dy[cyc_rd])\r\n dtc_rd_ri = calcDTC_rd(roadedge_ri_dy[cyc_rd])\r\n # TTC when ay = 1 m/s2\r\n # ay,vy,dy\r\n # ttc_po2 = calcTTC_left(ay_p, po2_vy[cyc], dtc_po2)\r\n # ttc_po3 = calcTTC_right(ay_p, po3_vy[cyc], dtc_po3)\r\n ttc_po2 = calcTTC(dtc_po2, day_p)\r\n ttc_po3 = calcTTC(dtc_po3, day_p)\r\n ttc_rd_le = calcTTC(dtc_rd_le, day_p)\r\n ttc_rd_ri = calcTTC(dtc_rd_ri, day_p)\r\n\r\n # distance to PO2 - distance, from 0m/s2 to Ay_target with day.\r\n # if rest distance < 0 -> ttc with day is the finial 
ttc\r\n # if rest distance > 0 -> finial ttc = ttc with day + ttc with Ay\r\n # Velocity in end of phase 1 = 1/2 * day * t^2\r\n\r\n # v2 = 0.5*pow(calcTTA(), 2)*day_p\r\n # d_po2_rest = dtc_po2 - day_p*pow(calcTTA(), 3)/6\r\n #\r\n # if d_po2_rest <= 0:\r\n # ttc_po2 = ttc_po2\r\n # elif d_po2_rest > 0:\r\n # ttc_po2 = ttc_po2 + calcT2(ay_target/2, v2, -d_po2_rest)\r\n\r\n # distance to PO3 - distance, from 0m/s2 to Ay_target with day.\r\n # if rest distance < 0 -> ttc with day is the finial ttc\r\n # if rest distance > 0 -> finial ttc = ttc with day + ttc with Ay\r\n # d_po3_rest = dtc_po3 - day_p*pow(calcTTA(), 3)/6\r\n #\r\n # if d_po3_rest <= 0:\r\n # ttc_po3 = ttc_po3\r\n # elif d_po3_rest > 0:\r\n # ttc_po3 = ttc_po3 + calcT2(ay_target/2, v2, -d_po3_rest)\r\n\r\n ttc_po2 = ttc_final(dtc_po2, ttc_po2)\r\n ttc_po3 = ttc_final(dtc_po3, ttc_po3)\r\n ttc_rd_le = ttc_final(dtc_rd_le, ttc_rd_le)\r\n ttc_rd_ri = ttc_final(dtc_rd_ri, ttc_rd_ri)\r\n\r\n # collisionRisk_long(ttc_po2, po2_dx[cyc], po2_vx[cyc])\r\n\r\n # if ttc_po2 == 0:\r\n # ttc_po2_array.append(np.nan)\r\n # else:\r\n # ttc_po2_array.append(ttc_po2)\r\n #\r\n # if ttc_po3 == 0:\r\n # ttc_po3_array.append(np.nan)\r\n # else:\r\n # ttc_po3_array.append(ttc_po3)\r\n # ego vehicle speed > 60 kph\r\n # po2 or po3 to ego vehicle edge is < 1.5 meter\r\n if (0.0039 * velEgo[cyc] - 0.0119) > velEgo_limit:\r\n near_array_total.append(po2_dy[cyc])\r\n # if ((dtc_po2 < dy_limit) & (dtc_po2 != 0)) | (\r\n # (dtc_po3 < dy_limit) & (dtc_po3 != 0)):\r\n if collisionRisk_rd(po2_existprob[cyc], roadedge_le_existprob[cyc_rd], roadedge_le_dy[cyc_rd]) or collisionRisk_rd(po3_existprob[cyc], roadedge_ri_existprob[cyc_rd], roadedge_ri_dy[cyc_rd]):\r\n near_array.append(po2_dy[cyc])\r\n\r\n if (collisionRisk_long(ttc_po2, po2_dx[cyc], po2_vx[cyc])) | (collisionRisk_long(ttc_po3, po3_dx[cyc], po3_vx[cyc])):\r\n # if ((dtc_po2 < dy_limit) & (dtc_po2 != 0) & (collisionRisk_long(ttc_po2, po2_dx[cyc], po2_vx[cyc]))) | ((dtc_po3 < dy_limit) & (dtc_po3 != 0) & (collisionRisk_long(ttc_po3, po3_dx[cyc], po3_vx[cyc]))):\r\n near_array.append(po2_dy[cyc])\r\n\r\n if collisionRisk_long(ttc_po2, po2_dx[cyc], po2_vx[cyc]):\r\n ttc_po2_array.append(ttc_po2)\r\n\r\n if collisionRisk_rd(po2_existprob[cyc], roadedge_le_existprob[cyc_rd], roadedge_le_dy[cyc_rd]):\r\n ttc_po2_array.append(ttc_rd_le)\r\n\r\n if collisionRisk_long(ttc_po3, po3_dx[cyc], po3_vx[cyc]):\r\n ttc_po3_array.append(ttc_po3)\r\n\r\n if collisionRisk_rd(po3_existprob[cyc], roadedge_ri_existprob[cyc_rd], roadedge_ri_dy[cyc_rd]):\r\n ttc_po3_array.append(ttc_rd_ri)\r\n\r\n ttc_po2_average = np.nanmean(ttc_po2_array)\r\n ttc_po3_average = np.nanmean(ttc_po3_array)\r\n\r\n ts = ts_po[2]-ts_po[1]\r\n near_time_total = (len(near_array_total)*ts)\r\n near_time = (len(near_array)*ts)\r\n near_dist = 1\r\n # get near range time\r\n # po2_dy_near = po2_dy[np.where((po2_dy < 2.5) & (po2_dy > 0))]\r\n # po3_dy_near = po3_dy[np.where((po3_dy < 2.5) & (po3_dy > 0))]\r\n # near_time = (len(po2_dy_near)+len(po3_dy_near))*0.2\r\n # get near range distance\r\n # near_dist = calcDistance(po2_dy_near, velEgo)\r\n # --------------------------------------------------------------------------------------------------------------\r\n # total distance of measurement\r\n total_dist = calcDistance(ts_po, velEgo)\r\n # total time of measurement\r\n total_time = ts_po[-1] - ts_po[0]\r\n\r\n # draw plot\r\n # fig = plt.figure()\r\n # plt.title(os.path.basename(folder))\r\n # plt.plot(ts_po, ttc_po2_array, label='ttc po2')\r\n # 
plt.plot(ts_po, ttc_po3_array, label='ttc po3')\r\n # plt.gca().set_ylim(0, 1)\r\n # plt.legend(loc='upper right', prop={'size': 8})\r\n # fig.savefig(os.path.join(dirpath, os.path.basename(folder) + '_PO_dy.png'))\r\n # plt.close(fig)\r\n #\r\n # fig = plt.figure()\r\n # plt.title(os.path.basename(folder))\r\n # plt.plot(ts_po, po2_dx, label='PO2 dx')\r\n # plt.plot(ts_po, po3_dx, label='PO3 dx')\r\n # plt.gca().set_ylim(0, 50)\r\n # plt.legend(loc='upper right', prop={'size': 8})\r\n # fig.savefig(os.path.join(dirpath, os.path.basename(folder) + '_PO_dx.png'))\r\n # plt.close(fig)\r\n\r\n return ts_po, near_time_total, near_time, total_dist, total_time, ttc_po2_array, ttc_po3_array\r\n\r\nif __name__ == '__main__':\r\n dirpath = 'C:/02_Meas/20200115_epl2bp/'\r\n # Raise warning if trying to run without input file\r\n if len(sys.argv) != 2:\r\n logger.warning('Input path is not specified. Default path is used: %s', dirpath)\r\n else:\r\n dirpath = sys.argv[1]\r\n logger.info('Input path: {}'.format(dirpath))\r\n pathstring = \"{}/**\"\r\n\r\n # make sure that only folders are processed, and not files in them\r\n folder_list = [folder for folder in glob.iglob(pathstring.format(dirpath), recursive=True)\r\n if os.path.isdir(folder)]\r\n\r\n total_dist_all = []\r\n total_time_all = []\r\n near_time_all = []\r\n near_time_total_all = []\r\n ttc_po2_all = []\r\n ttc_po3_all = []\r\n ttc_po_all = []\r\n\r\n for folder in folder_list:\r\n # read po target distance\r\n try:\r\n mat_po = os.path.join(folder, 'po.mat')\r\n mat_roadedge_le = os.path.join(folder, 'roadedge_left.mat')\r\n mat_roadedge_ri = os.path.join(folder, 'roadedge_right.mat')\r\n mat_speed = os.path.join(folder, 'speed.mat')\r\n if os.path.exists(mat_po) and os.path.exists(mat_roadedge_le) and os.path.exists(mat_roadedge_ri) and os.path.exists(mat_speed):\r\n logger.info('Reading {}.'.format(folder))\r\n else:\r\n logger.warning('missing po or roadedge mat file from {}. '\r\n 'Make sure that it\\'s a measurement folder. 
Skipping.'\r\n .format(folder))\r\n continue\r\n\r\n m_ts_po, near_time_total, m_near_time, m_total_dist, m_total_time, m_ttc_po2_array, m_ttc_po3_array = calcDistanceKPI()\r\n\r\n total_dist_all.append(m_total_dist)\r\n total_time_all.append(m_total_time)\r\n near_time_all.append(m_near_time)\r\n near_time_total_all.append(near_time_total)\r\n ttc_po2_all.extend(m_ttc_po2_array)\r\n ttc_po3_all.extend(m_ttc_po3_array)\r\n\r\n except:\r\n pass\r\n\r\n ttc_po_all.extend(ttc_po2_all)\r\n ttc_po_all.extend(ttc_po3_all)\r\n\r\n ttc_po_all = np.array(ttc_po_all)\r\n a = len(np.where(ttc_po_all <= 1)[0])\r\n b = len(np.where((ttc_po_all > 1) & (ttc_po_all <= 1.5))[0])\r\n c = len(np.where((ttc_po_all > 1.5) & (ttc_po_all <= 2))[0])\r\n d = len(np.where((ttc_po_all > 2) & (ttc_po_all <= 3))[0])\r\n e = len(np.where(ttc_po_all > 3)[0])\r\n f = len(ttc_po_all)\r\n\r\n # print(max(ttc_po_all))\r\n print(a, b, c, d, e, f)\r\n\r\n # labels = ['0-0.5s', '0.5-1s', '1-1.5s', '1.5-2s', '2-3s', '>3s']\r\n # X = [222, 42, 455, 664, 454, 334]\r\n #\r\n # fig = plt.figure()\r\n # plt.pie(X, labels=labels, autopct='%1.2f%%') # 画饼图(数据,数据对应的标签,百分数保留两位小数点)\r\n # plt.title(\"Pie chart\")\r\n #\r\n # plt.show()\r\n # plt.savefig(\"PieChart.jpg\")\r\n\r\n fig = plt.figure()\r\n plt.title(os.path.basename(folder))\r\n\r\n labels = ['<1s', '1-1.5s', '1.5-2s', '2-3s', '>3s']\r\n X = [a, b, c, d, e]\r\n explode = (0.1, 0.1, 0.02, 0.02, 0.02) # 将某一块分割出来,值越大分割出的间隙越大\r\n\r\n fig = plt.figure()\r\n plt.pie(X, labels=labels, explode=explode, autopct='%1.2f%%', pctdistance=0.8, labeldistance=1.2, startangle=30)\r\n plt.title(\"PO ttc chart\")\r\n\r\n fig.savefig(os.path.join(dirpath, 'PO_ttc.png'))\r\n plt.close(fig)\r\n\r\n print(\"---------------------------------------------------\")\r\n print(\"total distance:\", \"%.2f\" % (np.sum(total_dist_all)/1000), \"km\")\r\n print(\"total time:\", \"%.2f\" % (np.sum(total_time_all)/3600), \"hours\")\r\n print(\"high speed time (V>60kph):\", \"%.4f\" % (np.sum(near_time_total_all)/3600), \"hours\")\r\n print(\"collision risky time:\", \"%.4f\" % (np.sum(near_time_all)/3600), \"hours\")\r\n print(\"---------------------------------------------------\")\r\n print(\"high speed time in total time:\", \"%.2f\" % (np.sum(near_time_total_all)/np.sum(total_time_all)*100), \"%\")\r\n print(\"collision risky time percent in total time:\", \"%.2f\" % (np.sum(near_time_all) / np.sum(total_time_all) * 100), \"%\")\r\n print(\"collision risky time percent in high speed time:\", \"%.2f\" % (np.sum(near_time_all)/np.sum(near_time_total_all)*100), \"%\")\r\n print(\"---------------------------------------------------\")\r\n print(\"average TTC po2 (day=\", day_p, \"m/s3):\", \"%.2f\" % (np.nanmean(ttc_po2_all)), \"second\")\r\n print(\"average TTC po3 (day=\", day_p, \"m/s3):\", \"%.2f\" % (np.nanmean(ttc_po3_all)), \"second\")\r\n print(\"TTC < 1.5\", \"%.2f\" % ((a+b)/f*100), \"%\")\r\n print(\"TTC = 1.5-2\", \"%.2f\" % (c/f*100), \"%\")\r\n print(\"TTC = 2-3\", \"%.2f\" % (d/f*100), \"%\")\r\n print(\"TTC > 3\", \"%.2f\" % (e/f*100), \"%\")\r\n # fig = plt.figure()\r\n # plt.title(os.path.basename(folder))\r\n # plt.plot(range(len(ttc_po2_all)), ttc_po2_all, label='ttc po2')\r\n # plt.gca().set_ylim(0, 1)\r\n # plt.legend(loc='upper right', prop={'size': 8})\r\n # fig.savefig(os.path.join(dirpath, os.path.basename(folder) + '_PO2_ttc.png'))\r\n # 
plt.close(fig)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"hf_val_roadedge.py","file_name":"hf_val_roadedge.py","file_ext":"py","file_size_in_byte":20506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537239645","text":"import urllib.request\nimport gridfs\nimport os\nimport hashlib\nimport ssdeep\nimport pefile\nimport json\nimport requests\nimport mimetypes\nfrom init_database import *\nfrom init_connection import *\nfrom virustotal_python import Virustotal\n\nfpath = 'report/'\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef get_md5(fname):\n \"\"\"Get malware MD5\n Params:\n -fname\n Returns:\n - rslt\n \"\"\"\n\n hash_md5 = hashlib.md5()\n with open(fpath + fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n rslt = hash_md5.hexdigest()\n return rslt\n\ndef get_sha256(fname):\n \"\"\"Get malware sha256\n Params:\n -fname\n Returns:\n - rslt\n \"\"\"\n\n hash_sha256 = hashlib.sha256()\n with open(fpath + fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_sha256.update(chunk)\n rslt = hash_sha256.hexdigest()\n return rslt\n\ndef get_sha1(fname):\n \"\"\"Get malware sha1\n Params:\n -fname\n Returns:\n - rslt\n \"\"\"\n\n hash_sha1 = hashlib.sha1()\n with open(fpath + fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_sha1.update(chunk)\n rslt = hash_sha1.hexdigest()\n return rslt\n\ndef get_ssdeep(fname):\n \"\"\"Get malware ssdeep\n Params:\n -fname\n Returns:\n - rslt\n \"\"\"\n\n hash_ssdeep = ssdeep.Hash()\n with open(fpath + fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_ssdeep.update(chunk)\n rslt = hash_ssdeep.digest()\n return rslt\n\ndef get_imphash(fname):\n \"\"\"If the malware is an executable, get the imphash\n Params:\n -fname\n Returns:\n - imps\n \"\"\"\n try:\n imps = []\n p = pefile.PE(fpath + fname)\n imphash = p.get_imphash()\n imps.append(imphash)\n return str(imps)\n except:\n return \"can't calculate the imphash\"\n\ndef is_exe(tags):\n \"\"\"If the malware is an executable\n Params:\n -tags (source csv urlhaus)\n Returns:\n - boolean\n \"\"\"\n if \"exe\" in tags:\n return True\n return False\n\ndef file_from_db_to_hd(db, fname):\n fs = gridfs.GridFSBucket(db)\n file = open(fpath + fname, 'wb+')\n fs.download_to_stream_by_name(fname, file)\n\n\"\"\"Dump the VirusTotal report to a JSON file\"\"\"\ndef vt_to_json(vt, fname):\n with open(fpath + fname, \"w\") as outfile:\n json.dump(vt, outfile)\n outfile.close()\n\n\ndef connect_gridfs(db):\n fs = gridfs.GridFSBucket(db)\n return fs\n\ndef print_gridfs(db):\n \"\"\" Print the list of files stored in the database\n Params:\n -db\n Returns:\n - None\n \"\"\"\n\n print(\"\\n********************\")\n print(\"****PRINT GRIDFS**** \")\n print(\"********************\")\n fs = gridfs.GridFSBucket(db)\n for gridout in fs.find():\n print(\"\\n_id: \" + str(gridout._id) + \"\\nfilename: \" + str(gridout.filename) + \"\\ncontent_type: \" + str(gridout.contentType))\n print(\"********************\")\n print(\"**END PRINT GRIDFS**\")\n print(\"********************\")\n\ndef count_gridfs(db):\n \"\"\" Summary of processed files in database\n Params:\n -db\n Returns:\n - Prints stats from the database\n \"\"\"\n print(\"\\n********************\")\n print(\"***COUNT GRIDFS***\")\n print(\"********************\")\n fs = gridfs.GridFSBucket(db)\n count = 0\n for gridout in fs.find():\n count = count + 1\n print(\"\\n--->\" + str(count) + \" items in GridFS\\n\")\n\ndef 
get_file_from_gridfs(db, id_file):\n \"\"\" Get file from gridfs\n Params:\n -db, id_file\n Returns:\n - \n \"\"\"\n fs = gridfs.GridFS(db)\n f = fs.find_one({ \"filename\" : id_file })\n return f\n\ndef id_malware_exist(db, id):\n \"\"\" Check if the malware is already the database\n Params:\n -db,id\n Returns:\n - boolean\n \"\"\"\n\n\n count = db.malware_collection.count( { \"id_malware\": id }) \n if count == 1:\n return True\n elif count == 0:\n return False\n else:\n print(\"Database is corrupted\")\n return False\n\ndef print_database(db):\n\n \"\"\" List elements present in the database\n Params:\n -db\n Returns:\n - \n \"\"\"\n print(\"\\n********************\")\n print(\"***PRINT MONGO DB***\") \n print(\"********************\")\n\n collection = db['malware_collection']\n cursor = collection.find({})\n print(\"\\n*** MALWARE_COLLECTION ***\") \n for document in cursor:\n for x in document:\n print(x + ':', document[x])\n print(\"\\n\")\n print(\"\\n\")\n\n collection = db['vt_collection']\n cursor = collection.find({})\n print(\"\\n*** VIRUSTOTAL COLLECTION ***\") \n for document in cursor:\n for x in document:\n print(x + ':', document[x])\n print(\"\\n\")\n print(\"\\n\")\n \n collection = db['ip_collection']\n cursor = collection.find({})\n print(\"\\n*** IP COLLECTION ***\") \n for document in cursor:\n for x in document:\n print(x + ':', document[x])\n print(\"\\n\")\n print(\"\\n\")\n\n collection = db['label_collection']\n cursor = collection.find({})\n print(\"\\n*** LABEL COLLECTION ***\") \n for document in cursor:\n for x in document:\n print(x + ':', document[x])\n print(\"\\n\")\n print(\"\\n\")\n\n print(\"********************\")\n print(\"*END PRINT MONGO DB*\")\n print(\"********************\")\n\ndef count_database(db):\n \"\"\" Summary of processed files in database\n Params:\n -db\n Returns:\n - \n \"\"\"\n print(\"********************\")\n print(\"***COUNT MONGO DB***\")\n print(\"********************\")\n count_malware_collection = db.malware_collection.count()\n count_vt_collection = db.vt_collection.count()\n count_label_collection = db.label_collection.count()\n count_ip_collection = db.ip_collection.count()\n print(\" ---> \" + str(count_malware_collection) + \" malwares in database /// \")\n print(\" ---> \" + str(count_vt_collection) + \" virustotal report in database /// \")\n print(\" ---> \" + str(count_label_collection) + \" labels in database /// \")\n print(\" ---> \" + str(count_ip_collection) + \" ip's in database /// \")\n\ndef main():\n init_mongo();\n db = connect_database()\n erase_gridfs(db);\n request = init_connection_urlhaus()\n parse_csv_urlhaus(db, request)\n \n print_gridfs(db)\n print_database(db)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"91314153","text":"import numpy as np\nfrom IPython.core.debugger import set_trace\nimport math\n\nclass SumTree():\n # capacity should be power of two\n def __init__(self, capacity):\n assert math.log(capacity, 2).is_integer()\n self.capacity = capacity\n self.tree = np.zeros(2 * capacity - 1)\n self.data = np.zeros(capacity, dtype='object')\n self.nextIdx = 0\n \n def insert(self, val, data):\n self.data[self.nextIdx] = data\n treeIdx = self.capacity - 1 + self.nextIdx\n change = val - self.tree[treeIdx]\n while treeIdx > 0:\n self.tree[treeIdx] += change \n treeIdx = (treeIdx - 1 )// 2\n 
self.tree[treeIdx] += change\n \n assert not math.isnan(self.tree[0])\n self.nextIdx = (self.nextIdx + 1) % self.capacity\n\n def update(self, idx, val):\n treeIdx = self.capacity - 1 + idx\n change = val - self.tree[treeIdx]\n while treeIdx > 0:\n self.tree[treeIdx] += change \n treeIdx = (treeIdx - 1 ) // 2\n self.tree[treeIdx] += change\n \n def find_val_idx(self,val):\n i = 0\n# set_trace()\n while True:\n l = 2 * i + 1\n r = 2 * i + 2\n if val <= self.tree[l]:\n i = l\n else:\n i = r\n val -= self.tree[l]\n if i >= self.capacity - 1:\n break\n return i - self.capacity + 1\n\n def get_val(self, idx):\n return self.tree[self.capacity - 1 + idx]\n\n @property\n def total(self):\n assert not math.isnan(self.tree[0])\n return self.tree[0]\n","sub_path":"sum_tree.py","file_name":"sum_tree.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"586694448","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 30 19:02:14 2020\n\n@author: SUBHAM\n\"\"\"\n\nfor num in range(1,1001):\n order = len(str(num))\n sum = 0\n temp = num\n while temp > 0:\n digit = temp % 10\n sum += digit ** order\n temp //= 10\n if num == sum:\n print(num)","sub_path":"python assignment 3/armstrong in range 1 to 1000.py","file_name":"armstrong in range 1 to 1000.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408284124","text":"from repertory.models import *\nfrom django.contrib import admin\n\nclass SuiteInterpretationInline(admin.StackedInline):\n model = SuiteInterpretation\n extra = 0\n\nclass DanceInterpretationInline(admin.StackedInline):\n model = DanceInterpretation\n extra = 0\n\nclass MusicFileInline(admin.TabularInline):\n model = MusicFile\n extra = 0\n\nclass DanceAdmin(admin.ModelAdmin):\n list_filter = ('composer', 'tags', 'choreographer')\n list_display = ('__unicode__', 'other_titles', 'choreographer', 'get_tags', 'has_description', 'interp_count', 'has_music_file', 'notes')\n inlines = [DanceInterpretationInline, MusicFileInline]\n filter_horizontal = ('tags',)\n\nclass DanceSuiteAdmin(admin.ModelAdmin):\n inlines = [SuiteInterpretationInline]\n list_display = ('__unicode__', 'dance_count', 'has_description', 'interp_count')\n\nadmin.site.register(Dance, DanceAdmin)\nadmin.site.register(Composer)\nadmin.site.register(Tag)\nadmin.site.register(DanceSuite, DanceSuiteAdmin)\n\n","sub_path":"repertory/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186660098","text":"# https://atcoder.jp/contests/nikkei2019-qual/submissions/13677554\n# C - Different Strokes\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n n = int(input())\n AB = [list(map(int, input().split())) for _ in range(n)]\n for i in range(n):\n a, b = AB[i]\n AB[i] = [a + b, a, b]\n AB.sort(key=lambda x: x[0], reverse=True)\n\n score_t = sum(AB[i][1] for i in range(n) if i % 2 == 0)\n score_a = sum(AB[i][2] for i in range(n) if i % 2 != 0)\n\n print(score_t - score_a)\n\n\nif __name__ == '__main__':\n resolve()\n","sub_path":"ARC/NIKKEI_2019_Qual/NIKKEI_2019_Qual-C.py","file_name":"NIKKEI_2019_Qual-C.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"351931843","text":"from agents import ExpectiMaxAgent\r\nfrom game import *\r\nimport numpy as np\r\n\r\nGAME_SIZE = 4\r\nSCORE_TO_WIN = 2048\r\neposide = 4000\r\ngame_train = Game(size=GAME_SIZE, score_to_win=SCORE_TO_WIN)\r\nagent = ExpectiMaxAgent(game_train)\r\ntxt_dir = \"./dataset2/data0.txt\"\r\nindex = 0\r\nfile = open(txt_dir, mode='w')\r\nfor ep in range(eposide):\r\n _ = game_train.reset()\r\n if ep % 10 == 0 and ep != 0:\r\n index += 1\r\n txt_dir = \"./dataset2/data\" + str(index) + \".txt\"\r\n file.close()\r\n file = open(txt_dir, mode='w')\r\n while game_train.end == 0:\r\n state = game_train.board\r\n max_score = np.max(state)\r\n state_print = np.reshape(state, [1, 16]).squeeze()\r\n action = agent.step()\r\n game_train.move(action)\r\n for _ in range(4):\r\n print(state_print, \" \", action, file=file)\r\n state = np.rot90(state)\r\n action = (action + 1) % 4\r\n state_print = np.reshape(state, [1, 16]).squeeze()\r\n \"\"\"\r\n if max_score >= 16:\r\n max_score = 1024 / max_score\r\n else:\r\n max_score = 64\r\n while max_score != 0:\r\n print(state, \" \", action, file=file)\r\n max_score -= 1\r\n \"\"\"\r\n\r\n\r\n","sub_path":"game2048/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282711078","text":"#!/usr/bin/python\n# encoding=utf8\nimport numpy as np\nimport unittest\nfrom numpy import linalg as LA\n\nclass TestMath(unittest.TestCase):\n\n def test_01(self):\n # eig函数用来求方阵的特征值和特征向量\n w, v = LA.eig(np.diag((1, 2, 3)))\n print(w)\n self.assertEqual(w, [1.,2.,3.])\n self.ass\n\n #print(w, v)\n\n a = np.array([[1, 2], [3, 4]])\n w, v = LA.eig(a)\n #self.assertEqual(w, 2)\n #self.assertEqual(v, [2])\n\n def test_02(self):\n #求行列式的值\n a = np.array([[1, 2], [3, 4]])\n self.assertEqual(round(LA.det(a), 0), -2)\n\n b = np.array([[1, -3, 3], [3, -5, 3], [6, -5, 4]]);\n self.assertEqual(round(LA.det(b), 0), 22.0)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"basic/languages/python-19910220/third-party/numpy-reference-1.13/03-routines/linear-algebra.py","file_name":"linear-algebra.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"402286100","text":"#!/usr/bin/env python3\n\nimport os\nimport time, random\nfrom multiprocessing import Process, Pool\n\n\ndef run_proc(name):\n print('Run child process %s (%s)...' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s(%s)...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s runs %0.2f seconds.' % (name, (end - start)))\n\n\ndef create_single():\n print('Parent process %s.' % os.getpid())\n p = Process(target=run_proc, args=('test',))\n print('Children process will start.')\n p.start()\n p.join()\n print('Children process end.')\n\n\ndef create_multi():\n print('Parent process %s.' 
% os.getpgid())\n p = Pool(4)\n for i in range(5):\n p.apply_async(long_time_task, args=(i,))\n print('Waiting for all suppresses done...')\n p.close()\n p.join()\n print('All subprocess done.')\n\n\nif __name__ == '__main__':\n create_single()\n","sub_path":"explore/progress_demo.py","file_name":"progress_demo.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97348597","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 13:55:07 2018\n\n@author: dom0015\n\"\"\"\n\nimport numpy as np\nimport numpy.random\nimport scipy.stats\n\ndef lhs_norm(d,n):\n R=numpy.random.uniform(size=[n,d])\n P=np.zeros([n,d])\n \n for i in range(d):\n P[:,i] = numpy.random.permutation(n)\n \n return (P+R)/n\n\n\nprint(lhs_uniform(2,5))","sub_path":"other_files/odpad/Bayes_working_example/lhs_uniform.py","file_name":"lhs_uniform.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267241122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 8 11:37:23 2020\n\n@author: santi\n\"\"\"\ndef lista_primos(n): \n \n # Create a boolean array \"prime[0..n]\" and \n # initialize all entries it as true. A value \n # in prime[i] will finally be false if i is \n # Not a prime, else true. \n prime = [True for i in range(n+1)] \n \n p = 2\n while(p * p <= n): \n \n # If prime[p] is not changed, then it is \n # a prime \n if (prime[p] == True): \n \n # Update all multiples of p \n for i in range(p * 2, n + 1, p): \n prime[i] = False\n p += 1\n lista = []\n \n # Print all prime numbers \n for p in range(2, n): \n if prime[p]: \n lista.append(p)\n return lista ","sub_path":"lista_primos.py","file_name":"lista_primos.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"236957241","text":"from typing import Awaitable, Callable, Optional, Union\nimport justpy as jp\nfrom .float_element import FloatElement\n\nclass Slider(FloatElement):\n\n def __init__(self,\n *,\n min: float,\n max: float,\n step: float = 1,\n value: float = None,\n on_change: Optional[Union[Callable, Awaitable]] = None,\n ):\n \"\"\"Slider Element\n\n :param min: lower bound of the slider\n :param max: upper bound of the slider\n :param step: step size\n :param value: inital value to set position of the slider\n :param on_change: callback which is invoked when the user releases the slider\n \"\"\"\n view = jp.QSlider(min=min, max=max, step=step, change=self.handle_change)\n\n super().__init__(view, value=value, on_change=on_change)\n","sub_path":"nicegui/elements/slider.py","file_name":"slider.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"465476255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 21:01:35 2018\n\n@author: 45027374\n\"\"\"\n \nimport numpy as np\nimport struct as st\nfrom astropy.io import fits \n#import pandas as pd \nimport os\n\n \n#wind path\ndatastore_path=\"c:/cloudstor/datastore/SPIE2018/\"\n\n\n#mac path\n#datastore_path=\"/Users/wapr/PhD/datastore/SPIE2018/\"\n\n\n#run_dir=\"Run4-20180531-photon-noise-neg40c/\"\n#run_dir=\"Run5-20180602-neg40c/\"\n#run_dir=\"Run6-20180602-neg20c/\"\n#run_dir=\"Run8-20180713-neg60c/\"\nrun_dir=\"Run9-20180713-photon-noise-neg60c/\"\n\ndef open_file(file):\n \n \n 
with open(datastore_path + run_dir + file, \"rb\") as binary_file:\n # Read the whole file at once\n data = binary_file.read()\n \n return data\n\n\ndef read_header(data):\n\n header={\n 'ByteSwap':int.from_bytes([data[0],data[1]],byteorder='little',signed='true'),\n 'Xsize':int.from_bytes([data[2],data[3]],byteorder='little'),\n 'Ysize':int.from_bytes([data[4],data[5]],byteorder='little'),\n 'BytesPerPixel':int.from_bytes([data[6],data[7]],byteorder='little'),\n 'RowColOrder':int.from_bytes([data[8],data[9]],byteorder='little'),\n 'Yorigin':int.from_bytes([data[10],data[11]],byteorder='little'),\n 'Year':int.from_bytes([data[12],data[13]],byteorder='little'),\n 'Month':int.from_bytes([data[14],data[15]],byteorder='little'),\n 'Day':int.from_bytes([data[16],data[17]],byteorder='little'),\n 'Hour':int.from_bytes([data[18],data[19]],byteorder='little'),\n 'Minute':int.from_bytes([data[20],data[21]],byteorder='little'),\n 'Second':int.from_bytes([data[22],data[23]],byteorder='little'),\n 'IntensityCold':int.from_bytes([data[24],data[25]],byteorder='little',signed='true'), \n 'TemperatureCold': '%.1f' % st.unpack('> 12) == 0b0100:\n self.type = TypeOfFile.DIR\n else:\n self.type = TypeOfFile.OTHER\n\n def path(self):\n \"\"\"\n Get the path of the file from the backup root folder\n :return: the path\n \"\"\"\n if self.dir == \"\":\n path = self.name\n else:\n path = self.dir + \"/\" + self.name\n return path\n\n def meta_attrs(self):\n \"\"\"\n Get attributes in ISFTPFile format\n :return: the attribute dict\n \"\"\"\n permissions = self.permissions\n if self.type is TypeOfFile.DIR:\n permissions += 4096 * 4\n else:\n permissions += 4096 * 8\n\n return {\n \"uid\": self.uid,\n \"gid\": self.gid,\n \"atime\": self.creation_time,\n \"mtime\": self.modification_time,\n \"permissions\": permissions,\n }\n\n def db_id(self):\n \"\"\"\n The path in database format \",\"\n :return: the path\n \"\"\"\n return self.path().replace(\"/\", \",\")\n\n def db_representation(self):\n \"\"\"\n Interface from File object to database\n :return: the dictionary to give to db\n \"\"\"\n return {\n \"_id\": self.db_id(),\n \"size\": self.size,\n \"uid\": self.uid,\n \"gid\": self.gid,\n \"creation_time\": self.creation_time,\n \"modification_time\": self.modification_time,\n \"permissions\": self.permissions,\n \"type\": self.type.value\n }\n\n @staticmethod\n def import_db_file(file_db):\n \"\"\"\n Interface from database to a File object\n :param file_db:\n :return: resulting File object\n \"\"\"\n path = file_db[\"_id\"].rsplit(',', 1)\n\n if len(path) == 1:\n directory = \"\"\n name = file_db[\"_id\"]\n else:\n directory = path[0].replace(',', '/')\n name = path[1]\n\n file = File(directory, name)\n\n file.size = file_db[\"size\"]\n file.uid = file_db[\"uid\"]\n file.gid = file_db[\"gid\"]\n file.permissions = file_db[\"permissions\"]\n file.creation_time = file_db[\"creation_time\"]\n file.modification_time = file_db[\"modification_time\"]\n file.type = TypeOfFile(file_db[\"type\"])\n\n return file\n\n def __str__(self):\n return \"{} ({}) at {}\".format(self.name, self.type.name, self.dir)\n\n def __repr__(self):\n return self.__str__()\n","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186189081","text":"import cv2\nimport threading\n\n\nclass CamThread(threading.Thread):\n def __init__(self, PreviewName, CamId):\n threading.Thread.__init__(self)\n self.PreviewName = 
PreviewName\n self.CamId = CamId\n\n def run(self):\n print('Starting ' + self.PreviewName)\n CamPreview(self.PreviewName, self.CamId)\n\n\n\ndef CamPreview(PreviewName, CamId, x=0):\n #cv2.namedWindow(PreviewName)\n cam = cv2.VideoCapture(CamId)\n if cam.isOpened():\n rval, frame = cam.read()\n else:\n rval = False\n\n while rval:\n rval, frame = cam.read()\n #cv2.imshow(PreviewName, frame)\n\n cv2.imwrite('captura/' + str(PreviewName) + ' Frame' + str(x) + '.jpg', frame)\n x += 1\n\n key = cv2.waitKey(1000)\n if key == 27: # exit on ESC\n break\n cv2.destroyWindow(PreviewName)\n\n\n# Create the threads\n\nthread1 = CamThread(\"Camera 1\", 1)\nthread2 = CamThread(\"Camera 2\", 2)\nthread3 = CamThread(\"Camera 3\", 3)\nthread4 = CamThread(\"Camera 4\", 4)\nthread1.start()\nthread2.start()\nthread3.start()\nthread4.start()\n","sub_path":"teste_captura_n_abre.py","file_name":"teste_captura_n_abre.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70190525","text":"from mrjob.job import MRJob\r\n\r\n\r\nclass MRMinTemperature(MRJob):\r\n \"\"\"\r\n Remember to set the data in the python runner\r\n 1st arg. ../Data/temperatures/1800.csv\r\n \"\"\"\r\n\r\n def mapper(self, _, line):\r\n (location, _, type, data, *_) = line.split(',')\r\n if type == 'TMIN':\r\n temperature = data\r\n yield location, temperature\r\n\r\n def reducer(self, location, temps):\r\n yield location, \"{}C\".format(min(temps))\r\n\r\n\r\nif __name__ == '__main__':\r\n MRMinTemperature.run()\r\n","sub_path":"Temperature/min_temperatures.py","file_name":"min_temperatures.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612586746","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys, os\nimport time\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\nclass RequestPlugin(object):\n\n def __str__(self):\n x=[]\n if self.title:\n x.append('title={0}'.format(self.title))\n if self.epstrat:\n x.append('epstrat={0}'.format(self.epstrat))\n if self.epend:\n x.append('epend={0}'.format(self.epend)) \n return ' '.join(x)\n\n def __init__(self,title=None, epstrat=None, epend=None):\n self.title=title\n self.epstrat=epstrat\n self.epend=epend\n \n \n \nclass ResponsePlugin(object):\n \n def __str__(self):\n x=[]\n if self.title:\n x.append('title={0}'.format(self.title))\n if self.link:\n x.append('link={0}'.format(self.link))\n if self.episode:\n x.append('episode={0}'.format(self.episode))\n return ' '.join(x)\n \n def __init__(self, title=None, link=None,episode=None):\n self.title = title\n self.link=link\n self.episode=episode\n\n\n","sub_path":"airtrap/beans/.~c9_invoke_iPFZ1q.py","file_name":".~c9_invoke_iPFZ1q.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312471105","text":"import salabim as sim\r\n\r\ndef animation():\r\n env.animation_parameters(synced=False, modelname='Job shop', background_color='20%gray')\r\n \r\n max_len = 0\r\n for i, group in enumerate(groups):\r\n x = i * 70 + 100 + 2\r\n y = env.height() - 140 + 20\r\n sim.AnimateText(text=group.name(), x=x, y=y, text_anchor='sw', fontsize=12)\r\n for j, machine in enumerate(group.machines):\r\n sim.AnimateRectangle(spec=(0,0,60,12), x=x, y=y - 20 - j * 15,\r\n fillcolor=lambda machine, t: 
machine.group.color if machine.task else (machine.group.color,100),\n textcolor='white',\n text=lambda machine, t: machine.task.name() if machine.task else '',\n arg=machine)\n \n max_len = max(max_len, len(group.machines))\n \n env.y_top = y - max_len * 15 - 15\n sim.AnimateLine(spec=(0, env.y_top, 2000, env.y_top))\n sim.AnimateText(text='job', x=50, y=env.y_top - 15, text_anchor='ne', fontsize=12)\n sim.AnimateText(text='slack', x=90, y=env.y_top - 15, text_anchor='ne', fontsize=12)\n\nclass Group(sim.Component):\n def setup(self, job_select_method, fraction, number_of_machines, color):\n if job_select_method.lower() == 'fifo':\n self.job_select = self.job_select_fifo\n elif job_select_method.lower() == 'min_slack':\n self.job_select = self.job_select_min_slack\n else:\n raise AssertionError('wrong selection_method:', job_select_method)\n self.machines = [Machine(group=self, name=self.name() + '.') for _ in range(number_of_machines)]\n\n self.fraction = fraction\n self.color = color\n self.jobs = sim.Queue(self.name() + '.jobs')\n self.idle_machines = sim.Queue(self.name() + '.idle_machines')\n\n def job_select_fifo(self):\n return self.jobs.head()\n\n def job_select_min_slack(self):\n return min(self.jobs, key=lambda job: job.slack_t(env.now()))\n\n\nclass Machine(sim.Component):\n def setup(self, group):\n self.group = group\n self.task = None\n\n def process(self):\n while True:\n self.task = None\n self.enter(self.group.idle_machines)\n while not self.group.jobs: # use while instead of if, to avoid any problems with multiple activates\n yield self.passivate()\n self.leave(self.group.idle_machines)\n job = self.group.job_select()\n job.slack -= (env.now() - job.enter_time(self.group.jobs))\n job.leave(self.group.jobs)\n self.task = job.tasks.head()\n self.task.machine = self\n self.task.start_execution = env.now()\n yield self.hold(self.task.duration)\n self.task.leave(job.tasks)\n self.task.an_bar.remove()\n\n if job.tasks:\n task1 = job.tasks.head()\n job.enter(task1.group.jobs)\n if task1.group.idle_machines:\n task1.group.idle_machines.head().activate()\n else:\n for ao in (job.an_due, job.an_slack, job.an_label, job.an_execute, job.an_tasks):\n ao.remove()\n job.leave(plant)\n\n\nclass JobGenerator(sim.Component):\n def setup(self, inter_arrival_time_dist, number_of_tasks_dist, group_dist, duration_dist):\n self.inter_arrival_time_dist = sim.Exponential(8)\n self.number_of_tasks_dist = sim.IntUniform(1, 9)\n self.group_dist = group_dist\n self.duration_dist = duration_dist\n\n def process(self):\n while True:\n yield self.hold(self.inter_arrival_time_dist())\n Job(job_generator=self)\n\n\nclass Job(sim.Component):\n def setup(self, job_generator):\n self.tasks = sim.Queue(fill=[Task(job_generator=job_generator, job=self,\n name='Task ' + str(self.sequence_number()) + '.')\n for _ in range(job_generator.number_of_tasks_dist())], name='tasks.')\n self.task_in_execution = sim.Queue(name='task_in_execution.')\n self.slack = start_slack\n self.an_slack = sim.AnimateText(x=90, y=self.y,\n text=lambda job,t:'{:7.2f}'.format(job.slack_t(t)),\n textcolor=lambda job,t: 'white' if job.slack_t(t)<0 else '50%gray',\n fontsize=12, text_anchor='se', arg=self)\n self.an_label = sim.AnimateText(text=str(self.sequence_number()),\n x=50, y=self.y,\n fontsize=12, text_anchor='se')\n self.an_execute = sim.AnimateRectangle(spec=(0,0,72,12),\n x=100, y=self.y,\n fillcolor=lambda job, t: '' if job.tasks[0].start_execution is None else job.tasks[0].group.color,\n text=lambda job,t: '' if 
job.tasks[0].start_execution is None else job.tasks[0].machine.name(),\n textcolor='white', text_anchor='sw', arg=self)\n self.an_due = sim.AnimateLine(spec=(0,-1, 0, 13),\n x=lambda job,t: 200 + job.due_t(t) * scale_x, y=self.y,\n layer = -1,\n arg=self)\n self.an_tasks = sim.AnimateQueue(queue=self.tasks,\n x=200,\n y = self.y,\n direction='e',\n arg=self)\n self.enter(self.tasks[0].group.jobs)\n if self.tasks.head().group.idle_machines:\n self.tasks.head().group.idle_machines.head().activate()\n self.enter(plant)\n\n def y(self, t):\n return env.y_top - 45 - self.index(plant) * 15\n \n def due_t(self, t):\n due_t = self.slack_t(t)\n for task in self.tasks:\n if task.start_execution is None:\n due_t += task.duration\n else:\n due_t += task.duration - (t - task.start_execution)\n return due_t \n \n def slack_t(self, t):\n task1 = self.tasks.head()\n\n if self in task1.group.jobs:\n return self.slack - (t - self.enter_time(task1.group.jobs))\n else:\n return self.slack\n\nclass Task(sim.Component):\n def setup(self, job_generator, job):\n self.group = job_generator.group_dist()\n self.duration = job_generator.duration_dist()\n self.start_execution = None\n self.an_bar = sim.Animate(rectangle0=(0, 0, 0, 0))\n \n def animation_objects(self):\n ao0 = sim.AnimateRectangle(\n spec=lambda task,t: (0,0, (task.duration - (0 if task.start_execution is None else (t-task.start_execution))) * scale_x, 12),\n fillcolor=lambda task, t: (task.group.color, 80) if task.start_execution is None else task.group.color,\n arg=self)\n return lambda task,t: (task.duration - (0 if task.start_execution is None else (t-task.start_execution))) * scale_x, 0, ao0\n \n\nsim.reset()\nenv = sim.Environment(trace=False)\n\ngroups = []\nwith sim.ItemFile('job shop.txt') as f:\n job_select_method = f.read_item()\n\n while True:\n name = f.read_item()\n if name == '//':\n break\n number_of_machines = f.read_item_int()\n fraction = f.read_item_float()\n color = f.read_item()\n groups.append(Group(name=name, job_select_method=job_select_method,\n fraction=fraction, number_of_machines=number_of_machines, color=color))\n\n duration_dist = sim.Distribution(f.read_item())\n inter_arrival_time_dist = sim.Distribution(f.read_item())\n number_of_tasks_dist = sim.Distribution(f.read_item())\n start_slack = f.read_item_float()\n\nplant = sim.Queue('plant')\n\ngroup_dist = sim.Pdf(groups, probabilities=[group.fraction for group in groups])\n\nJobGenerator(inter_arrival_time_dist=inter_arrival_time_dist, number_of_tasks_dist=number_of_tasks_dist,\n group_dist=group_dist, duration_dist=duration_dist)\n\nscale_x = 1\n\nanimation()\nenv.run(100000)\n\nplant.print_statistics()\nplant.print_info()\n","sub_path":"Job shop animated.py","file_name":"Job shop animated.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606231444","text":"import json\n\ndef main():\n logFile = open('sampleLogFile.txt','r')\n #print('logFile is :',logFile)\n logFileString = str(logFile)\n #print('logFileString is =',logFileString)\n i=0\n for line in logFile:\n i=i+1\n print('i =',i)\n #print(line.split()[5:])\n logJsonArr = line.split()[5:]\n JsonStr = \" \".join(logJsonArr)\n #print(JsonStr)\n if len(JsonStr)>0 and JsonStr[0]=='{':\n logJson = json.loads(JsonStr)\n print(logJson)\n print(logJson['Status'])\n\n #regex = '{/S*}'\n # regex = r\"\\s([{\\[].*?[}\\]])$\"\n # match = re.findall(regex,logFileString)\n # print(match)\n # print('--------------------')\n # 
regex2 = '[{\\[].*?[}\\]]'\n # match = re.findall(regex,logFileString)\n # print(match)\n # logJSONArr = logFileString.split(\" \")\n #\n # #JSONStr = \" \".join(logJSONArr)\n #\n # #logJSON = json.loads(JSONStr)\n # print(logJSONArr)\n # for i in logJSONArr:\n # print(i)\n\n\n\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"regexLogFileSearch.py","file_name":"regexLogFileSearch.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"566086056","text":"#Tic Tac Toe project with GUI because im bored\nimport sys\nimport random\nfrom tkinter import *\nfrom tkinter import messagebox\n\n#GUI stuff\nroot = Tk()\nroot.title(\"Tic Tac Toe\")\n\n#Some global variables, may fix later(probably not)\n###################################################\nplayers_turn = False\ngame_over = False\ncpu = 'X' #Default cpu starts\nplayer = 'O'\nboard = []\n###################################################\n\n#Functions\n\ndef initialize_game(player_starts):\n\t#initializes the buttons of the game-grid, the grid , who starts\n\n\tglobal b1, b2, b3, b4, b5, b6, b7, b8, b9, b10\n\tglobal players_turn, cpu, player\n\n\t#Set who starts the game\n\tif player_starts:\n\t\tplayer = 'X'\n\t\tcpu = 'O'\n\t\tplayers_turn = True\n\telse:\n\t\tcpu = 'X'\n\t\tplayer = 'O'\n\t\tplayers_turn = False\n\n\t#build board, a matrix pararell to the gui grid\n\tboard = create_board()\n\n\t#Build buttons (9 buttons for the tic tac toe board and one to tell the cpu to play)\n\tb1 = Button(root, text= ' ' , font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b1))\n\tb2 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b2))\n\tb3 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b3))\n\n\tb4 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b4))\n\tb5 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b5))\n\tb6 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b6))\n\n\tb7 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b7))\n\tb8 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b8))\n\tb9 = Button(root, text= ' ', font=(\"Helvetica\", 20), height=3, width=6, bg=\"SystemButtonFace\", command=lambda: b_click(b9))\n\n\tb10 = Button(root, text= \"CPU PLAY!\", font=(\"Helvetica\", 20), height=3, width=10, bg=\"SystemButtonFace\", command=lambda: b_click(b10))\n\n\t#Grid our buttons to the screen\n\tb1.grid(row=0, column=0)\n\tb2.grid(row=0, column=1)\n\tb3.grid(row=0, column=2)\n\n\tb4.grid(row=1, column=0)\n\tb5.grid(row=1, column=1)\n\tb6.grid(row=1, column=2)\n\n\tb7.grid(row=2, column=0)\n\tb8.grid(row=2, column=1)\n\tb9.grid(row=2, column=2)\n\n\tb10.grid(row=1, column=3)\n\ndef disable_all_buttons():\n\tb1.config(state=DISABLED)\n\tb2.config(state=DISABLED)\n\tb3.config(state=DISABLED)\n\tb4.config(state=DISABLED)\n\tb5.config(state=DISABLED)\n\tb6.config(state=DISABLED)\n\tb7.config(state=DISABLED)\n\tb8.config(state=DISABLED)\n\tb9.config(state=DISABLED)\n\tb10.config(state=DISABLED)\n\n\ndef 
b_click(button):\n\t#function that decides what happens every time a button is clicked\n\n\tglobal players_turn, player, cpu, board, game_over\n\n\tif button[\"text\"] == ' ' and players_turn == True: #checks for valid player move, and checks the state of the game (win or tie) afterwards\n\t\tbutton[\"text\"] = player\n\t\tplayers_turn = False\n\t\tboard = copy_grid(board)\n\n\t\tif is_winner(player,board):\n\t\t\tgame_over = True\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"Player won the game\\n\")\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"CPU mission failed, we'll get em next time\\n\")\n\t\t\tdisable_all_buttons()\n\t\t\treturn\n\n\t\tif board_full(board):\n\t\t\tgame_over = True\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"Game ends as a tie\\n\")\n\t\t\tdisable_all_buttons()\n\t\t\treturn\n\n\telif button[\"text\"] == \"CPU PLAY!\" and players_turn == False: #checks for valid computer move, and checks the state of the game (win or tie) afterwards\n\t\tboard = cpu_make_move(cpu, player, board)\n\t\tplayers_turn = True\n\n\t\tif is_winner(cpu,board):\n\t\t\tgame_over = True\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"CPU won the game\\n\")\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"Player mission failed, we'll get em next time\\n\")\n\t\t\tdisable_all_buttons()\n\t\t\treturn\n\n\t\tif board_full(board):\n\t\t\tgame_over = True\n\t\t\tmessagebox.showinfo(\"Tic Tac Toe\", \"Game ends as a tie\\n\")\n\t\t\tdisable_all_buttons()\n\t\t\treturn\n\n\telif button[\"text\"] != \"CPU PLAY!\" and players_turn == True and button[\"text\"] != ' ':\n\t\tmessagebox.showerror(\"Tic Tac Toe\", \"Square already taken, make another move\\n\")\n\n\telif button[\"text\"] == \"CPU PLAY!\" and players_turn == True:\n\t\tmessagebox.showerror(\"Tic Tac Toe\", \"Not cpu turn, stop trying to crash my game\\n\")\n\n\telif button[\"text\"] != \"CPU PLAY!\" and players_turn == False:\n\t\tmessagebox.showerror(\"Tic Tac Toe\", \"Cpu turn to play, stop trying to crash my game\\n\")\n\ndef create_board():\n\t#creates board to be used parallel to the gui grid (the board will be an exact copy of the grid at all times)\n\tglobal board\n\tboard = []\n\tfor i in range(10):\n\t\tboard.append(' ')\n\treturn board\n\ndef is_winner(player, board):\n\treturn ((board[1] == player and board[2] == player and board[3] == player) or\n\t\t (board[4] == player and board[5] == player and board[6] == player) or\n\t\t (board[7] == player and board[8] == player and board[9] == player) or\n\t\t (board[1] == player and board[4] == player and board[7] == player) or\n\t\t (board[2] == player and board[5] == player and board[8] == player) or\n\t\t (board[3] == player and board[6] == player and board[9] == player) or\n\t\t (board[1] == player and board[5] == player and board[9] == player) or\n\t\t (board[3] == player and board[5] == player and board[7] == player))\n\ndef copy_grid(board):\n\t#the board will be used parallel to the gui grid and thus must be an exact copy of the grid at all times\n\tboard[1] = b1[\"text\"]\n\tboard[2] = b2[\"text\"]\n\tboard[3] = b3[\"text\"]\n\tboard[4] = b4[\"text\"]\n\tboard[5] = b5[\"text\"]\n\tboard[6] = b6[\"text\"]\n\tboard[7] = b7[\"text\"]\n\tboard[8] = b8[\"text\"]\n\tboard[9] = b9[\"text\"]\n\treturn board\n\ndef change_grid_at(pos):\n\tglobal cpu\n\tif pos == 1:\n\t\tb1[\"text\"] = cpu\n\telif pos == 2:\n\t\tb2[\"text\"] = cpu\n\telif pos == 3:\n\t\tb3[\"text\"] = cpu\n\telif pos == 4:\n\t\tb4[\"text\"] = cpu\n\telif pos == 5:\n\t\tb5[\"text\"] = cpu\n\telif pos == 6:\n\t\tb6[\"text\"] = 
cpu\n\telif pos == 7:\n\t\tb7[\"text\"] = cpu\n\telif pos == 8:\n\t\tb8[\"text\"] = cpu\n\telif pos == 9:\n\t\tb9[\"text\"] = cpu\n\ndef random_select(list):\n\t#returns random index from list\n\treturn random.choice(list)\n\ndef is_empty(board, square):\n\treturn board[square] == ' '\n\ndef board_full(board):\n\tfor i in range(1,10):\n\t\tif board[i] == ' ':\n\t\t\treturn False\n\treturn True\n\ndef board_empty(board):\n\tcount = 0\n\tfor i in range(1,10):\n\t\tif is_empty(board,i):\n\t\t\tcount = count + 1\n\treturn count == 9\n\ndef cpu_make_move(cpu, player, board):\n\n\tpotential_moves = [] #a list of all potential cpu moves as decided by our algorithm, then a random list element selector is called to randomize choice\n\n\t#step 0 of algorithm: opening move be played in center or corner\n\tif board_empty(board) == True:\n\t\tsquare = random_select([1,3,5,7,9])\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 1 of algorithm : if there is a winner position for cpu, play it\n\tfor i in range(1,10):\n\t\tif is_empty(board,i):\n\t\t\tboard[i] = cpu\n\t\t\tif is_winner(cpu,board):\n\t\t\t\tpotential_moves.append(i)\n\t\t\tboard[i] = ' '\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 2 of algorithm : if there is a winner position for opponent, block it\n\tfor i in range(1,10):\n\t\tif is_empty(board,i):\n\t\t\tboard[i] = player\n\t\t\tif is_winner(player,board):\n\t\t\t\tboard[i] = cpu\n\t\t\t\tchange_grid_at(i)\n\t\t\t\treturn board\n\t\t\telse:\n\t\t\t\tboard[i] = ' '\n\n\t#step 3 of algorithm : if there is a fork opportunity, play it\n\tfor i in range(1,10):\n\t\tcount = 0 #counts ways to win for each move\n\t\tif is_empty(board,i):\n\t\t\tboard[i] = cpu\n\t\t\tfor j in range(1,10):\n\t\t\t\tif is_empty(board,j):\n\t\t\t\t\tboard[j] = cpu\n\t\t\t\t\tif is_winner(cpu,board):\n\t\t\t\t\t\tcount = count + 1\n\t\t\t\t\tboard[j] = ' '\n\n\t\t\tif count > 1: #checks if there is indeed a fork (two ways to win after move)\n\t\t\t\tpotential_moves.append(i)\n\t\t\tboard[i] = ' '\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 4 of algorithm : if there is a fork oppportunity for opponent, block it\n\tfor i in range(1,10):\n\t\tcount = 0\n\t\tif is_empty(board,i):\n\t\t\tboard[i] = player\n\t\t\tfor j in range(1,10):\n\t\t\t\tif is_empty(board,j):\n\t\t\t\t\tboard[j] = player\n\t\t\t\t\tif is_winner(player,board):\n\t\t\t\t\t\tcount = count + 1\n\t\t\t\t\tboard[j] = ' '\n\n\t\t\tif count > 1: #checks if there is indeed a fork (two ways to win after move)\n\t\t\t\tpotential_moves.append(i)\n\t\t\tboard[i] = ' '\n\n\tif len(potential_moves) == 1: #if there is only one possible opponent fork, then block it\n\t\tboard[potential_moves[0]] = cpu\n\t\tchange_grid_at(potential_moves[0])\n\t\treturn board\n\n\telif len(potential_moves) > 1: #if there are more than one potential opponent forks, then cpu makes a move to make opponent defend, while avoiding forks\n\t\tpotential_moves = []\n\t\tfor i in range(1,10):\n\t\t\tcount = 0\n\t\t\tif is_empty(board,i):\n\t\t\t\tboard[i] = cpu\n\t\t\t\tfor j in range(1,10):\n\t\t\t\t\tif is_empty(board,j):\n\t\t\t\t\t\tboard[j] = cpu\n\t\t\t\t\t\tif is_winner(cpu,board): #cpu makes move to make opponent defend\n\t\t\t\t\t\t\tboard[j] = player #the player will defend to avoid loss, and we want this move not to lead to any forks for 
opponent\n\t\t\t\t\t\t\tfor k in range(1,10):\n\t\t\t\t\t\t\t\tif is_empty(board,k):\n\t\t\t\t\t\t\t\t\tboard[k] = player\n\t\t\t\t\t\t\t\t\tif is_winner(player,board):\n\t\t\t\t\t\t\t\t\t\tcount = count + 1\n\t\t\t\t\t\t\t\t\tboard[k] = ' '\n\t\t\t\t\t\t\tif count <= 1: #if there are no forks possible from move i, then append it into the possible moves list , note: if no i satisfies this, then no matter what, opponent has won the game\n\t\t\t\t\t\t\t\tpotential_moves.append(i)\n\t\t\t\t\t\tboard[j] = ' '\n\t\t\t\tboard[i] = ' '\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 5 of algorithm : cpu marks the center\n\tcenter = 5\n\tif is_empty(board,center) == True:\n\t\tboard[center] = cpu\n\t\tchange_grid_at(center)\n\t\treturn board\n\n\t#step 6 of algorithm: cpu plays an opposite corner\n\tif board[1] == player and is_empty(board,9):\n\t\tpotential_moves.append(9)\n\telif board[9] == player and is_empty(board,1):\n\t\tpotential_moves.append(1)\n\telif board[3] == player and is_empty(board,7):\n\t\tpotential_moves.append(7)\n\telif board[7] == player and is_empty(board,3):\n\t\tpotential_moves.append(3)\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 7 of algorithm : cpu marks an empty square\n\tif is_empty(board,1):\n\t\tpotential_moves.append(1)\n\telif is_empty(board,3):\n\t\tpotential_moves.append(3)\n\telif is_empty(board,7):\n\t\tpotential_moves.append(7)\n\telif is_empty(board,9):\n\t\tpotential_moves.append(9)\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\t#step 8 of algorithm : cpu marks the middle of any of the 4 sides\n\tfor i in range(2,9,2):\n\t\tif is_empty(board,i):\n\t\t\tpotential_moves.append(i)\n\n\tif potential_moves:\n\t\tsquare = random_select(potential_moves)\n\t\tboard[square] = cpu\n\t\tchange_grid_at(square)\n\t\treturn board\n\n\n\n\n\n\n\n\n###########################################################################################\n\n#Create menu\nmy_menu = Menu(root)\nroot.config(menu=my_menu)\n\n#Create options menu\noptions_menu = Menu(my_menu, tearoff=False)\nmy_menu.add_cascade(label=\"Options\", menu=options_menu )\noptions_menu.add_command(label=\"Reset Game: Computer starts\", command=lambda: initialize_game(0))\noptions_menu.add_command(label=\"Reset Game: Player starts\", command=lambda: initialize_game(1))\n\n#main program\ninitialize_game(0)\n\nroot.mainloop()\n","sub_path":"toe.py","file_name":"toe.py","file_ext":"py","file_size_in_byte":11629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"512511908","text":"class Read(object):\n \"\"\"\n Represents a single SAM read\n \"\"\"\n def __init__(self, name, gene, pos, cigar, seq):\n self.name = name\n self.gene = gene\n self.start = int(pos)\n self.cigar = cigar\n self.seq = seq\n self.end = int(pos) + getCigarLen(cigar)\n \n def __str__(self):\n return '(%s, %d)' % (self.name, self.start)\n\n def getBase(self, pos):\n if (pos - self.start < 0) or (self.end - pos < 0):\n return \"\"\n target = pos - self.start\n temp = \"\"\n seqPos = 0\n refPos = 0\n for c in self.cigar:\n if c.isdigit():\n temp += c\n elif c == \"M\":\n if refPos + int(temp) > target:\n if seqPos+target-refPos < len(self.seq):\n return self.seq[seqPos+target-refPos]\n return 
\"\"\n refPos += int(temp)\n seqPos += int(temp)\n temp = \"\"\n elif c == \"I\":\n seqPos += int(temp)\n temp = \"\"\n elif c == \"D\":\n if refPos + int(temp) > target:\n return \"-\"\n refPos += int(temp)\n temp = \"\"\n return \"\"\n\n def isInRange(self, cpgs):\n for cpg in cpgs:\n if self.gene == cpg.gene and len(self.getBase(cpg.pos)) > 0: return True\n return False\n\ndef getCigarLen(cigar):\n temp = \"\"\n seqPos = 0\n for c in cigar:\n if c.isdigit():\n temp += c\n elif c == \"M\":\n seqPos += int(temp)\n temp = \"\"\n elif c == \"I\":\n seqPos += int(temp)\n temp = \"\"\n elif c == \"D\":\n temp = \"\"\n return seqPos","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"540724976","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 11 16:06:36 2018\n\n@author: blanchm\n\"\"\"\n\nimport argparse\nfrom Bio import SeqIO\n \n\ndef get_args(): \n \n parser = argparse.ArgumentParser(description = 'this script filters out sequences from a FASTA file that are not in the given list')\n \n parser.add_argument('-fa', help='name of FASTA file', dest = \"fasta_file\", type = str, required = True) \n \n #add an optional argument, the length cutoff for our filter \n \n parser.add_argument(\"-exc\", \"--exclude\", help = \"filter seq. that are in this list\", dest = 'filter_list', type = str) \n \n parser.add_argument(\"-out\", help = \"output fasta file\", dest = 'out_fasta', type = str )\n \n #parse the arg \n \n return parser.parse_args()\n\n\nargs = get_args()\n\n \n# Rename args variables \nfile_fasta = args.fasta_file #input fasta file to be filtered\n\n# List that will be excluded from input fasta file \nseq_left_out = args.filter_list\n\n#Output file name that will be generated \n\noutput_fasta_file = args.out_fasta \n\nprint(\" We're going to filter this FASTA file\", file_fasta) \n\nprint(\"These sequences will be excluded from the new fasta file \", seq_left_out)\n\nprint(\"The filtered output will be saved as\", output_fasta_file)\n\n\n\n# function that will generate a list of the sequences to be excludeed \ndef list_given():\n\n # read the given list and generate a set with ids \n\n identifiers = set([])\n\n with open(seq_left_out, 'r') as file:\n for line in file:\n line = line.strip()\n identifiers.add(str(line).replace(\">\", \"\"))\n\n return identifiers\n\n# Filter function will write the new fasta file without the ids in identifiers\n \ndef filter():\n\n identifiers = list_given()\n\n with open(file_fasta) as original_fasta, open(output_fasta_file, 'w') as filtered_fasta:\n records = SeqIO.parse(original_fasta, 'fasta')\n for record in records:\n print (record.id)\n if record.id not in identifiers: #only writes those files that do not contain the identifier in list given \n SeqIO.write(record, filtered_fasta, 'fasta')\n\nif __name__ == '__main__':\n filter()\n \n ","sub_path":"python_scripts/filter_fasta_from_list.py","file_name":"filter_fasta_from_list.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"647411666","text":"import discord\nfrom discord.ext import commands\nimport asyncio\n\nimport checks\n\nclass Admin:\n \"\"\"Administration commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(hidden=True)\n @checks.is_owner()\n async def load(self, *, module : str):\n \"\"\"Loads 
a module.\"\"\"\n try:\n self.bot.load_extension(module)\n except Exception as e:\n await self.bot.say('\\N{THINKING FACE}')\n await self.bot.say('{}: {}'.format(type(e).__name__, e))\n else:\n await self.bot.say('\\N{HEAVY LARGE CIRCLE}')\n\n @commands.command(hidden=True)\n @checks.is_owner()\n async def unload(self, *, module : str):\n \"\"\"Unloads a module.\"\"\"\n try:\n self.bot.unload_extension(module)\n except Exception as e:\n await self.bot.say('\\N{THINKING FACE}')\n await self.bot.say('{}: {}'.format(type(e).__name__, e))\n else:\n await self.bot.say('\\N{HEAVY LARGE CIRCLE}')\n\n @commands.command(name='reload', hidden=True)\n @checks.is_owner()\n async def _reload(self, *, module : str):\n \"\"\"Reloads a module.\"\"\"\n try:\n self.bot.unload_extension(module)\n self.bot.load_extension(module)\n except Exception as e:\n await self.bot.say('\\N{THINKING FACE}')\n await self.bot.say('{}: {}'.format(type(e).__name__, e))\n else:\n await self.bot.say('\\N{HEAVY LARGE CIRCLE}')\n\n @commands.command(hidden=True)\n @checks.mod_or_permissions()\n async def restart(self):\n await self.bot.say('Bye!')\n await self.bot.logout()\n exit(1)\n\n\ndef setup(bot):\n bot.add_cog(Admin(bot))\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"118660977","text":"import numpy as np\nimport keras\nimport json\nimport csv\n\nfrom keras.layers import Dense, Activation, Embedding, Bidirectional, GRU, Flatten, Dropout\nfrom keras import regularizers\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n\n\n# Data processing\nTrollsData = [] # label:1\nNonTrollsData = [] # label:0\nwith open('../data/new.csv', 'r', encoding=\"utf-8\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n content = row['tweet']\n label = int(row['class'])\n if label == 0 or label == 1:\n label = 1\n else:\n label = 0\n Data = {\n \"content\": content,\n \"annotation\": label\n }\n if label == 0:\n NonTrollsData.append(Data)\n else:\n TrollsData.append(Data)\nprint('Total samples: %d, troll samples: %d, non-troll samples: %d' % (len(TrollsData) + len(NonTrollsData), len(TrollsData), len(NonTrollsData)))\n# print(json.dumps(data))\n\nData = TrollsData + NonTrollsData\nWordIdx = json.load(open('word.json'))\nWordIdxLen = len(WordIdx)\n\nmodel = keras.models.Sequential()\nmodel.add(Embedding(input_dim=WordIdxLen + 1, output_dim=50, mask_zero=True))\nmodel.add(Bidirectional(GRU(64, return_sequences=False, dropout=0.4)))\nmodel.add(Dense(units=32, activation='relu', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(Dropout(0.2, noise_shape=None, seed=None))\nmodel.add(Dense(units=16, activation='relu', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(Dense(units=1, activation='sigmoid'))\nmodel.load_weights(\"weights.best.hdf5\")\nmodel.compile(\n optimizer='adam',\n loss='binary_crossentropy',\n metrics=['binary_accuracy'],\n)\n\n# Pad the token sequences\nmax_str_len = 0\nfor d in Data:\n d['content'] = keras.preprocessing.text.text_to_word_sequence(d['content'])\n if len(d['content']) > max_str_len:\n max_str_len = len(d['content'])\nTrainData = []\nTrainLabel = []\nfor d in Data:\n vec = [WordIdx.get(i, WordIdx['_UNK_']) for i in d['content']]\n pad = WordIdx.get('_PAD_')\n temp = keras.preprocessing.sequence.pad_sequences(sequences=[vec], maxlen=max_str_len, value=pad)\n TrainData.append(temp[0])\n TrainLabel.append(d['annotation'])\n\n# Split train and test sets\nsplit = 1\ntrainNum = 
int(len(TrainData) * split)\nfinalTestData = TrainData[0:trainNum]\nfinalTestLabel = TrainLabel[0:trainNum]\n\n# Evaluate the model\nloss, accuracy = model.evaluate(np.asarray(finalTestData), np.asarray(finalTestLabel))\nprint('accuracy', accuracy)\nprint('loss', loss)\n\n# Model predictions\npreds = model.predict(np.asarray(finalTestData))\nfor i in range(len(preds)):\n if preds[i] > 0.5:\n preds[i] = 1\n else:\n preds[i] = 0\n\ny_predict = [int(item) for item in preds]\nprint('num of test predictions')\nprint(len(y_predict))\n\nprint('acc')\nacc = accuracy_score(y_predict, np.asarray(finalTestLabel))\nprint(acc)\n\nprint('precision')\nprecision = precision_score(y_predict, np.asarray(finalTestLabel), average='weighted')\nprint(precision)\n\nprint('recall')\nrecall = recall_score(y_predict, np.asarray(finalTestLabel), average='weighted')\nprint(recall)\n\nprint('matrix')\nmatrix = confusion_matrix(y_predict, np.asarray(finalTestLabel))\nprint(matrix)\n","sub_path":"model_less_prepro/test_without_new_dataset.py","file_name":"test_without_new_dataset.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36223726","text":"#======================================================================================================\n# !/usr/bin/env python\n# title : FWC_P1_Simulator.py\n# description : Semiconductor Fab Wide Control using FDC, VM, R2R, L2L\n# author : Youngil Jung\n# date : 2018-07-31\n# version : v0.8\n# usage : python GlobalW2W_FWC_Run.py\n# notes : Reference Paper \"An Approach for Factory-Wide Control Utilizing Virtual Metrology\"\n# python_version : v3.5.3\n#======================================================================================================\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn import metrics\n\nclass VM_Process1_DynamicSampling_시뮬레이터:\n metric = 0\n QueueZ = 0\n QueueSize = 0\n\n def __init__(self, A, d, C, dM, dStart, seed):\n self.pls = PLSRegression(n_components=6, scale=False, max_iter=50000, copy=True)\n np.random.seed(seed)\n self.A = A\n self.d = d\n self.C = C\n self.dM = dM\n self.dStart = dStart\n\n def setParemeter(self, A, d, C):\n self.A = A\n self.d = d\n self.C = C\n\n def setdMParemeter(self, dM, dStart):\n self.dM = dM\n self.dStart = dStart\n\n def sampling_up(self):\n u1 = np.random.normal(0.4, np.sqrt(0.2))\n u2 = np.random.normal(0.6, np.sqrt(0.2))\n u = np.array([u1, u2])\n return u\n\n def sampling_vp(self):\n v1 = np.random.normal(1, np.sqrt(0.2))\n v2 = 2 * v1\n v3 = np.random.uniform(0.2, 1.2)\n v4 = 3 * v3\n v5 = np.random.uniform(0, 0.4)\n v6 = np.random.normal(-0.6, np.sqrt(0.2))\n\n v = np.array([v1, v2, v3, v4, v5, v6])\n return v\n\n def sampling_ep(self):\n e1 = np.random.normal(0, np.sqrt(0.1))\n e2 = np.random.normal(0, np.sqrt(0.2))\n e = np.array([e1, e2])\n return e\n\n def sampling(self, k, uk=np.array([0, 0]), vp=np.array([0, 0, 0, 0, 0, 0]), ep=np.array([0, 0]), isInit=True):\n u1 = uk[0]\n u2 = uk[1]\n u = uk\n\n v1 = vp[0]\n v2 = vp[1]\n v3 = vp[2]\n v4 = vp[3]\n v5 = vp[4]\n v6 = vp[5]\n\n v = vp\n e = ep\n\n if isInit == True:\n k1 = k % 100\n k2 = k % 200\n e = np.array([0, 0]) # DoE runs are actual samples, so there is no noise term e\n else:\n k1 = k % 100 # entity #1 maintenance event when n = 100\n k2 = k % 200 # entity #1 maintenance event when n = 200\n eta_k = np.array([[k1], [k2]])\n\n psi = np.array([u1, u2, v1, v2, v3, v4, v5, v6, k1, k2])\n y = u.dot(self.A) + 
v.dot(self.C) + np.sum(eta_k * self.d, axis=0) + e\n rows = np.r_[psi, y]\n idx_end = len(rows)\n idx_start = idx_end - 2\n return idx_start, idx_end, rows\n\n def pls_update(self, V, Y):\n self.pls.fit(V, Y)\n return self.pls\n\n def setDoE_Mean(self, DoE_Mean):\n self.DoE_Mean = DoE_Mean\n\n def getDoE_Mean(self):\n return self.DoE_Mean\n\n def setPlsWindow(self, PlsWindow):\n self.PlsWindow = PlsWindow\n\n def getPlsWindow(self):\n return self.PlsWindow\n\n def DoE_Run(self, lamda_PLS, Z, M): ##12, 10\n self.QueueZ = Z\n self.QueueSize = Z * M\n DoE_Queue = []\n\n for k in range(1, self.QueueSize + 1): # range(101) = [1, 2, ..., 120])\n idx_start, idx_end, result = self.sampling(k, self.sampling_up(), self.sampling_vp(), self.sampling_ep(), True)\n DoE_Queue.append(result)\n\n initplsWindow = DoE_Queue.copy()\n npPlsWindow = np.array(initplsWindow)\n\n plsWindow = []\n\n for z in np.arange(0, Z):\n npPlsWindow[z * M:(z + 1) * M - 1, 0:idx_start] = lamda_PLS * npPlsWindow[z * M:(z + 1) * M - 1, 0:idx_start]\n npPlsWindow[z * M:(z + 1) * M - 1, idx_start:idx_end] = lamda_PLS * (npPlsWindow[z * M:(z + 1) * M - 1, idx_start:idx_end])\n\n for i in range(len(npPlsWindow)):\n plsWindow.append(npPlsWindow[i])\n\n npDoE_Queue = np.array(plsWindow)\n DoE_Mean = np.mean(npDoE_Queue, axis=0)\n\n plsModelData = npDoE_Queue - DoE_Mean\n V0 = plsModelData[:, 0:idx_start]\n Y0 = plsModelData[:, idx_start:idx_end]\n\n pls = self.pls_update(V0, Y0)\n\n #print('Init VM Coefficients: \\n', pls.coef_)\n\n y_prd = pls.predict(V0) + DoE_Mean[idx_start:idx_end]\n y_act = npDoE_Queue[:, idx_start:idx_end]\n\n print(\"Init DoE VM Mean squared error: %.4f\" % metrics.mean_squared_error(y_act[:,0:1], y_prd[:,0:1]))\n print(\"Init DoE VM r2 score: %.4f\" % metrics.r2_score(y_act[:,0:1], y_prd[:,0:1]))\n\n self.setDoE_Mean(DoE_Mean)\n self.setPlsWindow(plsWindow)\n # self.plt_show1(N, y_act[:,0:1], y_prd[:,0:1])\n\n def VM_Run(self, lamda_PLS, Z, M):\n ## V0, Y0 Mean Center\n DoE_Mean = self.getDoE_Mean()\n idx_end = len(DoE_Mean)\n idx_start = idx_end - 2\n meanVz = DoE_Mean[0:idx_start]\n meanYz = DoE_Mean[idx_start:idx_end]\n\n M_Queue = []\n ez_Queue = []\n ez_Queue.append([0, 0])\n y_act = []\n y_prd = []\n VM_Output = []\n ACT_Output = []\n #pre_M = M\n\n plsWindow = self.getPlsWindow()\n DYNAMIC = False\n end = 0\n for z in np.arange(0, Z):\n if z + 1 >= self.dStart:\n DYNAMIC = True\n M = self.dM\n start = end\n end = start + M\n else:\n start = z * M + 1\n end = ((z + 1) * M) + 1\n\n for k in np.arange(start, end):\n idx_start, idx_end, result = self.sampling(k, self.sampling_up(), self.sampling_vp(), self.sampling_ep(), False)\n psiK = result[0:idx_start]\n psiKStar = psiK - meanVz\n y_predK = self.pls.predict(psiKStar.reshape(1, idx_start)) + meanYz\n rows = np.r_[result, y_predK.reshape(2, )]\n M_Queue.append(rows)\n\n y_prd.append(rows[idx_end:idx_end + 2])\n y_act.append(rows[idx_start:idx_end])\n\n # if z == self.dStart:\n # plsWindow = []\n\n # if DYNAMIC:\n # delCount = (self.dM + 1) * self.QueueZ\n # delSize = self.QueueSize - delCount\n # if len(plsWindow) == 60:\n # del plsWindow[0:delSize]\n # else:\n # del plsWindow[0:pre_M]\n\n # Skipped here since this is the normal case\n # if z + 1 == self.dStart:\n # delCount = self.dM * self.QueueZ - self.dM\n # delSize = self.QueueSize - delCount\n # del plsWindow[0:delSize]\n\n del plsWindow[0:M]\n\n #del plsWindow[0:pre_M]\n\n # for i in range(M): # compute VM_Output; 
the lamda_pls weight is not applied here\n # if i == M - 1:\n # temp = npM_Queue[i:i + 1, idx_start:idx_end]\n # else:\n # temp = npM_Queue[i:i + 1, idx_end:idx_end + 2]\n # VM_Output.append(np.array([temp[0, 0], temp[0, 1]]))\n\n ez = M_Queue[M - 1][idx_start:idx_end] - M_Queue[M - 1][idx_end:idx_end + 2]\n ez_Queue.append(ez)\n\n if z == 0:\n ez = np.array([0, 0])\n npVM_Queue = np.array(M_Queue)\n npACT_Queue = np.array(M_Queue)\n\n npVM_Queue[0:M - 1, 0:idx_start] = lamda_PLS * npVM_Queue[0:M - 1, 0:idx_start]\n npVM_Queue[0:M - 1, idx_start:idx_end] = lamda_PLS * (npVM_Queue[0:M - 1, idx_end:idx_end + 2] + 0.5 * ez) # + 0.5 * ez\n npVM_Queue = npVM_Queue[:, 0:idx_end] ## keep the VM values from idx_start through idx_end\n\n npACT_Queue[0:M - 1, 0:idx_start] = lamda_PLS * npACT_Queue[0:M - 1, 0:idx_start]\n npACT_Queue[0:M - 1, idx_start:idx_end] = lamda_PLS * npACT_Queue[0:M - 1, idx_start:idx_end]\n npACT_Queue = npACT_Queue[:, 0:idx_end] ## keep the actual values from idx_start through idx_end\n\n for i in range(M): # compute VM_Output; apply the lamda_pls weight so the next calculation is simpler\n if i == M - 1:\n temp = npACT_Queue[i:i + 1, idx_start:idx_end]\n else:\n temp = npVM_Queue[i:i + 1, idx_start:idx_end]\n VM_Output.append(np.array([temp[0, 0], temp[0, 1]]))\n temp = npACT_Queue[i:i + 1, idx_start:idx_end]\n ACT_Output.append(np.array([temp[0, 0], temp[0, 1]]))\n\n for i in range(M):\n plsWindow.append(npVM_Queue[i])\n\n M_Mean = np.mean(plsWindow, axis=0)\n meanVz = M_Mean[0:idx_start]\n meanYz = M_Mean[idx_start:idx_end]\n\n plsModelData = plsWindow - M_Mean\n V = plsModelData[:, 0:idx_start]\n Y = plsModelData[:, idx_start:idx_end]\n\n self.pls_update(V, Y)\n\n del M_Queue[0:M]\n\n y_act = np.array(y_act)\n y_prd = np.array(y_prd)\n ez_all_run = y_act - y_prd\n\n self.metric = metrics.explained_variance_score(y_act[:,0:1], y_prd[:,0:1])\n print(\"VM Mean squared error: %.3f\" % metrics.mean_squared_error(y_act[:,0:1], y_prd[:,0:1]))\n print(\"explained_variance_score: %.3f\" % self.metric)\n print(\"VM r2 score: %.3f\" % metrics.r2_score(y_act[:,0:1], y_prd[:,0:1]))\n print(\"pls.coef_: \", self.pls.coef_)\n ez_run = np.array(ez_Queue)\n\n VM_Output = np.array(VM_Output)\n ACT_Output = np.array(ACT_Output)\n\n return VM_Output, ACT_Output, ez_run, y_act, y_prd, ez_all_run\n","sub_path":"VM_Process1_DynamicSampling_시뮬레이터.py","file_name":"VM_Process1_DynamicSampling_시뮬레이터.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"522668984","text":"import json\n\nfrom key import auth_key, auth_key_ds, key_monitoring, key_cdb1, key_cdb2\nfrom tenders.tender_additional_data import above_threshold_procurement, below_threshold_procurement\n\n\ndef tender_host_selector(cdb_version):\n if cdb_version == 'dev':\n host = 'https://api-sandbox.prozorro.openprocurement.net/api/dev/tenders'\n host_public = 'https://public.api-sandbox.prozorro.openprocurement.net/api/dev/tenders'\n else:\n host = 'https://lb.api-sandbox.openprocurement.org/api/2.4/tenders'\n host_public = 'https://public.api-sandbox.openprocurement.org/api/2.4/tenders'\n return host, host_public\n\n\ndef tender_ds_host_selector(cdb_version):\n if cdb_version == 'dev':\n host = 'https://upload.docs-sandbox.prozorro.openprocurement.net/upload'\n else:\n host = 'https://upload.docs-sandbox.openprocurement.org/upload'\n return host\n\n\nmonitoring_host = 'https://audit-api-sandbox.prozorro.gov.ua/api/2.4/monitorings'\n\n\ndef tender_headers_request(json_data):\n headers = {\"Authorization\": \"Basic {}\".format(auth_key),\n 
\"Content-Length\": \"{}\".format(len(json.dumps(json_data))),\n \"Content-Type\": \"application/json\"}\n return headers\n\n\ntender_headers_add_document_ds = {'authorization': \"Basic {}\".format(auth_key_ds)}\ntender_headers_patch_document_ds = {\n 'authorization': \"Basic {}\".format(auth_key),\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n }\nmonitoring_headers = {\"Authorization\": \"Basic {}\".format(key_monitoring),\n \"Content-Type\": \"application/json\"} # \"Host\": \"audit-api-sandbox.prozorro.gov.ua\"\n\n\ndef auction_host_selector(cdb_number):\n if cdb_number == 1:\n host = 'https://lb.api-sandbox.ea.openprocurement.org/api/2.5/auctions'\n else:\n host = 'https://lb.api-sandbox.ea2.openprocurement.net/api/2.3/auctions'\n return host\n\n\ndef auction_headers_request(cdb_number, json_data, token=None):\n if cdb_number == 1:\n key = key_cdb1\n host_headers = 'lb.api-sandbox.ea.openprocurement.org'\n else:\n key = key_cdb2\n host_headers = 'lb.api-sandbox.ea2.openprocurement.net'\n headers = {\"Authorization\": \"Basic {}\".format(key),\n \"Content-Length\": \"{}\".format(len(json.dumps(json_data))),\n \"Content-Type\": \"application/json\",\n \"X-Access-Token\": token,\n \"Host\": host_headers}\n return headers\n\n\ndef privatization_host_selector(entity):\n if entity == 'asset':\n host = 'https://lb.api-sandbox.ea2.openprocurement.net/api/2.3/assets'\n elif entity == 'transfer':\n host = 'https://lb.api-sandbox.ea2.openprocurement.net/api/2.3/transfers'\n else:\n host = 'https://lb.api-sandbox.ea2.openprocurement.net/api/2.3/lots'\n return host\n\n\ndef json_status(status):\n status_json = {\"data\": {\"status\": status}}\n return status_json\n\n\ndef json_activate_tender(procurement_method):\n if procurement_method in above_threshold_procurement:\n activate_tender_json = json_status('active.tendering')\n elif procurement_method in below_threshold_procurement:\n activate_tender_json = json_status('active.enquiries')\n else:\n activate_tender_json = json_status('active')\n return activate_tender_json\n\n\nprequalification_approve_bid_json = {\n \"data\": {\n \"status\": \"active\",\n \"qualified\": True,\n \"eligible\": True\n }\n}\n\n\ndef activate_award_limited_json(procurement_method):\n json_activate_award_limited = json_status('active')\n if procurement_method != 'reporting':\n json_activate_award_limited['data']['qualified'] = True\n return json_activate_award_limited\n\n\ntransfer_json = {\"data\": {}}\n\n\ndef json_activate_auction_p(token):\n json_activate = json_status('active.tendering')\n json_activate['access'] = {\"token\": token}\n return json_activate\n","sub_path":"data_for_requests.py","file_name":"data_for_requests.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389909950","text":"from functools import partial\nimport logging\n\nimport msgpack\nimport msgpack_numpy as mpn\n\nfrom confluent_kafka.cimpl import KafkaException\nimport numpy as np\nimport pickle\nimport pytest\n\nfrom bluesky_kafka import Publisher, BlueskyConsumer\nfrom bluesky_kafka.tests.conftest import get_all_documents_from_queue\nfrom bluesky.plans import count\nfrom event_model import sanitize_doc\n\n# mpn.patch() is recommended by msgpack-numpy as a way\n# to patch msgpack but it caused a utf-8 decode error\nmpn.patch()\n\nlogging.getLogger(\"bluesky.kafka\").setLevel(\"DEBUG\")\n\n\n# the Kafka test broker should be configured with\n# 
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true\n\n\ndef test_producer_config():\n test_topic = \"test.producer.config\"\n kafka_publisher = Publisher(\n topic=test_topic,\n bootstrap_servers=\"1.2.3.4:9092\",\n key=\"kafka-unit-test-key\",\n # work with a single broker\n producer_config={\n \"bootstrap.servers\": \"5.6.7.8:9092\",\n \"acks\": 1,\n \"enable.idempotence\": False,\n \"request.timeout.ms\": 5000,\n },\n )\n\n assert (\n kafka_publisher._producer_config[\"bootstrap.servers\"]\n == \"1.2.3.4:9092,5.6.7.8:9092\"\n )\n\n\ndef test_get_cluster_metadata(publisher_factory):\n # the topic test.get.cluster.metadata will be created\n # by the call to publisher.get_cluster_metadata\n # if automatic topic creation is enabled\n # otherwise this test will fail\n publisher = publisher_factory(topic=\"test.get.cluster.metadata\")\n cluster_metadata = publisher.get_cluster_metadata()\n assert \"test.get.cluster.metadata\" in cluster_metadata.topics\n\n\ndef test_get_cluster_metadata_failure(publisher_factory):\n publisher = publisher_factory(\n topic=\"test.get.cluster.metadata.failure\",\n bootstrap_servers=\"5.6.7.8:9092\"\n )\n with pytest.raises(KafkaException):\n publisher.get_cluster_metadata()\n\n\ndef test_consumer_config():\n test_topic = \"test.consumer.config\"\n bluesky_consumer = BlueskyConsumer(\n topics=[test_topic],\n bootstrap_servers=\"1.2.3.4:9092\",\n group_id=\"abc\",\n consumer_config={\n \"bootstrap.servers\": \"5.6.7.8:9092\",\n \"auto.offset.reset\": \"latest\",\n },\n )\n\n assert (\n bluesky_consumer._consumer_config[\"bootstrap.servers\"]\n == \"1.2.3.4:9092,5.6.7.8:9092\"\n )\n\n\ndef test_bad_consumer_config():\n test_topic = \"test.bad.consumer.config\"\n with pytest.raises(ValueError) as excinfo:\n BlueskyConsumer(\n topics=[test_topic],\n bootstrap_servers=\"1.2.3.4:9092\",\n group_id=\"abc\",\n consumer_config={\n \"bootstrap.servers\": \"5.6.7.8:9092\",\n \"auto.offset.reset\": \"latest\",\n \"group.id\": \"raise an exception!\",\n },\n )\n assert (\n \"do not specify 'group.id' in consumer_config, use only the 'group_id' argument\"\n in str(excinfo.value)\n )\n\n\n@pytest.mark.parametrize(\n \"serializer, deserializer\",\n [(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],\n)\ndef test_kafka_remote_dispatcher(\n RE,\n hw,\n serializer,\n deserializer,\n publisher_factory,\n remote_dispatcher_process_factory,\n external_process_document_queue,\n):\n # COMPONENT 1\n # a Kafka broker must be running\n # in addition the topic \"test.remote.dispatcher\" must exist\n # or the broker must be configured to create topics on demand (recommended)\n\n # COMPONENT 2\n # Run a RemoteDispatcher on a separate process. 
Pass the documents\n # it receives over a Queue to this process so we can count them for\n # our test.\n test_topic = \"test.remote.dispatcher\"\n with external_process_document_queue(\n topics=[test_topic],\n deserializer=deserializer,\n process_factory=remote_dispatcher_process_factory,\n ) as document_queue:\n # COMPONENT 3\n # Set up a RunEngine in this process that will\n # send all documents to a bluesky_kafka.Publisher\n # and accumulate all documents in the local_documents list\n kafka_publisher = publisher_factory(\n topic=test_topic, serializer=serializer, flush_on_stop_doc=True\n )\n RE.subscribe(kafka_publisher)\n\n local_documents = []\n RE.subscribe(\n lambda local_name, local_doc: local_documents.append(\n (local_name, local_doc)\n )\n )\n\n # test that numpy data is transmitted correctly\n md = {\n \"numpy_data\": {\"nested\": np.array([1, 2, 3])},\n \"numpy_scalar\": np.float64(3),\n \"numpy_array\": np.ones((3, 3)),\n }\n\n # documents will be generated by this plan\n # and published by the Kafka Publisher\n RE(count([hw.det]), md=md)\n\n # retrieve the documents published by the Kafka broker\n remote_documents = get_all_documents_from_queue(document_queue=document_queue)\n\n # sanitize_doc normalizes some document data, such as numpy arrays, that are\n # problematic for direct comparison of documents by \"assert\"\n sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]\n sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]\n\n assert len(sanitized_remote_documents) == len(sanitized_local_documents)\n assert sanitized_remote_documents == sanitized_local_documents\n\n\n@pytest.mark.parametrize(\n \"serializer, deserializer\",\n [(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],\n)\ndef test_bluesky_consumer(\n RE,\n hw,\n serializer,\n deserializer,\n publisher_factory,\n consumer_process_factory,\n external_process_document_queue,\n):\n # COMPONENT 1\n # a Kafka broker must be running\n # in addition the broker must have topic \"test.bluesky.consumer\"\n # or be configured to create topics on demand (recommended)\n\n # COMPONENT 2\n # Run a BlueskyConsumer polling loop in a separate process.\n # Pass the documents it receives over a Queue to this process\n # and compare them against the documents published directly\n # by the RunEngine.\n test_topic = \"test.bluesky.consumer\"\n with external_process_document_queue(\n topics=[test_topic],\n deserializer=deserializer,\n process_factory=partial(\n consumer_process_factory, consumer_factory=BlueskyConsumer\n ),\n ) as document_queue:\n # COMPONENT 3\n # Set up a RunEngine in this process that will\n # send all documents to a bluesky_kafka.Publisher\n # and accumulate all documents in the local_documents list\n kafka_publisher = publisher_factory(\n topic=test_topic, serializer=serializer, flush_on_stop_doc=True\n )\n RE.subscribe(kafka_publisher)\n\n local_documents = []\n RE.subscribe(\n lambda local_name, local_doc: local_documents.append(\n (local_name, local_doc)\n )\n )\n\n # test that numpy data is transmitted correctly\n md = {\n \"numpy_data\": {\"nested\": np.array([1, 2, 3])},\n \"numpy_scalar\": np.float64(3),\n \"numpy_array\": np.ones((3, 3)),\n }\n\n # documents will be generated by this plan\n # and published by the Kafka Publisher\n RE(count([hw.det]), md=md)\n\n # retrieve the documents published by the Kafka broker\n remote_documents = get_all_documents_from_queue(document_queue=document_queue)\n\n # sanitize_doc normalizes some document data, such 
as numpy arrays, that are\n # problematic for direct comparison of documents by \"assert\"\n sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]\n sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]\n\n assert len(sanitized_remote_documents) == len(sanitized_local_documents)\n assert sanitized_remote_documents == sanitized_local_documents\n","sub_path":"bluesky_kafka/tests/test_kafka.py","file_name":"test_kafka.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"423870229","text":"import vim\nimport webbrowser\n\nfrom abc import ABC, abstractmethod\n\n\nclass DocOn(ABC):\n allowed_browsers = [\n 'firefox',\n 'opera',\n 'windows-default',\n 'safari',\n 'google-chrome',\n 'chromium',\n ]\n\n @abstractmethod\n def build_url(self) -> str:\n pass\n\n def __init__(self):\n browser = vim.eval('g:docon_browser')\n browser = browser if browser in self.allowed_browsers else None\n self.controller = webbrowser.get(browser)\n\n self.behavior = int(vim.eval('g:docon_behavior'))\n\n def open(self):\n url = self.build_url()\n self.controller.open(url, self.behavior, False)\n","sub_path":"plugin/py3/docon/docon.py","file_name":"docon.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
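A minimal standalone sketch of the stdlib webbrowser pattern that DocOn (docon.py above) relies on, with the vim.eval settings replaced by hard-coded stand-ins; passing None to webbrowser.get falls back to the platform default, mirroring the class's own fallback.

import webbrowser

browser = None  # stand-in for vim.eval('g:docon_browser'); None -> default browser
controller = webbrowser.get(browser)
# new=2 requests a new tab where supported; autoraise=False mirrors DocOn.open
controller.open("https://docs.python.org/3/", 2, False)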
{"seq_id":"461460603","text":"# Tool for automated job applications\n# Please use with care\n# Only for German People Actually\n# For a learning Purpose\n# Author: PiereLucas\n\n# Modules\nimport colorama\nimport sys, subprocess, time\n\nfrom module import scraper\nfrom module import pdfcreator\nfrom module import mail\n\n\n\n# Class\n# Main class, handles the inputs and calls\nclass Bewerber:\n\n def __init__(self):\n # Attributes for the search\n # Job board\n self.jobboerse = 0\n # Search attributes\n self.boersesuch1 = \"\"\n self.boersesuchplz = \"\"\n self.boersebez = \"\"\n self.url = \"\"\n\n # Time\n self.zeit = time.localtime()\n\n # Banner method\n def banner(self):\n subprocess.call(\"clear\", shell=True)\n print(colorama.Fore.RED + \"-\" * 60)\n print(colorama.Fore.CYAN + \"Automatisches Bewerbungstool\")\n print(\"Author: PiereLucas\")\n print(\"Datum:\", self.zeit[:3])\n print(colorama.Fore.RED + \"-\" * 60 + colorama.Style.RESET_ALL)\n print()\n\n # Method for choosing the job board\n def jobboerse_waehlen(self):\n\n # Selection function\n print(\"Bitte waehle eine Jobboerse auf der du suchen moechtest\")\n print(\"Klassifizierung:\", \"[\"+self.boersesuch1+\"]\", \"Postleitzahl:\", \"[\"+self.boersesuchplz+\"]\")\n print()\n # More job boards to follow ...\n print(\"[0] Programm Beenden\\n[1] Neue Suche\\n[2] Jobnet\\n[3] Stepstone\")\n print()\n\n try:\n self.jobboerse = int(input(\"Wähle: \"))\n print()\n if self.jobboerse > 3 or self.jobboerse < 0:\n print(\"Falsche eingabe\")\n self.jobboerse_waehlen()\n elif self.jobboerse == 0:\n print(\"Programm beendet\")\n sys.exit()\n elif self.jobboerse == 1:\n self.eingabe()\n\n except ValueError:\n print()\n print(\"Falsche eingabe\")\n self.jobboerse_waehlen()\n except KeyboardInterrupt:\n print()\n print(\"Strg + C\")\n sys.exit()\n\n # Pass on the name of the job board\n # More job boards to follow ...\n if self.jobboerse == 2:\n self.boersebez = \"Jobnet\"\n elif self.jobboerse == 3:\n self.boersebez = \"Stepstone\"\n\n # Return an integer as the return value for the search function\n return self.jobboerse\n\n # Method for user input\n def eingabe(self):\n\n # Initialize the error loop\n eingabeva = False\n # Error loop\n while not eingabeva:\n # Input prompt\n print(\"Bitte gib deine Suchbegriffe ein! Die Jobboerse wird im anschluss gewaehlt\")\n print()\n\n try:\n self.boersesuch1 = str(input(\"Jobbezeichnung (Z.b. IT, Architekt, Studium, Schule ...): \"))\n self.boersesuchplz = int(input(\"Postleitzahl: \"))\n print()\n self.boersesuchplz = str(self.boersesuchplz)\n # End the loop\n eingabeva = True\n # Call the search function and choose the job board\n self.search(self.jobboerse_waehlen())\n\n except KeyboardInterrupt:\n print()\n print(\"Strg + C\")\n sys.exit()\n except ValueError:\n print()\n print(\"Falsche eingabe\")\n continue\n except:\n continue\n\n # Method to generate the result from the user input\n def search(self, boersennummer):\n\n # Print the chosen job board\n print(colorama.Fore.CYAN + \"URL:\")\n print(colorama.Fore.RED + \"-\" * 60 + colorama.Style.RESET_ALL)\n print(\"Ihre gewählte Joboerse: \", self.boersebez)\n # Generate the URL from the previous inputs\n # More job boards to follow ...\n if boersennummer == 2:\n self.url = (\"https://www.jobnet.de/suche?utf8=%E2%9C%93&query%5Btext%5D=\" + self.boersesuch1 + \"&query%5Blocation%5D=\" + self.boersesuchplz)\n print(self.url)\n print()\n elif boersennummer == 3:\n self.url = (\"https://www.stepstone.de/5/job-search-simple.html?stf=freeText&ns=1&qs=%5B%5D&companyID=0&cityID=0&sourceOfTheSearchField=homepagemex%3Ageneral&searchOrigin=Homepage_top-search&ke=\" + self.boersesuch1 + \"&ws=\" + self.boersesuchplz + \"&ra=30\")\n print(self.url)\n print()\n\n # Main method for the program flow\n def aufruf(self):\n\n # Banner and function call\n self.banner()\n self.eingabe()\n\n # Check the status of the URL\n scraper.statuscheck(self.url)\n # Error handling for the if statement\n try:\n if not scraper.statuscheck:\n print(\"Website ist nicht erreichbar :( Ein neuer versuch?\")\n x = input(\"[y/n]\")\n if \"y\" in x:\n self.aufruf()\n else:\n print()\n print(\"Bis zum nächsten mal\")\n sys.exit()\n else:\n print(\"Erfolg, Website ist erreichbar\")\n print()\n\n except KeyboardInterrupt:\n print()\n print(\"Strg + C\")\n sys.exit()\n except:\n print()\n print(\"Programm beendet\")\n sys.exit()\n\n # Main program loop\n mainschleife = True\n while mainschleife:\n # Hand the confirmed URL to the scraper\n scraper.scrappy(self.url, self.boersebez)\n # If the scraper returns True (end of the method or abort)\n # call the filechecker of the pdfcreator module\n if scraper.scrappy:\n pdfcreator.filechecker()\n # If the filechecker returns True (file exists and is not empty)\n # call creatty from the pdfcreator module and end the scraper loop\n if pdfcreator.filechecker:\n pdfcreator.creatty(self.boersebez)\n # Call the mailer\n if pdfcreator.creatty:\n mail.mailer()\n if mail.mailer:\n print()\n print(\"ERFOLG\")\n print(\"Programm beendet\")\n sys.exit()\n else:\n print()\n print(\"Fehler in mail.mailer\")\n print(\"Programm beendet\")\n sys.exit()\n else:\n print()\n print(\"Fehler in pdfcreator.creatty\")\n print(\"Programm beendet\")\n sys.exit()\n # If the filechecker returns False (file missing or empty)\n # restart the scraper loop\n else:\n print()\n print(\"Dateistruktur Fehlerhaft / Kein Zugriff / Nicht vorhanden\")\n sys.exit()\n else:\n print()\n print(\"Dateistruktur Fehlerhaft / Kein Zugriff / Nicht vorhanden\")\n sys.exit()\n\n# TO BE CONTINUED ...\n\n# Object\nbewerber = Bewerber()\n\n# Initial call\nbewerber.aufruf()\n","sub_path":"bewerber.py","file_name":"bewerber.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
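Not part of bewerber.py: a sketch of building the Jobnet search URL with urllib.parse instead of manual string concatenation, so the query parameters are percent-encoded automatically (the %5B/%5D brackets and %E2%9C%93 checkmark in the original URL are exactly what urlencode produces).

from urllib.parse import urlencode

def jobnet_url(text, plz):
    # query[text] / query[location] match the parameters used in search()
    return "https://www.jobnet.de/suche?" + urlencode(
        {"utf8": "✓", "query[text]": text, "query[location]": plz})

print(jobnet_url("IT", "10115"))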
{"seq_id":"25065381","text":"from PyQt5.QtWidgets import QApplication,QMainWindow,QLineEdit,QMessageBox\r\nfrom PyQt5 import uic\r\nfrom DB import DB\r\n\r\nclass App(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.win = uic.loadUi(\"veri1.ui\")\r\n self.ilDoldur()\r\n self.win.btGonder.clicked.connect(self.kaydet)\r\n self.win.cmbIl.currentIndexChanged.connect(self.tespit)\r\n self.win.show()\r\n\r\n def kaydet(self):\r\n adi = self.win.txtAd.text()\r\n soyadi = self.win.txtSoyad.text()\r\n il = self.win.cmbIl.currentIndex()\r\n ilce = self.win.cmbIlce.currentIndex()\r\n if self.win.rdbKadin.isChecked():\r\n cinsiyet = 0\r\n elif self.win.rdbErkek.isChecked():\r\n cinsiyet = 1\r\n db = DB()\r\n if db.ekleme(adi,soyadi,il,ilce,cinsiyet):\r\n elCevap = QMessageBox.question(self,\"Soru\",\"Kaydetmek İster misin?\",\\\r\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,\\\r\n QMessageBox.Yes)\r\n if elCevap == QMessageBox.Yes:\r\n QMessageBox.information(self,\"Bilgi\",\"Kaydedildi!.\")\r\n else:\r\n QMessageBox.warning(self,\"Hata\",\"Hata Var!.\")\r\n \r\n def ilDoldur(self):\r\n db = DB()\r\n liste = db.ilListele()\r\n self.win.cmbIl.addItem(\"Seçiniz\")\r\n for IlKod,IlAd in liste:\r\n self.win.cmbIl.addItem(IlAd)\r\n \r\n \r\n def ilceDoldur(self,il=\"1\"):\r\n db = DB()\r\n liste = db.ilceListele(il)\r\n self.win.cmbIlce.clear()\r\n self.win.cmbIlce.addItem(\"Seçiniz\")\r\n for IlceKod,IlceAd in liste:\r\n self.win.cmbIlce.addItem(IlceAd)\r\n\r\n\r\n def tespit(self):\r\n self.ilceDoldur(str(self.win.cmbIl.currentIndex()))\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n app = QApplication(sys.argv)\r\n ex = App()\r\n sys.exit(app.exec_())","sub_path":"veri1.py","file_name":"veri1.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"298781357","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, Http404\nimport datetime\nfrom django.core import urlresolvers\nfrom eventtools.periods import Month\nfrom django.utils.translation import ugettext as _\n\ndef occurrences(request, id, modeladmin):\n \n EventModel = modeladmin.model\n \n event = EventModel.objects.get(pk=id)\n \n hasnext = False\n hasprev = False\n period = None\n occurrences = event.get_all_occurrences_if_possible()\n if not occurrences:\n generators = event.generators.all()\n first = event.get_first_occurrence().start\n last = event.get_last_day()\n \n if 'year' in request.GET and 'month' in request.GET:\n period = Month(generators, datetime.datetime(int(request.GET.get('year')),int(request.GET.get('month')),1))\n else:\n now = datetime.datetime.now()\n if first > now:\n period = Month(generators, first)\n else:\n period = Month(generators, now)\n hasprev = first < period.start\n if not last:\n hasnext = True\n else:\n hasnext = last > period.end \n occurrences = period.get_even_hidden_occurrences()\n title = _(\"Select an occurrence to change\")\n \n admin_url_name = ('admin:%s_%s_change' % (EventModel._meta.app_label, event.OccurrenceModel.__name__)).lower()\n occ_change_url = urlresolvers.reverse(admin_url_name, 
args=(0,))[:-3] # we don't want a real parameter yet, so strip off the last /0/\n \n return render_to_response('admin/eventtools/list_occurrences.html', {\"event\": event, 'occurrences': occurrences, 'period': period, 'hasprev': hasprev, 'hasnext': hasnext, 'title': title, 'occ_change_url': occ_change_url, 'opts': EventModel._meta }, context_instance=RequestContext(request))\n\ndef make_exceptional_occurrence(request, event_id, gen_id, year, month, day, hour, minute, second, modeladmin):\n \n \n EventModel = modeladmin.model \n event = EventModel.objects.get(pk=int(event_id))\n \n # import pdb; pdb.set_trace()\n generator = get_object_or_404(event.GeneratorModel, id=int(gen_id))\n occurrence = generator.get_occurrence(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)))\n\n occurrence.save()\n OccurrenceModel = occurrence.__class__\n admin_url_name = ('admin:%s_%s_change' % (OccurrenceModel._meta.app_label, OccurrenceModel.__name__)).lower()\n event_change_url = urlresolvers.reverse(admin_url_name, args=(occurrence.id,))\n return HttpResponseRedirect(event_change_url)\n","sub_path":"eventtools/adminviews.py","file_name":"adminviews.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286300881","text":"import imagesize\nfrom avcv.utils import *\nfrom avcv.vision import *\nimport pprint\n\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('subset', choices=['training', 'validation'])\n# parser.add_argument('--arguments', '-a', nargs='+', default=None)\nargs = parser.parse_args()\n\n\npp = pprint.PrettyPrinter(4)\n\n# sample = read_json('/root/detr/data/coco/coco-panoptic/annotations/panoptic_val2017.json')\n\n\n\n#---- Create categories\ncategories = []\nconfig_data = read_json(\"/mnt/vinai/haianh/mappilari-v2/config_v2.0.json\")\n\n\nname_to_cat_id = dict()\nfor cat_id, label in enumerate(config_data['labels']):\n cat = {'supercategory': 'unknown', 'isthing': label['instances'], 'id': cat_id, 'name': label['name'], 'readable':label['readable']}\n name_to_cat_id[label['name']] = cat_id\n categories += [cat]\n\n\n# Create images list\n\nVIS = None # './cache/vis'\nsegment_id = 0\ndef get_segment_id():\n global segment_id\n segment_id += 1\n return segment_id\n\nresize = 0.25\n\nimages = []\nannotations = []\ndata_root = \"/mnt/vinai/haianh/mappilari-v2/\"\n\nsubset = args.subset\n# subset = \"validation\" # comment this if you want to generate training\nimage_dir = osp.join(data_root, subset, \"images\")\n\nimage_paths = get_paths(image_dir, 'jpg')\nassert len(image_paths), image_dir\n\nresize_rate = int(resize*100)\nout_root = f\"./data/mapillary_{resize_rate}\"\nimage_dir_out = osp.join(out_root, subset, \"images\")\nannotation_dir_out = osp.join(out_root, 'annotations')\npanoptic_dir_out = osp.join(annotation_dir_out, \"panoptic_val2017\" if \"val\" in subset else \"panoptic_train2017\")\n\ndef get_polygon(filename):\n json_path = osp.join(data_root, subset, \"v2.0/polygons\", filename.split('.')[0]+'.json')\n return read_json(json_path)\n\ndef get_random_color(id):\n np.random.seed(id)\n return tuple(np.random.choice(256, 3).tolist())\n\ndef f(inp):\n image_id, path = inp\n file_name = osp.basename(path)\n\n\n polygons = get_polygon(file_name)\n objects, height, width = [polygons[k] for k in [\"objects\",\"height\",\"width\"]]\n \n if resize != 1:\n img = mmcv.imread(path)\n assert resize >0.2 and resize <1, resize\n img = 
cv2.resize(img, None, fx=resize, fy=resize)\n path = osp.join(image_dir_out, file_name)\n mmcv.imwrite(img, path)\n height = int(height*resize)\n width = int(width*resize)\n\n panoptic = np.zeros_like(img)\n\n image = {'file_name': file_name,\n 'height': height,\n 'width': width,\n 'id': image_id}\n\n # pp.pprint(image)\n\n annotation = dict(\n image_id=image_id,\n file_name=file_name,\n segments_info=[]\n )\n if VIS:\n # mask = np.zeros([height, width, 3], 'uint8')\n \n img = mmcv.imread(path)\n mask = img.copy()\n\n for object in objects:\n segment_id = get_segment_id()\n cat_id = name_to_cat_id[object['label']]\n \n cnt = np.array(object['polygon'])\n if resize != 1:\n cnt = (cnt*resize).astype('int')\n cv2.drawContours(panoptic, [cnt], -1, get_random_color(segment_id), -1)\n\n\n cnt = cnt.astype('int')\n x,y,w,h = cv2.boundingRect(cnt)\n if VIS:\n color = tuple(np.random.choice(256, 3).tolist())\n cv2.drawContours(mask, [cnt], -1, color, -1)\n\n segment_info = {'id': segment_id,\n 'category_id': cat_id,\n 'iscrowd': 0,\n 'bbox': [x,y,w,h],\n 'area': h*w}\n annotation['segments_info'].append(segment_info)\n \n if resize != 1:\n out_panoptic_path = osp.join(panoptic_dir_out, file_name).replace('.jpg', '.png')\n mmcv.imwrite(panoptic, out_panoptic_path)\n\n if VIS:\n vis = (img*0.5+mask*0.5).astype('uint8')\n mmcv.imwrite(vis, osp.join(VIS, file_name))\n annotations.append(annotation)\n images.append(image)\n\nmulti_thread(f, enumerate(image_paths), verbose=True, max_workers=16)\n\n\nouput = dict(\n images=images,\n annotations=annotations,\n categories=categories,\n)\nmmcv.dump(ouput, osp.join(annotation_dir_out, subset+\".json\"))","sub_path":"create_panoptic_dataset.py","file_name":"create_panoptic_dataset.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"562695107","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.urls import resolve\nfrom ..views import home, yards\nfrom ..models import Yard\nfrom django.contrib.auth.models import User\nfrom ..forms import NewYardForm\n\n# Create your tests here.\nclass HomeTests(TestCase):\n def setUp(self):\n User.objects.create_user(username='john', email='john@doe.com', password='123')\n self.yard = Yard.objects.create(description=\"This is a test\", created_by = User.objects.first())\n url = reverse('home')\n self.response = self.client.get(url)\n\n\n def test_home_view_status_code(self):\n url = reverse('home')\n response = self.client.get(url)\n self.assertEquals(response.status_code, 200)\n\n def test_home_url_resolves_home_view(self):\n view = resolve('/')\n self.assertEquals(view.func, home)\n\n def test_home_view_contains_link_to_topics_page(self):\n yard_url = reverse('yards', kwargs={'pk': self.yard.pk})\n self.assertContains(self.response, 'href=\"{0}\"'.format(yard_url))\n\nclass YardTests(TestCase):\n def setUp(self):\n User.objects.create_user(username='john', email='john@doe.com', password='123')\n Yard.objects.create(description=\"This is a test\", created_by = User.objects.first())\n\n def test_yard_view_success_status_code(self):\n url = reverse('yards', kwargs={'pk': 1})\n response = self.client.get(url)\n self.assertEquals(response.status_code, 200)\n\n #def test_yard_view_not_found_status_code(self):\n #url = reverse('yards', kwargs={'pk':99})\n #response = self.client.get(url)\n #self.assertEquals(response.status_code,404)\n\n def test_yard_url_resolves_yard_view(self):\n view = 
resolve('/yards/1/')\n self.assertEquals(view.func, yards)\n\n def test_yard_view_contains_link_back_to_homepage(self):\n yards_url = reverse('yards', kwargs={'pk' : 1})\n response = self.client.get(yards_url)\n homepage_url = reverse('home')\n self.assertContains(response, 'href=\"{0}\"'.format(homepage_url))\n\nclass NewYardTests(TestCase):\n def test_contains_form(self): # <- new test\n url = reverse('new_yard')\n response = self.client.get(url)\n form = response.context.get('form')\n self.assertIsInstance(form, NewYardForm)\n\n def test_new_yard_invalid_post_data(self): # <- updated this one\n '''\n Invalid post data should not redirect\n The expected behavior is to show the form again with validation errors\n '''\n url = reverse('new_yard')\n response = self.client.post(url, {})\n form = response.context.get('form')\n self.assertEquals(response.status_code, 200)\n self.assertTrue(form.errors)","sub_path":"colonymgr/test/test_view_home.py","file_name":"test_view_home.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190311863","text":"from source.question_answer import QA\nfrom source.shape_checker import get_triangle_type, get_object_shape_type_2\nfrom source.job_story_end import check_current_time\nfrom git_utils import check_valid_path, get_git_file_info, get_file_info, get_repo_branch, get_repo_url, is_file_in_repo\n\nimport difflib\n\nNOT_A_QUESTION_RETURN = \"Was that a question?\"\nUNKNOWN_QUESTION = \"I don't know, please provide the answer\"\nNO_QUESTION = 'Please ask a question first'\nNO_TEACH = 'I don\\'t know about that. I was taught differently'\n\n\nclass Interface(object):\n def __init__(self):\n self.how_dict = {}\n self.what_dict = {}\n self.where_dict = {}\n self.who_dict = {}\n\n self.keywords = ['How', 'What', 'Where', 'Who', 'Why', 'Is']\n self.question_mark = chr(0x3F)\n\n self.question_answers = {\n 'What type of triangle is ': QA('What type of triangle is ', get_triangle_type),\n 'What type of quadrilateral is ': QA('What type of quadrilateral is ', get_object_shape_type_2),\n 'What time is it': QA('What time is it', check_current_time()),\n 'How many days?': QA('How many days?', 'How many days?'),\n 'Is the file path in the repo': QA('Is the in the repo', is_file_in_repo),\n 'What is the status of the file path?'\n : QA('What is the status of the file path', get_git_file_info),\n 'What is the deal with the file path'\n : QA('What is the deal with the file path', get_file_info),\n 'What branch is file path'\n : QA('What branch is file path?',get_repo_branch),\n 'What is the repo url'\n : QA('What is the path file come from?', get_repo_url)\n\n\n }\n self.last_question = None\n\n def ask(self, question=\"\"):\n\n \"\"\"\n func that takes questions\n :param question:\n \"\"\"\n if not isinstance(question, str):\n self.last_question = None\n raise Exception('Not A String!')\n if question[-1] != self.question_mark or question.split(' ')[0] not in self.keywords:\n self.last_question = None\n return NOT_A_QUESTION_RETURN\n else:\n parsed_question = \"\"\n args = []\n for keyword in question[:-1].split(' '):\n try:\n args.append(float(keyword))\n if keyword[0] == \"[\" and keyword[-1] == \"]\":\n args.append(keyword)\n\n except:\n parsed_question += \"{0} \".format(keyword)\n parsed_question = parsed_question[0:-1]\n self.last_question = parsed_question\n for answer in self.question_answers.values():\n if difflib.SequenceMatcher(a=answer.question, 
b=parsed_question).ratio() >= 0.90:\n if answer.function is None:\n return answer.value\n else:\n try:\n return answer.function(*args)\n except:\n raise Exception(\"Too many extra parameters\")\n else:\n return UNKNOWN_QUESTION\n\n def teach(self, answer=\"\"):\n \"\"\"\n\n :param answer:\n :return:\n \"\"\"\n if self.last_question is None:\n return NO_QUESTION\n elif self.last_question in self.question_answers.keys():\n return NO_TEACH\n else:\n self.__add_answer(answer)\n\n def correct(self, answer=\"\"):\n \"\"\"\n\n :param answer:\n :return:\n \"\"\"\n if self.last_question is None:\n return NO_QUESTION\n else:\n self.__add_answer(answer)\n\n def __add_answer(self, answer):\n self.question_answers[self.last_question] = QA(self.last_question, answer)\n\n def delete(self):\n \"\"\"\n func that checks for delete\n :rtype: object\n \"\"\"\n self.question_answers = {}\n return \"deleted\"\n\n def checkdelete(self):\n if self.question_answers == {}:\n return \"deleted\"\n else:\n return 1\n def wrapper_ask(self, ask):\n def out_put_to_log():\n outfile = open(\"log.txt\", \"w\")\n for STR in ask():\n outfile.write(STR)\n\n\n\n","sub_path":"cst236_lab7/cst236_lab4/source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
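A small illustration (not in main.py above) of the scale behind the 0.90 threshold fix in Interface.ask: difflib.SequenceMatcher.ratio() returns a float in [0, 1], so the original comparison against 90 could never be true and every question fell through to UNKNOWN_QUESTION.

import difflib

r = difflib.SequenceMatcher(a="What time is it", b="What time is it now").ratio()
print(round(r, 2))  # ~0.88, i.e. just below a 0.90 cutoff
assert 0.0 <= r <= 1.0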
{"seq_id":"448056642","text":"from datetime import datetime\nimport numpy as np\nfrom pydataset import data as data\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom datetime import timedelta\nimport datetime, time \nimport statistics\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# from 1/1/1939\npayems = pd.read_csv(\"C:/Users/suket/Desktop/Homeworks/Computation/Week2/Problem8/payems.csv\", header = 15, names=[\"date\", \"payems\"])\n\n# For managing the first segment's data; it is different from the others.\npayems_0 = pd.read_csv(\"C:/Users/suket/Desktop/Homeworks/Computation/Week2/Problem8/payems.csv\", header = 5, names=[\"date\", \"payems\"])\nfirst_period_index = pd.date_range(start = \"7/1/1929\", end = \"7/1/1936\", freq=\"12MS\")\n\npayems_0 = payems_0[0:8]\npayems_0.index = first_period_index\n\n# Setting time index\npayems.index = pd.date_range(start = \"1/1/1939\", end = \"10/1/2016\", freq=\"MS\")\n\n# Setting segments\npayems_list = [payems_0, payems[\"1936-05\":\"1944-05\"], payems[\"1944-02\":\"1952-02\"], \\\n payems[\"1947-11\":\"1955-11\"], payems[\"1952-6\":\"1960-6\"],\\\n payems[\"1956-08\":\"1964-08\"], payems[\"1959-04\":\"1967-04\"], \\\n payems[\"1968-12\":\"1976-12\"], payems[\"1972-11\":\"1980-11\"], \\\n payems[\"1979-01\":\"1987-01\"], payems[\"1980-07\":\"1988-07\"], \\\n payems[\"1989-07\":\"1997-07\"], payems[\"2000-03\":\"2008-03\"], payems[\"2006-12\":\"2014-12\"]]\n\n# (b) Normalize\npeak_date= [\"7/1/29\", \"1/1/39\", \"2/1/45\", \"11/1/48\", \"6/1/53\", \"8/1/57\",\"4/1/60\", \"12/1/69\",\"11/1/73\",\\\n \"1/1/80\",\"7/1/81\",\"7/1/90\",\"3/1/01\",\"12/1/07\"]\n\n\nfor i in range(len(payems_list)):\n payems_list[i][\"Normalized\"] = payems_list[i][\"payems\"] / ( payems_list[i][payems_list[i][\"date\"] == peak_date[i]][\"payems\"][0] )\n \n\n# (c), (d), (e), (f), (i) Plot each segment\n\n# For merging data in order to plot on the same axis.\nCount_index = []\nfor k in range(97):\n Count_index.append(k)\n \nCount_index_for_first_period = []\nfor m in range(8):\n Count_index_for_first_period.append(11 + 12*m) \n\nCount_index_for_second_period = []\nfor n in range(65):\n Count_index_for_second_period.append(n+32) \n\n\n\nfor l in range(len(payems_list)):\n if l == 0:\n payems_list[l][\"Count\"] = Count_index_for_first_period\n elif l == 1:\n payems_list[l][\"Count\"] = Count_index_for_second_period\n else:\n payems_list[l][\"Count\"] = Count_index\n\nlinestyles = ['-', '--', '-.', ':','-', '--', '-.', ':','-', '--', '-.', ':', '-', '--']\n\nfor j in range(len(payems_list)):\n if j == 0:\n plt.plot(payems_list[j][\"Count\"], payems_list[j][\"Normalized\"], color = \"black\", linewidth =5, linestyle = \"-\", label = peak_date[j] + \" Great Depression\" )\n elif j == 13:\n plt.plot(payems_list[j][\"Count\"], payems_list[j][\"Normalized\"], color = \"red\", linewidth =5, linestyle = \"-\", label = peak_date[j] + \" Great Recession\" )\n else:\n plt.plot(payems_list[j][\"Count\"], payems_list[j][\"Normalized\"], linestyle = linestyles[j], label = peak_date[j] )\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \nplt.title(\"Job Percentage of each Segment during Recession Time\")\nplt.ylabel(\"Jobs/peak\")\nplt.xlabel(\"Time from peak\")\n\n# (g) Labeling on x-axis\nplt.xticks(np.arange(0, 100, 12), (\"-1yr\", \"peak\", \"+1yr\", \"+2yr\", \"+3yr\", \"+4yr\", \"+5yr\", \"+6yr\", \"+7yr\"))\n\n# (h) dashed line\nplt.axvline(x = 12, c='grey', linestyle = \"--\")\nplt.axhline(y = 1, c='grey', linestyle = \"--\")\n\n# (i)\n# I added the line settings in the plot function above.\n\nplt.show()\n\n# (j) There were \"NO\" worse recessions than the Great Recession except the Great Depression.\n\n# (k) In terms of nonagricultural jobs, the Great Depression was definitely the worst in this graph.\n# However, we have to compare with population. This is just a jobs total.\n# If the population trend is different, maybe the result can change.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Homeworks/Computation/Week2/Problem8/Problem_set_2_8.py","file_name":"Problem_set_2_8.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
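A sketch (not from Problem_set_2_8.py) of the same peak-normalization done on a DatetimeIndex, replacing the row lookup inside the loop with one vectorized division per segment.

import pandas as pd

seg = pd.Series([100.0, 98.0, 95.0, 97.0],
                index=pd.date_range("1939-01-01", periods=4, freq="MS"))
normalized = seg / seg.loc["1939-01-01"]  # jobs as a fraction of the peak month
print(normalized.iloc[1])  # 0.98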
{"seq_id":"388578634","text":"#################################\r\n# Date: 2021/03/25\r\n# Author: Miles Xu\r\n# Email: kanonxmm@163.com\r\n# Desc.: Provides the control layer for redis\r\n#################################\r\n# -*- coding: utf-8 -*-\r\nimport aioredis\r\nfrom .BaseHelper import BaseHelper\r\nfrom global_.PubManager import PublicManager\r\n\r\n\r\nclass redisHelper(BaseHelper):\r\n _pool = None\r\n # loop = asyncio.get_running_loop()\r\n def __init__(self, config) -> None:\r\n self.config = config\r\n\r\n async def connect(self):\r\n if self._pool is None:\r\n db = await aioredis.create_pool(\r\n (self.config[\"host\"], self.config[\"port\"]),\r\n db=self.config[\"db\"],\r\n password=self.config[\"password\"]\r\n )\r\n if db:\r\n self._pool = db\r\n PublicManager.logger.info(\"redis connect success!!!\") \r\n else:\r\n raise Exception(\"redis connect failure!!!\")\r\n\r\n async def set_key(self, key, value):\r\n await self.connect()\r\n with await self._pool as db:\r\n await db.set(key, value)\r\n\r\n async def read_key(self, key):\r\n await self.connect()\r\n with await self._pool as db:\r\n res = await db.get(key)\r\n return res\r\n\r\n async def close(self):\r\n self._pool.close()\r\n \r\n def __del__(self):\r\n self._pool.close()","sub_path":"utils/db/redisHelper.py","file_name":"redisHelper.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
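A hypothetical usage sketch for the redisHelper above; the config values are stand-ins, and it assumes the BaseHelper/PublicManager imports resolve in the real project.

import asyncio

async def demo():
    helper = redisHelper({"host": "127.0.0.1", "port": 6379,
                          "db": 0, "password": None})
    await helper.set_key("greeting", "hello")
    value = await helper.read_key("greeting")
    print(value)  # b'hello' -- aioredis returns bytes by default
    await helper.close()

asyncio.run(demo())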
{"seq_id":"380267881","text":"import tensorflow as tf\nimport os\nfrom PIL import Image\nimport numpy as np\n## Save the data in PNG format: 3 features each for L, R, DGT\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"n_examples\", \"100\", \"TfRecords File Size\")\ntf.flags.DEFINE_string(\"data_dir_left\", \"/media/Data/Pierluigi/City/\", \"path to dataset\")\ntf.flags.DEFINE_string(\"tfR_dir\", \"/TFRecordsSem/city/\", \"path to TFrecords_dataset\")\n\ndef bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef convert_to(imagesL,imagesR,imagesDGT, imagesSGT, name):\n\tnum_examples= FLAGS.n_examples\n\tcwd = os.getcwd() + FLAGS.tfR_dir\n\tfilename = os.path.join(cwd, name + '.tfrecords')\n\tprint('Writing', filename)\n\twriter = tf.python_io.TFRecordWriter(filename)\n\tfor index in range(num_examples):\n\t\t\timage_rawL = imagesL[index]\n\t\t\timage_rawR = imagesR[index]\n\t\t\timage_rawDGT = imagesDGT[index]\n\t\t\timage_rawSGT = imagesSGT[index]\n\t\t\texample = tf.train.Example(features=tf.train.Features(feature={\n\t\t\t\t'image_raw_L': bytes_feature(image_rawL),\n\t\t\t\t'image_raw_R': bytes_feature(image_rawR),\n\t\t\t\t'depth_ground_truth': bytes_feature(image_rawDGT),\n\t\t\t\t'labels_ground_truth': bytes_feature(image_rawSGT)}))\n\t\t\twriter.write(example.SerializeToString())\n\twriter.close()\n\treturn 0\n\ndef main():\n\tcwd = FLAGS.data_dir_left\n\tfilenames = []\n\tfor f in os.listdir(cwd):\n\t\tfile_path = os.path.join(cwd,f)\n\t\tfilenames.append(file_path)\n\t\n\tfilenamesL = [f for f in filenames if \"_L.png\" in f]\n\tfilenamesR = [f.replace(\"_L\",\"_R\") for f in filenamesL]\n\tfilenamesDGT = [f.replace(\"_L\",\"_L_disp\") for f in filenamesL]\n\t\n\tfilenamesSGT = [f.replace(\"Image\",\"\") for f in filenamesL]\n\tfilenamesSGT = [f.replace(\"_L\",\"\") for f in filenamesSGT]\n\n\tif len(filenamesL) == len(filenamesR) and len(filenamesR) == len(filenamesDGT) and len(filenamesSGT) == len(filenamesDGT):\t\n\t\timagesL = []\n\t\timagesR = []\n\t\timagesDGT = []\n\t\timagesSGT = []\n\t\t\n\t\tfor i in range(len(filenamesL)): \n\t\t\timgLreader= open(filenamesL[i], mode='rb')\n\t\t\timgRreader= open(filenamesR[i], mode='rb')\n\t\t\timgDGTreader= open(filenamesDGT[i], mode='rb')\n\t\t\timgSGTreader = open(filenamesSGT[i], mode='rb')\n\n\t\t\timgL = imgLreader.read()\n\t\t\timgR = imgRreader.read()\n\t\t\timgDGT = imgDGTreader.read()\n\t\t\timgSGT = imgSGTreader.read()\n\n\t\t\timagesL.append(imgL)\n\t\t\timagesR.append(imgR)\n\t\t\timagesDGT.append(imgDGT)\n\t\t\timagesSGT.append(imgSGT)\n\n\t\t\tif i % FLAGS.n_examples == (FLAGS.n_examples - 1):\n\t\t\t\tconvert_to(imagesL,imagesR,imagesDGT,imagesSGT, \"TFRecords\" + str((int)(i/FLAGS.n_examples)))\n\t\t\t\timagesL = []\n\t\t\t\timagesR = []\n\t\t\t\timagesDGT = []\n\t\t\t\timagesSGT = []\n\telse:\n\t\traise ValueError(\"Images number does not match.\")\n\t\texit(1)\n\t\n\texit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Code/TfRecordsSem.py","file_name":"TfRecordsSem.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
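A sketch of the read side for records written by convert_to above, staying with the TF 1.x API family that TfRecordsSem.py uses; the four keys mirror the feature dict in the writer.

import tensorflow as tf

def parse_record(serialized_example):
    # each feature was written as a single bytes_list entry, hence scalar strings
    features = {
        'image_raw_L': tf.FixedLenFeature([], tf.string),
        'image_raw_R': tf.FixedLenFeature([], tf.string),
        'depth_ground_truth': tf.FixedLenFeature([], tf.string),
        'labels_ground_truth': tf.FixedLenFeature([], tf.string),
    }
    return tf.parse_single_example(serialized_example, features)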
{"seq_id":"409783208","text":"from unittest.mock import patch\n\nfrom .app import Sentry\n\n\n@patch.object(Sentry, \"clear\")\n@patch.object(Sentry, \"track\")\ndef test_before_and_after_request_are_called(track_mock, clear_mock, client):\n # Given that I have an endpoint\n # And I've mocked Sentry.{clear, track}\n # When I call that endpoint\n response = client.get(\"/\")\n\n # Then the request should succeed\n assert response.status_code == 200\n\n # And each mock should be called once\n track_mock.assert_called_once()\n clear_mock.assert_called_once()\n","sub_path":"tests/test_component.py","file_name":"test_component.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"573416824","text":"from __future__ import print_function\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\nfrom deepx.nn import *\nfrom deepx.rnn import *\nfrom deepx.loss import *\nfrom deepx.optimize import *\nimport numpy as np\nfrom numpy import random\nimport csv\n\nfile_name = 'MLP_VC_H10_Lr_N500-5000.csv'\n\n# Train MLP using SGD until error is 0, or until n_iter\n# is reached using a threshold of 0.5 on the sigmoid output\n# Return the final error and the number of iterations required\ndef train(optimizer, X, y, n_iter, lr):\n for i in range(1, n_iter + 1):\n lr = float(lr)*(0.99999)\n loss = optimizer.train(X, y, lr)\n\n if np.isnan(loss):\n print(\"Received NaN loss, breaking...\")\n return loss, i\n\n preds = [[int(x >= 0.5)] for x in mlp.predict(X)]\n\n error = 1 - (preds == y).sum() / float(N)\n if error == 0:\n break\n if i % 1000 == 0:\n print(\"{}, error:{}, loss:{}\".format(i, error, loss))\n\n return error, i\n\nif __name__ == \"__main__\":\n # Define the names of the columns for the results CSV\n fieldnames = ['num_layers', 'num_hidden', 'data_dim', \n 'N_data', 'error', 'iterations']\n\n # Open the results file and write header\n #results_file = open(file_name, 'w')\n #writer = csv.DictWriter(results_file, fieldnames=fieldnames)\n #writer.writeheader()\n #results_file.close()\n\n # Parameters:\n # Maximum number of iterations before stopping\n n_iter = 500000\n # Dimension of each data point\n d = 100\n # List of number of points to iterate over\n num_pts = range(2400, 100000, 100)# + range(500, 1100, 50)\n # Number of hidden units\n Hs = [10]\n # Number of layers\n L = 1\n # Number of times to repeat each result\n num_repeats = 10\n # Learning Rate\n lr = 0.1\n\n # Seed random for reproducibility\n random.seed(2)\n\n for repeat in range(num_repeats):\n for H_idx, H in enumerate(Hs):\n for N_idx, N in enumerate(num_pts):\n print(\"Trying with N = {} points, and H = {}\".format(N, H))\n\n # Generate N d-dimensional data points sampled from a \n # uniform distribution over [0,1]\n X = random.uniform(low=0, high=1, size=(N, d))\n\n # Randomly pick half of the points and assign them a positive label\n labels = np.zeros((N, 1))\n pos_labels = random.choice(np.arange(0,N), np.floor(float(N)/2.0), replace=False)\n labels[pos_labels] = 1;\n labels = labels.astype(np.int32)\n\n # While we aren't getting NaNs\n nan_loss = True\n repeat_count = 0\n while (nan_loss and repeat_count < num_repeats):\n # Define the network with a sigmoid output and LogLoss\n mlp = Vector(d) >> Repeat(Tanh(H), L) >> Sigmoid(1)\n sgd_optimizer = SGD(mlp, LogLoss(), clip_gradients=0.01)\n \n # Train the network until we reach 0 error or n_iter iterations\n error, iterations = train(sgd_optimizer, X, labels, n_iter, lr)\n if not np.isnan(error):\n nan_loss = False\n else:\n print(\"Trying same network again... 
With new X data\")\n X = random.uniform(low=0, high=1, size=(N, d))\n repeat_count += 1\n\n # Print the final error\n print(\"Succesfully shattered {} Pts, Error: {}\".format(N, error))\n\n # Write the results as a line to the csv file\n results_file = open(file_name, 'a')\n writer = csv.DictWriter(results_file, fieldnames=fieldnames)\n writer.writerow({'num_layers': L, 'num_hidden': H, 'data_dim': d,\n 'N_data':N, 'error':error, 'iterations':iterations})\n results_file.close()\n","sub_path":"shattering_random.py","file_name":"shattering_random.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"235605140","text":"\"\"\"\nMODULE 2\n\nThis module collects the classes of the other WINDOWs that play role of important thing\n\n\"\"\"\n#-*-coding: utf-8 -*-\nimport sys\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport TradingProject\nimport CalculationTool\nimport InitDownloadedDataHandling\nimport xingAPIClassModule\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom pandas import Series, DataFrame\nfrom PyQt4 import uic\nimport sqlite3\nimport win32com\nimport pythoncom\nimport pyqtgraph as pg\nimport numpy as np\nimport datetime\nimport openpyxl\nfrom threading import Timer\nimport numpy as np\nimport __main__\n\nclass otherWindow():\n\n TradingOptionUI = uic.loadUiType(\"TradingOption.ui\")[0] #This code functions as connecting '테스트알고리즘' -> 'Trading Option' GUI\n UserInfoUI = uic.loadUiType(\"UserInfo.ui\")[0]\n AccountInfoUI = uic.loadUiType(\"AccountsInfo.ui\")[0] #This code functions as connecting '계좌정보' GUI\n DataInfoUI = uic.loadUiType(\"DataInfo.ui\")[0] #This code functions as connecting '데이터열람' GUI\n AlgorithmOption_1_UI = uic.loadUiType(\"AlgorithmOption1.ui\")[0] #This code functions as connecting '알고리즘1' -> 'Trading Option' GUI\n AlgorithmOption_2_UI = uic.loadUiType(\"AlgorithmOption2.ui\")[0] #This code functions as connecting '알고리즘2' -> 'Trading Option' GUI\n DataExtractionUI = uic.loadUiType(\"DataExtraction.ui\")[0]\n ProgressBarUI = uic.loadUiType(\"ProgressBar.ui\")[0]\n ProfitUI = uic.loadUiType(\"ProfitWindow.ui\")[0]\n InitProgressBarUI = uic.loadUiType(\"InitProgressBar.ui\")[0]\n\n class ProfitShow(QMdiSubWindow, ProfitUI):\n\n dataReceive = pyqtSignal(list, list)\n\n def __init__(self, algoNumber):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.profitGraph = pg.GraphicsLayoutWidget()\n self.setWidget(self.profitGraph)\n self.profitChart()\n self.algoNumber = algoNumber\n self.dataReceive.connect(self.update)\n\n def getStockData(self, kospiProfit, stockProfit):\n\n self._kospiProfit = kospiProfit\n self._stockProfit = stockProfit\n self.dataReceive.emit(kospiProfit, stockProfit)\n\n def update(self):\n \n self.algorithm.setData(self._stockProfit)\n self.kospi.setData(self._kospiProfit)\n\n def profitChart(self):\n \n self.profitPlot = self.profitGraph.addPlot(row=0, col=0)\n self.profitPlot.addLegend()\n self.algorithm = self.profitPlot.plot(pen='g',name = \"algorithm\")\n self.kospi = self.profitPlot.plot(pen = 'r', name = \"kospi\")\n\n #Set timer to close graph\n today=datetime.datetime.today()\n closingTime = today.replace(day=today.day, hour=3, minute=15, second=0, microsecond=0)\n delta_t = closingTime - today\n secs = delta_t.seconds\n #Timer start! 
after 'secs'\n print(\"Graph will be closed at : \" + str(closingTime))\n t = Timer(secs, self.closeGraph)\n t.start()\n\n def closeGraph(self):\n\n exporter = pg.exporters.ImageExporter(self.profitPlot)\n exporter.parameters()['width'] = 640 # (note this also affects height parameter)\n exporter.parameters()['height'] = 480 # (note this also affects height parameter)\n exporter.export('daily_profit1.png')\n self.close()\n\n class StockGraphShow(QMdiSubWindow):\n\n def __init__(self):\n super().__init__()\n self.cw = QWidget()\n self.setStyleSheet(\"QMdiSubWindow {background: 'black';}\")\n self.setWidget(self.cw)\n self.layout = QGridLayout()\n self.cw.setLayout(self.layout)\n self.totalGraph = pg.GraphicsLayoutWidget()\n self.graphMenu = QGridLayout()\n self.textEdit = QLineEdit()\n self.textEdit.setGeometry(0,0,50,50)\n self.textEdit.returnPressed.connect(self.readEdit)\n self.readButton = QPushButton()\n self.graphMenu.addWidget(self.textEdit, 0,1)\n self.graphMenu.addWidget(self.readButton, 0,2)\n self.layout.addLayout(self.graphMenu, 0,1)\n self.layout.addWidget(self.totalGraph,1,1)\n \n #This flag means 0 -> graph isn't created yet 1->graph plot is created\n self.isDraw = 0\n self.drawGraph(\"대림산업\")\n #This flag means 0 -> graph isn't created yet 1->graph plot is created\n self.isDraw = 1\n \n self.setWindowTitle(\"Stock Graph\")\n self.setWindowIcon(QIcon('L&K.png'))\n\n gridLayout = self.totalGraph.ci.layout\n gridLayout.setRowStretchFactor(1,2.5)\n gridLayout.setRowStretchFactor(0,0.5)\n\n self.connect(self.readButton, SIGNAL('clicked()'), self.readEdit)\n\n def readEdit(self):\n\n print(self.layout.count())\n self.winStock.clear()\n self.winVolume.clear()\n self.stockName = self.textEdit.text() \n self.drawGraph(self.stockName)\n \n #self.layout = QGridLayout()\n #self.cw.setLayout(self.layout)\n #totalGraph = pg.GraphicsLayoutWidget()\n #self.layout.addWidget(totalGraph,1,1)\n #self.drawGraph(self.stockName)\n\n def drawGraph(self, stock):\n\n stockName = stock\n #totalGraph.setWindowIcon(QIcon('L&K.png')) \n self.cPrice, self.tVal, self.tVol, self.oPrice, self.iPrice, self.hPrice, self.date = self.setStockInfo([stockName])\n upX, upY, tVolX, upHeight, downX, downY, downHeight, errUpX, errUpY, errUpTop, errDownX, errDownY, errDownTop = self.setStockDataSet()\n\n color = 'g'\n priceHeight = []\n # The plot is not created yet, make it\n if self.isDraw == 0:\n self.winStock = self.totalGraph.addPlot(row = 1, col =1, rowspan=2, colspane=1)\n self.winVolume = self.totalGraph.addPlot(row = 3, col =1)\n self.winStock.setWindowTitle(stock)\n self.winStock.setLabel('bottom',test = self.date)\n\n up = pg.BarGraphItem(x=upX, y0 = upY, width=1, height = upHeight, brush='r')\n vol = pg.BarGraphItem(x=tVolX, y0=0, width=1, height=self.tVol, brush='w')\n down = pg.BarGraphItem(x=downX, y0 = downY, width=1, height = downHeight, brush='g')\n errUp = pg.ErrorBarItem(x=errUpX, y=errUpY, top=errUpTop, beam=0, pen = {'color' : 'r', 'width':1})\n errDown = pg.ErrorBarItem(x=errDownX, y=errDownY, top=errDownTop, beam=0, pen = {'color' : 'g', 'width':1})\n \n self.winStock.addItem(up)\n self.winStock.addItem(down)\n self.winStock.addItem(errUp)\n self.winStock.addItem(errDown)\n \n self.winVolume.addItem(vol)\n #win.setXRange(0,5)\n #win.setWindowIcon(QIcon('L&K.png')) \n #win.setLimits(xMin=0, xMax=5, yMin=0)\n\n def setStockInfo(self, stockNameList):\n\n connect = sqlite3.connect(\"TradingProject.db\") #Connect 'TradingProject.db' database which stores the stock's data\n dataBaseCursor = 
connect.cursor() #Connect database cursor! \n \n where = \"Name=\" + \"\\\"\" +stockNameList[0] +\"\\\"\"\n for i in range(1,len(stockNameList)):\n stockName = stockNameList[i]\n where += \" OR Name=\" + \"\\\"\" + stockName + \"\\\"\"\n \n selectQuery = \"SELECT * FROM kospiValue WHERE \" + where\n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchall()\n if len(ret) == 0:\n selectQuery = \"SELECT * FROM kosdaqValue WHERE \" + where\n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchall()\n\n cPrice = []\n tVal = []\n tVol = []\n oPrice = []\n iPrice = []\n hPrice = []\n date =[]\n for i in range(len(ret)):\n closing = ret[i][1]\n tradingVal = ret[i][2]\n tradingVol = ret[i][3]\n opening = ret[i][4]\n highest = ret[i][5]\n lowest = ret[i][6]\n day = ret[i][7]\n cPrice.append(closing)\n tVal.append(tradingVal)\n tVol.append(tradingVol)\n oPrice.append(opening)\n hPrice.append(highest)\n iPrice.append(lowest)\n date.append(day)\n\n return cPrice, tVal, tVol, oPrice, iPrice, hPrice, date\n\n def setStockDataSet(self):\n\n upX = []\n upY = []\n tVolX = []\n upHeight = []\n downX = []\n downY = []\n downHeight = []\n errUpX = []\n errUpY = []\n errUpTop = []\n errDownX = []\n errDownY = []\n errDownTop = []\n count = 0\n print(len(self.cPrice))\n for i in range(len(self.cPrice)):\n\n if self.cPrice[count] >= self.oPrice[count]:\n diff = self.cPrice[count] - self.oPrice[count]\n upX.append(count)\n errUpX.append(count)\n errYelement = self.iPrice[count]\n errTopElement = self.hPrice[count] - errYelement\n errUpY.append(errYelement)\n errUpTop.append(errTopElement)\n upY.append(self.oPrice[count])\n upHeight.append(diff)\n else:\n diff = self.oPrice[count] - self.cPrice[count]\n downX.append(count)\n errDownX.append(count)\n errYelement = self.iPrice[count]\n errTopElement = self.hPrice[count] - errYelement\n downY.append(self.cPrice[count])\n downHeight.append(diff)\n errDownY.append(errYelement)\n errDownTop.append(errTopElement)\n\n tVolX.append(count)\n count+=1\n \n errUpX = np.array(errUpX)\n errUpY = np.array(errUpY)\n errUpTop = np.array(errUpTop)\n errDownX = np.array(errDownX)\n errDownY = np.array(errDownY)\n errDownTop = np.array(errDownTop)\n\n return upX, upY, tVolX, upHeight, downX, downY, downHeight, errUpX, errUpY, errUpTop, errDownX, errDownY, errDownTop\n\n def closeEvent(self, event):\n self.close()\n\n #This window class is for giving information of user\n class UserInfo(QMdiSubWindow, UserInfoUI ):\n \n def __init__(self, name, account, deposit):\n super().__init__()\n self.setupUi(self)\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.setWindowTitle(\"User Info\")\n self.setWindowIcon(QIcon('L&K.png'))\n u = \"사용자 : \" + str(name)\n a = \"계좌번호 : \" + str(account)\n d = \"예수금 : \" + str(deposit)\n self.user.setText(u)\n self.account.setText(a)\n self.deposit.setText(d)\n self.connect(self.acceptButton, SIGNAL('clicked()'), self.acceptData)\n\n def acceptData(self):\n self.close()\n \n #This window class is created in order to check state\n class StateInfo(QMdiSubWindow):\n\n def __init__(self):\n super().__init__()\n #self.setupUi(self)\n self.stateInfo = QTextBrowser()\n self.setWidget(self.stateInfo)\n self.setWindowTitle(\"Info\")\n self.setWindowIcon(QIcon('L&K.png'))\n\n def closeEvent(self, event):\n event.accept()\n\n #This class is for creating the option window whcih is executed when we click '트레이딩' -> '알고리즘1' -> '옵션'\n class Algorithm_1_Option(QDialog, AlgorithmOption_1_UI): \n\n def __init__(self):\n 
super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n\n self.connect(self.acceptButton, SIGNAL('clicked()'), self.acceptData) #when clicking '확인' button, set all option '전체데이터', 'Window 크기', '자본금' \n self.connect(self.cancelButton, SIGNAL('clicked()'), self.cancelData) #when clicking '취소', cancel all option\n\n def acceptData(self):\n \n investMentWindowSize['Algorithm1'] = self.windowEdit.text() #'windowEdit' is the line editor that receives the value of 'window size'\n investMentPercentage['Algorithm1'] = self.capitalEdit.text() #'capitalEdit' is the line editor that receives the value of 'the percentage of the capital'\n investMentSize['Algorithm1'] = self.StockNumberEdit.text() #'StockNumberEdit' is the line editor that receives the number of stocks that will be invested\n self.close() \n\n def cancelData(self):\n self.close()\n\n #This class is for creating the option window which is executed when we click '트레이딩' -> '알고리즘2' -> '옵션'\n class Algorithm_2_Option(QDialog, AlgorithmOption_2_UI): \n \n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.connect(self.acceptButton, SIGNAL('clicked()'), self.acceptData) #when clicking '확인' button, set all option '전체데이터', 'Window 크기', '자본금' \n self.connect(self.cancelButton, SIGNAL('clicked()'), self.cancelData) #when clicking '취소', cancel all option\n\n def acceptData(self):\n \n investMentWindowSize['Algorithm2'] = self.windowEdit.text() #'windowEdit' is the line editor that receives the value of 'window size'\n investMentPercentage['Algorithm2'] = self.capitalEdit.text() #'capitalEdit' is the line editor that receives the value of 'the percentage of the capital'\n\n self.close() \n\n def cancelData(self):\n\n self.close()\n\n #This class is for creating the 'account info' window which is executed when we click '보기' -> '계좌정보'\n class AccounInfo(QMdiSubWindow, AccountInfoUI):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n\n def setAccountNo(self, Accountlist): #Must be updated\n pass\n\n #This class is for creating the 'data info' window which is executed when we click '보기' -> '데이터열람'\n class DataInfo(QDialog, DataInfoUI):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.connect(self.findDataButton, SIGNAL(\"clicked()\"), self.searchingData)\n\n def searchingData(self): \n\n dataConnect = sqlite3.connect(\"TradingProject.db\") #Connect 'TradingProject.db' database which stores the stock's data\n dataBaseCursor = dataConnect.cursor() #Connect database cursor! 
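\n #NOTE (editor): the SELECT statements below are built by plain string concatenation; if 'code' can ever come from untrusted input, parameterized queries (dataBaseCursor.execute(sql, params)) would be the safer pattern.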
\n code = self.codeEdit.text() #'codeEdit' is the line editor that acts role as accepting a stock's code\n item = self.itemEdit.text() #'itemEdit' is the line editor that acts role as accepting a stock's item like '종가', '시가', '거래량', .....\n\n if item == \"종목명\": #if you type '종목명' on 'itemEdit', show a name of stock\n #This query is for selecting '종목명' item\n whereCondition = \"Code = \" + \"'\" + code + \"'\"\n selectQuery = \"SELECT Name FROM kospiCode WHERE \" + whereCondition \n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchall()\n print(ret[0][0]) \n #NEED TO BE UPDATED \n elif item == \"종가\" or item == \"거래량\" or item == \"거래대금\" or item == \"시가\" or item == \"저가\" or item == \"고가\":\n #This dictionary is for converting the text which is typed on 'itemEdit' to the attribute of database which correspond to the text like '종가', '시가', '거래량', ....\n convertingItemNametoColumnName = {'종가':\"ClosingPrice\", '거래량':\"tradingVolume\", '거래대금':\"tradingValue\", '시가':\"openingPrice\", '고가':\"highestPrice\", '저가':\"lowestPrice\", '일자':\"date\"}\n #This query is for selecting '종목명' item\n whereCondition = \"Code = \" + \"'\" + code + \"'\"\n selectQuery = \"SELECT Name FROM kospiCode WHERE \" + whereCondition \n \n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchone()\n \n name = ret[0] #Get a stock's name\n item = convertingItemNametoColumnName[item] #Convert item's text to the attribute of database\n\n #This query is for selecting certain items of a certain stock\n whereCondition = \"Name = \" + \"'\" + name + \"'\" \n selectQuery = \"SELECT \" + item + \", date FROM kospiValue WHERE \" + whereCondition\n\n print(selectQuery)\n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchall()\n print(Series(ret))\n #NEED TO BE UPDATED \n else:\n financeInfo = __main__.LoginWindow.main.InitData.financial_DB\n print(financeInfo)\n whereCondition = \"Code = \" + \"'\" + code + \"'\"\n selectQuery = \"SELECT Name FROM kospiCode WHERE \" + whereCondition \n dataBaseCursor.execute(selectQuery)\n ret = dataBaseCursor.fetchall()\n stockName = ret[0][0]\n print(financeInfo[stockName][item])\n\n\n #This class is for creating the 'account info' window whcih is executed when we click '보기' -> '데이터열람'\n class DataExtraction(QMdiSubWindow, DataExtractionUI):\n\n progressSignal = pyqtSignal(str, float)\n\n def __init__(self, kospiCode):\n super().__init__()\n self.setupUi(self)\n self.kospiCode = kospiCode\n self.stockList = self.getKospiList()\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.connect(self.acceptButton, SIGNAL(\"clicked()\"), self.printData)\n self.connect(self.cancelButton, SIGNAL(\"clicked()\"), self.closeWindow)\n self.connect(self.inputButton, SIGNAL(\"clicked()\"), self.chooseStock)\n self.dateEdit.setDateTime(QDateTime.currentDateTime())\n self.dateEdit.setMinimumDate(QDate(2014,1,1))\n self.today = time.localtime()\n self.dateEdit.setMaximumDate(QDate(self.today.tm_year, self.today.tm_mon, self.today.tm_mday))\n self.dateEdit.setCalendarPopup(True)\n #Connect signal to event\n self.progressSignal.connect(self.updateBar)\n\n def setProgressBar(self, text, value):\n\n self.progressSignal.emit(text, value)\n\n def updateBar(self, text, value):\n\n self.progress.progressState.setText(text)\n self.progress.progressBar.setValue(value)\n\n def getKospiList(self):\n \n size = len(self.kospiCode)\n kospiList = []\n for i in range(size):\n kospiList.append(self.kospiCode[i][1])\n \n return 
kospiList\n\n def chooseStock(self):\n \n #TODO: the stock-picker dialog is not implemented yet; falling back to the\n #full list built in __init__ (editor's assumption about the intended behaviour)\n return self.stockList\n\n def getKospi200(self):\n kospi200 = __main__.TRQuery.XAQuery(\"t1444\")\n return kospi200\n\n def printData(self): \n\n self.index = self.indexBox.currentText()\n pythoncom.CoInitialize()\n\n #Set the date of starting point\n startDate = self.dateEdit.date().toPyDate()\n self.startDate = CalculationTool.calculateTodayFormat(startDate.year, startDate.month, startDate.day)\n self.progress = otherWindow.ProgressBarWindow(\"출력 준비 중..\", self.stockList, self.startDate)\n self.progress.show()\n import threading\n printingData = threading.Thread(target = self.printing, args=())\n printingData.start()\n #self.progress.close()\n\n def printing(self):\n\n __main__.LoginWindow.main.StateInfo.stateInfo.append(\"데이터 출력 중입니다.\")\n indexText = self.indexBox.currentText()\n\n window = 60\n\n dataConnect = sqlite3.connect(\"TradingProject.db\") #Connect 'TradingProject.db' database which stores the stock's data\n dataBaseCursor = dataConnect.cursor() #Connect database cursor! \n \n selectQuery = \"SELECT date FROM kospiValue WHERE Name = '종합(KOSPI)'\" #This query fetches the trading dates of the KOSPI composite index\n \n dataBaseCursor.execute(selectQuery) #Execute the query 'selectQuery'\n ret = dataBaseCursor.fetchall() #Fetch all of the rows selected by the query\n \n dayIndex = []\n\n #Get day index data\n for i in range(len(ret)):\n dayIndex.append(ret[i][0])\n\n selectQuery = \"SELECT * FROM kospiValue\" #This query fetches every row of the kospiValue table\n dataBaseCursor.execute(selectQuery) #Execute the query 'selectQuery'\n ret = dataBaseCursor.fetchall() #Fetch all of the rows selected by the query\n \n kospiCp = {}\n kospiOp = {}\n kospiList = []\n kospiLen = {}\n\n token = 100/len(self.stockList)\n progressValue= 0\n\n filteredList = []\n\n for name in self.stockList:\n\n selectQuery = \"SELECT * FROM kospiValue WHERE Name = '\" + name + \"'\" \n dataBaseCursor.execute(selectQuery) \n ret = dataBaseCursor.fetchall() \n\n if len(ret) < 60:\n continue\n\n progressValue += token\n text = \"데이터 추출 중:\" + str(name)\n self.setProgressBar(text, progressValue)\n\n cp = []\n op = []\n for i in range(len(ret)):\n cp.append(ret[i][1])\n op.append(ret[i][4])\n\n kospiCp[name] = cp\n kospiOp[name] = op\n kospiLen[name] = len(cp)\n filteredList.append(name)\n print(name)\n\n self.setProgressBar(\"데이터 추출 과정 완료\", 100)\n\n time.sleep(1)\n\n self.setProgressBar(\"엑셀파일로 내보내는 중\", 0)\n size = len(kospiCp[\"종합(KOSPI)\"])\n \n workbook = openpyxl.Workbook()\n fileName = \"kospi.xlsx\"\n worksheet1 = workbook.active\n worksheet1.title = \"opening\"\n worksheet2 = workbook.create_sheet(title=\"closing\")\n worksheet3 = workbook.create_sheet(title=\"return value\")\n worksheet4 = workbook.create_sheet(title=\"return value(log)\")\n worksheet5 = workbook.create_sheet(title=\"mean\")\n worksheet6 = workbook.create_sheet(title=\"std\")\n worksheet7 = workbook.create_sheet(title=\"filtered\")\n\n dayIndex = dayIndex[-window:]\n\n #Write date info\n dayIndex.reverse()\n for i in range(window):\n worksheet1.cell(row = i+2, column = 1, value = dayIndex[i])\n worksheet2.cell(row = i+2, column = 1, value = dayIndex[i])\n worksheet3.cell(row = i+2, column = 1, value = dayIndex[i])\n worksheet4.cell(row = i+2, column = 1, value = dayIndex[i])\n dayIndex.reverse()\n\n worksheet5.cell(row = 2, column = 1, value = dayIndex[-1])\n worksheet6.cell(row = 2, column = 1, value = dayIndex[-1])\n 
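#(editor) worksheet7 keeps only high-volatility names (log-return std >= 0.01); see the filter in the loop below\n 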
worksheet7.cell(row = 2, column = 1, value = dayIndex[-1])\n\n i=1\n k=1\n progressValue= 0\n print(token)\n for name in filteredList:\n\n print(name)\n worksheet1.cell(row = 1, column = i+1, value = str(name))\n worksheet2.cell(row = 1, column = i+1, value = str(name))\n worksheet3.cell(row = 1, column = i+1, value = str(name))\n worksheet4.cell(row = 1, column = i+1, value = str(name))\n worksheet5.cell(row = 1, column = i+1, value = str(name))\n worksheet6.cell(row = 1, column = i+1, value = str(name))\n \n cpInfo = kospiCp[name]\n opInfo = kospiOp[name]\n\n progressValue += token\n text = \"엑셀파일로 내보내는 중:\" + str(name)\n self.setProgressBar(text, progressValue)\n\n size = len(cpInfo)\n cpInfo.reverse()\n opInfo.reverse()\n rVList = []\n for j in range(window):\n\n closingPrice = cpInfo[j]\n openingPrice = opInfo[j]\n rV = openingPrice - closingPrice\n logRv = math.log10(closingPrice/openingPrice)\n rVList.append(logRv)\n worksheet1.cell(row = j+2,column = i+1, value = openingPrice)\n worksheet2.cell(row = j+2,column = i+1, value = closingPrice)\n worksheet3.cell(row = j+2, column = i+1, value = rV)\n worksheet4.cell(row = j+2, column = i+1, value = logRv)\n\n mean = np.mean(rVList)\n std = np.std(rVList)\n worksheet5.cell(row = 2,column = i+1, value = mean)\n worksheet6.cell(row = 2,column = i+1, value = std)\n if std >= 0.01:\n worksheet7.cell(row = 1, column = k+1, value = str(name))\n worksheet7.cell(row = 2,column = k+1, value = std)\n k+=1\n\n i += 1\n\n print(k-1)\n workbook.save(filename = fileName)\n __main__.LoginWindow.main.StateInfo.stateInfo.append(\"데이터 출력이 완료되었습니다.\")\n\n def closeWindow(self):\n self.close()\n \n class ProgressBarWindow(QMdiSubWindow, ProgressBarUI):\n\n def __init__(self, text, stockList, startDate):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.excel = win32com.client.Dispatch(\"Excel.Application\")\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.stockList = stockList\n self.startDate = startDate\n self.text = text\n self.completed = 0 #initialise the progress counter once here, so processing() accumulates across calls\n self.progressState.setAlignment(Qt.AlignCenter)\n self.progressState.setText(text)\n __main__.LoginWindow.main.mdi.addSubWindow(self)\n \n def processing(self, value):\n\n if self.completed <= 100:\n self.completed += value\n self.progressBar.setValue(self.completed)\n\n class InitProgressBarWindow(QWidget, InitProgressBarUI):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('L&K.png'))\n self.setFixedSize(self.geometry().width(), self.geometry().height())\n self.progressState.setAlignment(Qt.AlignCenter)\n \n\n
","sub_path":"TradingSystem/otherWindowClass.py","file_name":"otherWindowClass.py","file_ext":"py","file_size_in_byte":33481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234324887","text":"import json\nimport time\nfrom json import JSONEncoder\nfrom typing import Any\n\n\nclass BatchInfo:\n def __init__(self, appId: 
str, batchTime: int = None):\n self.appId = appId\n #evaluate the default timestamp per call; a default of int(time.time()) in the\n #signature would be evaluated once and frozen at import time\n self.batchTime = batchTime if batchTime is not None else int(time.time())\n self.submissionTime = -1\n self.processingStartTime = -1\n self.processingEndTime = -1\n\n def __str__(self) -> str:\n return \"BatchInfo(%s,%d)\" % (self.appId, self.batchTime)\n\n\nclass MyEncoder(JSONEncoder):\n\n def default(self, o: Any) -> Any:\n return o.__dict__\n\n\nif __name__ == \"__main__\":\n b1 = BatchInfo(\"app1\")\n print(b1)\n print(MyEncoder().encode(b1))\n j = json.dumps(b1, cls=MyEncoder)\n print(j)\n\n# def submit_batch():\n# # batchs.append()\n\n# def complete_batch():\n","sub_path":"flinkl/src/main/python/cep/BatchInfo.py","file_name":"BatchInfo.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592828118","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n'''\nsome useful function\n'''\ndef sigmoid(x):\n return 1. / (1 + np.exp(-x))\n\ndef dsigmoid(x):\n return sigmoid(x) * (1. - sigmoid(x))\n\ndef tanh(x):\n return np.tanh(x) \n\ndef dtanh(x):\n return 1. - x * x #note: expects x to already be tanh(z), so this equals 1 - tanh(z)**2\n\ndef softmax(x):\n e = np.exp(x - np.max(x)) # prevent overflow\n if e.ndim == 1:\n return e / np.sum(e, axis=0)\n else: \n return e / np.array([np.sum(e, axis=1)]).T # ndim = 2\n\ndef relu(x):\n return x * (x > 0)\n\ndef drelu(x):\n return 1. * (x > 0)\n\ndef relu_backward(dA, activation_cache):\n Z = activation_cache\n dZ = dA * drelu(Z)\n return dZ\n\ndef sigmoid_backward(dA, activation_cache):\n Z = activation_cache\n dZ = dA * dsigmoid(Z)\n return dZ\n\n##########################################\n#############test code###################\n#print sigmoid(np.array([2,3,4]))\n#print relu(np.array([2,-1]))\n#########################################\n\n\n\n\n\n\ndef initialize_parameters(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array containing the information of network structure\n \n Returns:\n parameters \n \"\"\"\n \n parameters = {}\n L = len(layer_dims)\n \n for l in range(1,L):\n parameters['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1])*0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l] , 1))\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) \n \n return parameters\n\n#####################################################\n##################debug code #########################\n#parameters = initialize_parameters([3,5,2])\n#print parameters\n####################################################\n\n\n\ndef linear_forward(A, W, b):\n \"\"\"\n compute W*A\n \"\"\"\n \n Z = np.dot(W, A) + b\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n return Z, cache\n
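\n##########################################\n#############test code###################\n#(editor note: a minimal sanity check for linear_forward, added in the same\n#commented style as this file's other test blocks; the shapes are illustrative)\n#A = np.array([[1.],[2.],[3.]])\n#W = np.zeros((2,3))\n#b = np.zeros((2,1))\n#Z, _ = linear_forward(A, W, b)\n#print(Z.shape) # -> (2, 1)\n#########################################\n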
\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n compute the activation of the forward propagation\n \n Arguments:\n A_prev --activations from previous layer\n W --weight matrix\n b -- bias\n \n Returns:\n A -- activation \n cache -- dictionary containing W, b, Z of each layer\n \n example :\n A_prev =np.array([[2],[2],[3]])\n W = np.array([[1,2,3],[2,3,4]])\n b = 2.0\n print linear_activation_forward(A_prev, W, b, \"sigmoid\")\n \n return: (array([[ 0.99999996],\n [ 1. ]]), ((array([[2],\n [2],\n [3]]), array([[1, 2, 3],\n [2, 3, 4]]), 2.0), array([[ 17.],\n [ 24.]])))\n\n \"\"\"\n if activation ==\"sigmoid\":\n Z, linear_cache = linear_forward(A_prev, W, b)\n A = sigmoid(Z)\n activation_cache = (Z)\n \n elif activation ==\"relu\":\n Z, linear_cache = linear_forward(A_prev, W, b)\n A = relu(Z)\n activation_cache = (Z)\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n \n return A, cache\n\n##########################################\n#############test code###################\n#A_prev =np.array([[2],[2],[3]])\n#W = np.array([[1,2,3],[2,3,4]])\n#b = 2.0\n#print linear_activation_forward(A_prev, W, b, \"sigmoid\")\n##########################################\n\ndef L_layer_forward(X, parameters, activation):\n \"\"\"\n implement forward propagation from start to the end\n \n Arguments:\n X -- input vector\n parameters -- neural network layout(numpy array)\n activation -- activation function of each layer \n \"relu&sigmoid\": relu*(L-1) and sigmoid*1\n \n Returns:\n A -- last activation value(output)\n caches -- dictionary containing W, b, Z of each layer\n \n Example:\n parameters = initialize_parameters([3,5,2])\n X = np.array([[2],[3],[4]])\n AL,caches = L_layer_forward(X, parameters, \"relu&sigmoid\")\n print (\"AL\",AL)\n print (\"caches\",caches)\n \n Return:\n ('AL', array([[ 0.4993538 ],\n [ 0.49992187]]))\n ('caches', [((array([[2],\n [3],\n [4]]), array([[-0.00217631, 0.00394372, -0.00784468],\n [ 0.01541106, 0.00394234, -0.01923715],\n [ 0.00249434, 0.00518348, 0.01985839],\n [ 0.00509152, -0.00168692, 0.00453093],\n [-0.00016401, 0.00041234, 0.0018633 ]]), array([[ 0.],\n [ 0.],\n [ 0.],\n [ 0.],\n [ 0.]])), array([[-0.02390018],\n [-0.03429948],\n [ 0.09997265],\n [ 0.02324601],\n [ 0.0083622 ]]))])\n \"\"\"\n caches = []\n A = X\n L = len(parameters) //2 #number of layers\n \n if activation == \"relu&sigmoid\":\n for l in range(1,L):\n A_prev = A\n W = parameters['W' + str(l)]\n b = parameters['b' + str(l)]\n A, cache = linear_activation_forward(A_prev, W, b, \"relu\")\n caches.append(cache)\n\n W = parameters['W' + str(L)]\n b = parameters['b' + str(L)]\n AL, cache = linear_activation_forward(A, W, b, \"sigmoid\")\n caches.append(cache) #cache the sigmoid layer as well, so L_layer_backward gets one cache per layer\n assert(AL.shape == (parameters['W'+str(L)].shape[0], X.shape[1]))\n\n return AL, caches\n\n###################################################\n################test code ###########################\n#parameters = initialize_parameters([3,5,2])\n#X = np.array([[2],[3],[4]])\n#AL,caches = L_layer_forward(X, parameters, \"relu&sigmoid\")\n#print (\"AL\",AL)\n#print (\"caches\",caches)\n#######################################################\n\n\n\ndef compute_cost(AL, Y):\n \"\"\"\n compute the cost\n \n Example:\n AL = np.array([[.2,.3]])\n Y = np.array([[1,0]])\n compute_cost(AL,Y)\n \n return:\n array(0.9830564281864164)\n \"\"\"\n N = Y.shape[1] #number of dataset\n cost = -(np.dot(Y,np.log(AL).T)+np.dot(1-Y,np.log(1-AL).T))/N #cross entropy\n \n cost = np.squeeze(cost)\n assert(cost.shape == ())\n\n return cost\n\n\n\ndef linear_backward(dZ, cache):\n \"\"\"\n backward propagation for a single layer\n \n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current 
layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n\n Example:\n dZ = np.array([[ 1.62434536 ,-0.61175641]])\n cache = (np.array([[-0.52817175, -1.07296862],\n [ 0.86540763, -2.3015387 ],\n [ 1.74481176, -0.7612069 ]]), np.array([[ 0.3190391 , -0.24937038, 1.46210794]]), np.array([[-2.06014071]]))\n dA_prev, dW, db = linear_backward(dZ, cache)\n print dA_prev, dW, db\n \n Return:\n [[ 0.51822968 -0.19517421]\n [-0.40506362 0.15255393]\n [ 2.37496825 -0.8944539 ]] [[-0.10076895 1.40685096 1.64992504]] [[ 0.50629448]]\n \"\"\"\n A_prev, W, b = cache\n N = A_prev.shape[1]\n \n dW = np.dot(dZ, A_prev.T)/N\n db = np.sum(dZ, axis = 1, keepdims = True)/N\n dA_prev = np.dot(W.T, dZ)\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db\n\n###########################################\n##############test code##################\n#dZ = np.array([[ 1.62434536 ,-0.61175641]])\n#cache = (np.array([[-0.52817175, -1.07296862],\n# [ 0.86540763, -2.3015387 ],\n# [ 1.74481176, -0.7612069 ]]), np.array([[ 0.3190391 , -0.24937038, 1.46210794]]), np.array([[-2.06014071]]))\n#dA_prev, dW, db = linear_backward(dZ, cache)\n#print dA_prev, dW, db\n###########################################\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n backward propagation for a whole layer(Linear->Activation)\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \n Example:\n AL = np.array([[-0.41675785 ,-0.05626683]])\n linear_activation_cache = ((np.array([[-2.1361961 , 1.64027081],\n [-1.79343559, -0.84174737],\n [ 0.50288142, -1.24528809]]), np.array([[-1.05795222, -0.90900761, 0.55145404]]), np.array([[ 2.29220801]])), np.array([[ 0.04153939, -1.11792545]]))\n dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"sigmoid\")\n print (\"sigmoid:\")\n print (\"dA_prev = \"+ str(dA_prev))\n print (\"dW = \" + str(dW))\n print (\"db = \" + str(db) + \"\\n\")\n\n dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"relu\")\n print (\"relu:\")\n print (\"dA_prev = \"+ str(dA_prev))\n print (\"dW = \" + str(dW))\n print (\"db = \" + str(db))\n \n Return:\n sigmoid:\n dA_prev = [[ 0.11017994 0.0110534 ]\n [ 0.09466817 0.00949723]\n [-0.05743092 -0.00576155]]\n dW = [[ 0.10266786 0.09778551 -0.01968084]]\n db = [[-0.05729622]]\n\n relu:\n dA_prev = [[ 0.44090989 0. ]\n [ 0.37883606 0. ]\n [-0.2298228 0. 
]]\n dW = [[ 0.44513825 0.37371418 -0.10478989]]\n db = [[-0.20837892]]\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n elif activation ==\"sigmoid\":\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n return dA_prev, dW, db\n\n###################################################\n####################test code#####################\n'''\nAL = np.array([[-0.41675785 ,-0.05626683]])\nlinear_activation_cache = ((np.array([[-2.1361961 , 1.64027081],\n [-1.79343559, -0.84174737],\n [ 0.50288142, -1.24528809]]), np.array([[-1.05795222, -0.90900761, 0.55145404]]), np.array([[ 2.29220801]])), np.array([[ 0.04153939, -1.11792545]]))\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"sigmoid\")\nprint (\"sigmoid:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db) + \"\\n\")\n\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"relu\")\nprint (\"relu:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))\n'''\n##################################################\n\ndef L_layer_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n\n Example:\n AL = np.array([[ 1.78862847 , 0.43650985]])\n Y = np.array([[1,0]])\n caches = (((np.array([[ 0.09649747, -1.8634927 ],\n [-0.2773882 , -0.35475898],\n [-0.08274148, -0.62700068],\n [-0.04381817, -0.47721803]]), np.array([[-1.31386475, 0.88462238, 0.88131804, 1.70957306],\n [ 0.05003364, -0.40467741, -0.54535995, -1.54647732],\n [ 0.98236743, -1.10106763, -1.18504653, -0.2056499 ]]), np.array([[ 1.48614836],\n [ 0.23671627],\n [-1.02378514]])), np.array([[-0.7129932 , 0.62524497],\n [-0.16051336, -0.76883635],\n [-0.23003072, 0.74505627]])), ((np.array([[ 1.97611078, -1.24412333],\n [-0.62641691, -0.80376609],\n [-2.41908317, -0.92379202]]), np.array([[-1.02387576, 1.12397796, -0.13191423]]), np.array([[-1.62328545]])), np.array([[ 0.64667545, -0.35627076]])))\n grads = L_layer_backward(AL, Y, caches)\n print(grads)\n \n Result:\n {'dW2': array([[-0.39202432, -0.13325855, -0.04601089]]), 'dW1': array([[ 0.41010002, 0.07807203, 0.13798444, 0.10502167],\n [ 0. , 0. , 0. , 0. ],\n [ 0.05283652, 0.01005865, 0.01777766, 0.0135308 ]]), 'dA1': array([[ 0. , 0.52257901],\n [ 0. , -0.3269206 ],\n [ 0. , -0.32070404],\n [ 0. , -0.74079187]]), 'dA2': array([[ 0.12913162, -0.44014127],\n [-0.14175655, 0.48317296],\n [ 0.01663708, -0.05670697]]), 'db1': array([[-0.22007063],\n [ 0. 
],\n [-0.02835349]]), 'db2': array([[ 0.15187861]])}\n \"\"\"\n grads ={}\n L = len(caches) #number of layers\n N = AL.shape[1] \n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n\n current_cache = caches[L-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, \"sigmoid\")\n \n for l in reversed(range(L-1)):\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)],current_cache,\"relu\")\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n \n return grads\n\n\ndef update_parameters(parameters, grads, learning_rate):\n \n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n \n L = len(parameters) // 2 #number of layers\n \n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\"+str(l+1)]-learning_rate*grads[\"db\"+str(l+1)]\n\n return parameters","sub_path":"ML/DL/DLFrame/DLEle.py","file_name":"DLEle.py","file_ext":"py","file_size_in_byte":14813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275057511","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# ---------------------------------------------------------\n\nfrom typing import Optional, Union\n\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import NlpSweepSettings as RestNlpSweepSettings\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import SamplingAlgorithmType\nfrom azure.ai.ml.entities._job.sweep.early_termination_policy import EarlyTerminationPolicy\nfrom azure.ai.ml.entities._mixins import RestTranslatableMixin\n\n\n# pylint: disable=protected-access\nclass NlpSweepSettings(RestTranslatableMixin):\n \"\"\"Sweep settings for all AutoML NLP tasks.\"\"\"\n\n def __init__(\n self,\n *,\n sampling_algorithm: Union[str, SamplingAlgorithmType],\n early_termination: Optional[EarlyTerminationPolicy] = None,\n ):\n self.sampling_algorithm = sampling_algorithm\n self.early_termination = early_termination\n\n def _to_rest_object(self) -> RestNlpSweepSettings:\n return RestNlpSweepSettings(\n sampling_algorithm=self.sampling_algorithm,\n early_termination=self.early_termination._to_rest_object() if self.early_termination else None,\n )\n\n @classmethod\n def _from_rest_object(cls, obj: RestNlpSweepSettings) -> \"NlpSweepSettings\":\n return cls(\n sampling_algorithm=obj.sampling_algorithm,\n early_termination=EarlyTerminationPolicy._from_rest_object(obj.early_termination)\n if obj.early_termination\n else None,\n )\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, NlpSweepSettings):\n return NotImplemented\n\n return self.sampling_algorithm == other.sampling_algorithm and self.early_termination == other.early_termination\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_sweep_settings.py","file_name":"nlp_sweep_settings.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"263249772","text":"import arff\nimport copy\nimport json\nimport logging\nimport os\nimport pandas as pd\nimport warnings\n\nfrom functools import wraps\n\nfrom a2ml.api.utils import fsclient, get_uid, get_uid4, remove_dups_from_list, process_arff_line\nfrom a2ml.api.utils.local_fsclient import LocalFSClient\n\n# To avoid warnings for inplace operation on datasets\npd.options.mode.chained_assignment = None\n\nclass DataFrame(object):\n BOOLEAN_WORDS_TRUE = ['yes', 'on']\n BOOLEAN_WORDS_FALSE = ['no', 'off']\n\n def __init__(self, options):\n self.options = options\n self.categoricals = {}\n self.transforms_log = [[],[],[],[]]\n self.df = None\n self.dataset_name = None\n self.loaded_columns = None\n\n def _get_compression(self, extension):\n compression = self.options.get('data_compression', 'infer')\n if extension.endswith('.gz') or extension.endswith('.gzip'):\n compression = 'gzip'\n elif extension.endswith('.bz2'):\n compression = 'bz2'\n elif extension.endswith('.zip'):\n compression = 'zip'\n elif extension.endswith('.xz'):\n compression = 'xz'\n\n return compression\n\n @staticmethod\n def create_dataframe(data_path=None, records=None, features=None):\n if data_path:\n ds = DataFrame({'data_path': data_path})\n ds.load(features = features)\n else:\n ds = DataFrame({})\n ds.load_records(records, features=features)\n\n return ds\n\n @staticmethod\n def load_from_files(files, features=None):\n for file in files:\n path = file if type(file) == str else file['path']\n df = DataFrame.create_dataframe(path, None, features)\n yield (file, df)\n\n def 
load_from_file(self, path, features=None, nrows=None):\n from collections import OrderedDict\n\n extension = path\n if self.options.get('data_extension', 'infer') != 'infer':\n extension = self.options['data_extension']\n\n if self.options.get('content_type') == 'multipart':\n fsclient.merge_folder_files(path)\n\n if extension.endswith('.arff') or extension.endswith('.arff.gz'):\n arffFile = None\n class ArffFile:\n def __init__(self, file):\n self.file = file\n self.date_attrs = {}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = process_arff_line(next(self.file), self.date_attrs)\n return line\n\n try:\n\n with fsclient.open(path, 'r') as f:\n arffFile = ArffFile(f)\n arff_data = arff.load(arffFile, return_type=arff.COO)\n\n convert_arff = DataFrame._convert_arff_coo\n except arff.BadLayout:\n with fsclient.open(path, 'r') as f:\n arffFile = ArffFile(f)\n arff_data = arff.load(arffFile, return_type=arff.DENSE)\n\n convert_arff = DataFrame._convert_arff_dense\n\n columns = [a[0] for a in arff_data['attributes']]\n series = convert_arff(features, columns, arff_data['data'])\n\n res = pd.DataFrame.from_dict(OrderedDict(\n (c, s) for c, s in zip(columns, series) if s is not None\n ))\n for date_field, fmt in arffFile.date_attrs.items():\n res[date_field] = pd.to_datetime(res[date_field], infer_datetime_format=True, errors='ignore', utc=True)\n\n return res\n elif extension.endswith('.pkl') or extension.endswith('.pkl.gz'):\n return self.loadFromBinFile(path, features)\n elif extension.endswith('.json') or extension.endswith('.json.gz'):\n path = fsclient.s3fs_open(path)\n return pd.read_json(path, orient=self.options.get('json_orient',None))\n elif extension.endswith('.xlsx') or extension.endswith('.xls'):\n path = fsclient.s3fs_open(path)\n return pd.read_excel(path)\n elif extension.endswith('.feather') or extension.endswith('.feather.gz') or extension.endswith('.feather.zstd') or extension.endswith('.feather.lz4'):\n return self.loadFromFeatherFile(path)\n\n csv_with_header = self.options.get('csv_with_header', True)\n header = 0 if csv_with_header else None\n prefix = None if csv_with_header else 'c'\n\n compression = self._get_compression(extension)\n path = fsclient.s3fs_open(path)\n\n res_df = None\n try:\n res_df = pd.read_csv(\n path,\n encoding='utf-8',\n escapechar=\"\\\\\",\n usecols=features,\n na_values=['?'],\n header=header,\n prefix=prefix,\n sep = ',',\n nrows=nrows,\n low_memory=False,\n compression=compression\n )\n except Exception as e:\n logging.error(\"read_csv failed: %s\"%e)\n res_df = pd.read_csv(\n path,\n encoding='utf-8',\n escapechar=\"\\\\\",\n usecols=features,\n na_values=['?'],\n header=header,\n prefix=prefix,\n sep = '|',\n nrows=nrows,\n low_memory=False,\n compression=compression\n )\n\n if res_df is not None:\n for name, value in res_df.dtypes.items():\n if value == 'object':\n res_df[name] = pd.to_datetime(res_df[name], infer_datetime_format=True, errors='ignore', utc=True)\n\n return res_df\n\n def load(self, features=None, nrows=None):\n self.categoricals = {}\n self.transforms_log = [[],[],[],[]]\n\n import csv\n from io import StringIO\n\n path = self.options['data_path']\n if isinstance(path, StringIO):\n path.seek(0)\n self.df = pd.read_csv(path, encoding='utf-8', escapechar=\"\\\\\", usecols=features, na_values=['?'], nrows=nrows)\n if self.options.get(\"targetFeature\") in self.df.columns:\n self.dropna([self.options[\"targetFeature\"]])\n else:\n if path.startswith(\"jdbc:\"):\n import psycopg2\n from psycopg2.extensions 
import parse_dsn\n path = path.replace('sslfactory=org.postgresql.ssl.NonValidatingFactory&', '')\n ary = path.split('tablename')\n path = ary[0]\n tablename = ary[1]\n dataset_name = tablename\n\n self.dbconn_args = parse_dsn(path[5:])\n conn = psycopg2.connect(**self.dbconn_args)\n self.df = pd.read_sql(\"select * from %s\"%tablename, con=conn)\n else:\n path, remote_path = self._check_remote_path()\n try:\n self.df = self.load_from_file(path, features=features, nrows=nrows)\n except:\n if remote_path:\n logging.exception(\"Loading local file failed. Download it again...\")\n self.options['data_path'] = remote_path\n path, remote_path = self._check_remote_path(force_download=True)\n self.df = self.load_from_file(path, features=features, nrows=nrows)\n else:\n raise\n\n self.dataset_name = os.path.basename(path)\n\n if self.options.get(\"targetFeature\") in self.df.columns:\n self.dropna([self.options[\"targetFeature\"]])\n return self\n\n def _check_remote_path(self, force_download=False):\n remote_path = None\n if self.options['data_path'].startswith(\"http:\") or self.options['data_path'].startswith(\"https:\"):\n local_dir = LocalFSClient.get_temp_folder()\n file_name = 'data-' + get_uid4()\n\n #NOTE: download_file is assumed to come from a2ml's utils; it is not among the imports shown at the top of this file\n local_file_path = download_file(self.options['data_path'],\n local_dir=local_dir, file_name=file_name, force_download=force_download)\n\n remote_path = self.options['data_path']\n self.options['data_path'] = local_file_path\n\n return self.options['data_path'], remote_path\n\n def load_records(self, records, features=None):\n self.categoricals = {}\n self.transforms_log = [[],[],[],[]]\n\n if features:\n self.df = pd.DataFrame.from_records(records, columns=features)\n self.loaded_columns = features\n else:\n self.df = pd.DataFrame(records) #dict\n\n return self\n\n def get_records(self):\n return self.df.values.tolist()\n\n def saveToCsvFile(self, path, compression=\"gzip\"):\n fsclient.remove_file(path)\n fsclient.create_parent_folder(path)\n\n with fsclient.save_local(path) as local_path:\n self.df.to_csv(local_path, index=False, compression=compression, encoding='utf-8')\n\n def saveToBinFile(self, path):\n fsclient.save_object_to_file(self.df, path)\n\n def loadFromBinFile(self, path, features=None):\n self.df = fsclient.load_object_from_file(path)\n\n if features:\n self.df = self.df[features]\n\n return self.df\n\n def saveToFeatherFile(self, path):\n fsclient.save_object_to_file(self.df, path, fmt=\"feather\")\n\n def loadFromFeatherFile(self, path, features=None):\n self.df = fsclient.load_db_from_feather_file(path, features) \n return self.df\n\n def saveToFile(self, path):\n if path.endswith('.feather') or path.endswith('.feather.gz') or path.endswith('.feather.zstd') or path.endswith('.feather.lz4'):\n self.saveToFeatherFile(path)\n else:\n compression = None\n if path.endswith('.gz'):\n compression = 'gzip'\n\n self.saveToCsvFile(path, compression) \n\n def count(self):\n if self.df is not None:\n return len(self.df)\n else:\n return 0\n\n @property\n def columns(self):\n return self.df.columns.tolist() #Index.get_values() was removed in newer pandas; tolist() works across versions\n\n def _map_dtypes(self, dtype):\n dtype_map = {'int64': 'integer', 'float64':'double', 'object': 'string',\n 'categorical':'categorical', 'datetime64[ns]': 'datetime', 'bool': 'boolean'}\n if dtype_map.get(dtype, None):\n return dtype_map[dtype]\n\n if dtype and (dtype.startswith('int') or dtype.startswith('uint')):\n return 'integer'\n\n if dtype and dtype.startswith('float'):\n return 'float'\n\n if dtype and dtype.startswith('double'):\n return 'double'\n\n if dtype 
and dtype.startswith('datetime64'):\n return 'datetime'\n\n return dtype\n\n @property\n def dtypes(self):\n types_list = []\n columns_list = self.columns\n for idx, dtype in enumerate(self.df.dtypes):\n types_list.append((columns_list[idx], self._map_dtypes(dtype.name)))\n\n return types_list\n\n @property\n def dtypes_dict(self):\n types_dict = {}\n columns_list = self.columns\n for idx, dtype in enumerate(self.df.dtypes):\n types_dict[columns_list[idx]] = self._map_dtypes(dtype.name)\n\n return types_dict\n\n def select(self, features):\n self.df = self.df[features]\n return self\n\n def drop(self, columns):\n self.df.drop(columns, inplace=True, axis=1)\n\n def drop_duplicates(self, columns=None):\n self.df.drop_duplicates(subset=columns, inplace=True)\n self.df.reset_index(drop=True, inplace=True)\n return self\n\n def dropna(self, columns=None):\n self.df.dropna(subset=columns, inplace=True, axis=0)\n self.df.reset_index(drop=True, inplace=True)\n return self\n\n def fillna(self, value):\n if isinstance(value, dict):\n value = value.copy()\n for item in self.dtypes:\n if list(value.keys())[0] == item[0]:\n if item[1] == 'string':\n value[list(value.keys())[0]] = str(list(value.values())[0])\n elif item[1] == 'integer':\n value[list(value.keys())[0]] = int(list(value.values())[0])\n else:\n value[list(value.keys())[0]] = float(list(value.values())[0])\n\n self.df.fillna(value, inplace=True)\n return self\n\n def convertToCategorical(self, col_names, is_target = False, categories = None):\n #print(\"convertToCategorical:%s\"%col_names)\n if not isinstance(col_names, list):\n col_names = [col_names]\n\n if is_target:\n for col in col_names:\n if col in self.columns and self.categoricals.get(col, None) is None:\n self.df[col] = pd.Categorical(self.df[col], categories=categories)\n self.categoricals[col] = {'categories': list(self.df[col].cat.categories)}\n self.df[col] = self.df[col].cat.codes\n else:\n cols_to_process = []\n cols = self.columns\n for col in col_names:\n if col in cols:\n cols_to_process.append(col)\n\n #print(cols_to_process)\n if cols_to_process:\n self.df = pd.get_dummies(self.df, columns=cols_to_process)\n new_cols = self.columns\n\n for col in cols_to_process:\n generated_cols = []\n for new_col in new_cols:\n if new_col.startswith(col+'_'):\n generated_cols.append(new_col)\n\n self.categoricals[col] = {'columns': generated_cols}\n\n return self\n\n @staticmethod\n def _convert_arff_coo(features, columns, arff_data_data):\n if features is None:\n data = [([], []) for _ in columns]\n else:\n fset = remove_dups_from_list(features)\n data = [([], []) if c in fset else None for c in columns]\n\n for v, i, j in zip(*arff_data_data):\n d = data[j]\n if d is not None:\n indices, values = d\n if indices:\n assert indices[-1] < i\n indices.append(i)\n values.append(v)\n\n max_i = -1\n for d in data:\n if d is not None and len(d[0]) > 0:\n max_i = max(max_i, d[0][-1])\n height = max_i + 1\n\n series = []\n for d in data:\n if d is None:\n s = None\n else:\n keys, values = d\n sa = pd.SparseArray(\n values,\n sparse_index=pd._libs.sparse.IntIndex(height, keys),\n fill_value=0\n )\n s = pd.Series(sa.values)\n series.append(s)\n\n return series\n\n @staticmethod\n def _convert_arff_dense(features, columns, arff_data_data):\n if features is None or set(features) == set(columns):\n return zip(*arff_data_data)\n\n fset = remove_dups_from_list(features)\n return [\n [row[i] for row in arff_data_data] if c in fset else None\n for i, c in enumerate(columns)\n 
]\n\n","sub_path":"a2ml/api/utils/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":15307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391108005","text":"import os\r\nimport re\r\n\r\nfrom tika import parser\r\n\r\n\r\nclass PdfRenamer:\r\n def __init__(self, file_path):\r\n self.file_path = file_path\r\n self.raw = parser.from_file(self.file_path)\r\n self.content = (self.raw['content'])\r\n\r\n def extract_name(self):\r\n pattern = r\"Nombre Completo \\w* \\w*\"\r\n name_regex = re.findall(pattern, self.content)\r\n name_regex = name_regex[0].split(\" \")\r\n # Prune extra words\r\n name = name_regex[2:]\r\n # Convert array to string\r\n name_as_string = \"\"\r\n for word in name:\r\n name_as_string += \" \" + word\r\n name_as_string = name_as_string.strip()\r\n return name_as_string\r\n\r\n def is_day(self):\r\n regex_pattern = r\"Diurno\"\r\n regex = re.findall(regex_pattern, self.content)\r\n for word in regex:\r\n if word == \"Diurno\":\r\n return True\r\n else:\r\n return False\r\n\r\n def rename_file(self, is_day, name_as_string):\r\n if is_day:\r\n os.rename(self.file_path, os.path.join(os.path.dirname(self.file_path), name_as_string + \".pdf\"))\r\n else:\r\n os.rename(self.file_path, os.path.join(os.path.dirname(self.file_path), name_as_string +\r\n \" Nocturno\" + \".pdf\"))\r\n","sub_path":"pdf_renamer.py","file_name":"pdf_renamer.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150257376","text":"\"\"\"\r\nFilename: ranking.py\r\nDescription: This task involves processing the data and rank ordering it for a given year\r\n@author: Isaias Villalobos\r\nDate: 11/20/17\r\n\"\"\"\r\n\r\nfrom rit_lib import *\r\nfrom utils import *\r\n\r\ncountryValue = struct_type(\"countryValue\", (str, \"country\"), (float, \"value\"))\r\n\r\n\r\ndef sorted_ranking_data(data, year):\r\n \"\"\"\r\n :param data: containing information from the data and metadata files\r\n :param year: integer representing the year under consideration.\r\n :return: A list of CountryValue structures\r\n \"\"\"\r\n CODE_TO_LIFE = data[3]\r\n REGION_TO_COUNTRY_DICT = data[2]\r\n INCOME_TO_COUNTRY_CODE_DICT = data[1]\r\n COUNTRY_CODE_TO_NAME = data[0]\r\n #DATA IS A TUPLE OF 4 THINGS\r\n\r\n lst_of_country_values = []\r\n\r\n for code in COUNTRY_CODE_TO_NAME:\r\n life_ex = CODE_TO_LIFE[code][int(year) - 1960]\r\n if life_ex == '':\r\n continue\r\n countryValue1 = countryValue(COUNTRY_CODE_TO_NAME[code],float(life_ex))\r\n lst_of_country_values += [countryValue1]\r\n\r\n\r\n lst_of_country_values.sort(key=lambda x: x.value, reverse=True)\r\n\r\n return lst_of_country_values\r\n\r\ndef main():\r\n \"\"\"\r\n CALLS THE READ DATA FUNCCTION, FILTER REGION, FILTER INCOME\r\n \"\"\"\r\n while True:\r\n filename = \"worldbank_life_expectancy\"\r\n\r\n data = read_data(filename)\r\n\r\n year = input(\"Enter year of interest (-1 to quit): \")\r\n if int(year) == -1:\r\n break\r\n region = input(\"Enter region (type ’all’ to consider all): \")\r\n if region == '':\r\n print(\"Enter valid string\")\r\n break\r\n income = input(\"Enter income of interest (-1 to quit): \")\r\n\r\n\r\n\r\n filtered_data = filter_region(data,region)\r\n filter_data_again = filter_income(filtered_data, income)\r\n\r\n country_lst = sorted_ranking_data(filter_data_again, year)\r\n # print(country_lst)\r\n # exit()\r\n\r\n print(\"Top 10 Life Expectancy for\", 
year)\r\n\r\n for i in range(0, len(country_lst)):\r\n if i == 10:\r\n break\r\n else:\r\n print(str(i+1),country_lst[i].country, country_lst[i].value)\r\n\r\n reversed_country_list = list(reversed(country_lst))\r\n print()\r\n print(\"Bottom 10 Life Expectancy for\", year)\r\n\r\n for i in range(0, len(reversed_country_list)):\r\n if i == 10:\r\n break\r\n else:\r\n print(str(i+1),reversed_country_list[i].country, reversed_country_list[i].value)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # main runs only when directly invoking this module\r\n main()\r\n # end of program file\r\n","sub_path":"ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"547987785","text":"# coding=utf-8\n\n\"\"\"\nSimple metamodel for object states. Contains definitions for:\n\n- State,\n- Object,\n- Link,\n- LinkObject.\n\"\"\"\n\nfrom collections import OrderedDict\n\nclass State(object):\n def __init__(self):\n self.objects = OrderedDict()\n self.links = OrderedDict()\n self.linkObject = OrderedDict()\n\n\n\nclass StateElement(object):\n def __init__(self, state):\n self.state = state\n\n\nclass Object(StateElement):\n def __init__(self, state, className, name):\n super(Object,self).__init__(state)\n state.objects[name] = self\n self.name = name\n self.className = className\n self.attributes = OrderedDict()\n\n def set(self, name, value):\n self.attributes[name] = value\n\n\n\nclass Link(StateElement):\n def __init__(self, state, associationName, objects):\n super(Link, self).__init__(state)\n link_name = '_'.join(map(lambda o: o.name, objects))\n state.links[link_name] = self\n self.associationName = associationName\n self.roles = objects\n\n\n\nclass LinkObject(StateElement):\n def __init__(self, state, associationClassName, name, objects) :\n super(LinkObject, self).__init__(state)\n state.linkObject[name] = self\n self.name = name\n self.className = associationClassName\n self.attributes = OrderedDict()\n self.roles = objects\n\n def set(self, name, value):\n self.attributes[name] = value\n\n\ndel OrderedDict","sub_path":"pyuseocl/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"291214828","text":"from fhirstore.search import SearchArguments\n\n\nclass Bundle:\n def __init__(self):\n self.content = {\"resource_type\": \"Bundle\", \"total\": 0, \"entry\": []}\n\n def fill(self, hits, formatting_args):\n if formatting_args[\"is_summary_count\"] == True:\n self.content[\"tag\"] = {\"code\": \"SUBSETTED\"}\n self.content.pop(\"entry\")\n if len(hits):\n self.content[\"total\"] += hits[\"count\"]\n\n elif formatting_args[\"is_summary_count\"] == False and len(hits):\n for h in hits[\"hits\"][\"hits\"]:\n self.content[\"entry\"].append(\n {\"resource\": h[\"_source\"], \"search\": {\"mode\": \"match\"}}\n )\n self.content[\"total\"] += hits[\"hits\"][\"total\"][\"value\"]\n if formatting_args[\"elements\"] or formatting_args[\"summary\"]:\n self.content[\"tag\"] = {\"code\": \"SUBSETTED\"}\n\n def complete(self, new_bundle, formatting_args):\n if new_bundle.content[\"resource_type\"] == \"OperationOutcome\":\n self.content = new_bundle.content\n else:\n self.content[\"total\"] += new_bundle.content[\"total\"]\n if not formatting_args[\"is_summary_count\"]:\n self.content[\"entry\"].extend(new_bundle.content[\"entry\"])\n\n def append(self, 
included_hits, formatting_args):\n if formatting_args[\"include\"] and \"hits\" in included_hits:\n for h in included_hits[\"hits\"][\"hits\"]:\n self.content[\"entry\"].append(\n {\"resource\": h[\"_source\"], \"search\": {\"mode\": \"include\"}}\n )\n\n def fill_error(self, severity=\"error\", code=\"invalid\", details=None, diagnostic=None):\n self.content[\"resource_type\"] = \"OperationOutcome\"\n self.content[\"issue\"] = {\"severity\": severity, \"code\": code}\n self.content.pop(\"total\", None)\n self.content.pop(\"entry\", None)\n self.content.pop(\"tag\", None)\n\n if details:\n self.content[\"issue\"][\"details\"] = details\n if diagnostic:\n self.content[\"issue\"][\"diagnostic\"] = diagnostic\n","sub_path":"fhirstore/search/bundle.py","file_name":"bundle.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"37533369","text":"from library.xpy import TestReport\nfrom case.gtmc.item import ItemDeleteCaseLibrary, ItemUpdateCaseLibrary, ItemGetCaseLibrary, ItemCreateCaseLibrary\nfrom case.gtmc.vehicle import VehicleBaseCaseLibrary\n\nhost = 'http://192.168.0.131:8054'\nurl = host + '/api/v1.0'\nurl_item = url + '/mat/item'\n\n##########################################################################\n# 测试: ItemApi.create()\n##########################################################################\n\ntitle = {\n \"num\": \"No.\",\n \"method\": \"method\",\n \"url\": \"URL\",\n \"status\": \"Status\",\n \"time\": \"Time\",\n \"name\": \"Case Description\",\n \"data\": \"Response\",\n}\n\nTestReport.report(title)\ncases = []\n# cases.extend(ItemCreateCaseLibrary.getAllCases())\n# cases.extend(ItemGetCaseLibrary.getAllCases())\n# cases.extend(ItemDeleteCaseLibrary.getAllCases())\n# cases.extend(ItemUpdateCaseLibrary.getAllCases())\ncases.extend(VehicleBaseCaseLibrary.getAllCases())\n\nTestReport.report(title)\nTestReport.run(cases, needReport=True, reportData=False, needTotal=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"428097891","text":"# BE10105 Project S1\r\n\r\n# Submitted by:\r\n#\tAkshay Raj PB\r\n#\tGokul K\r\n#\tReuben Thomas Peter\r\n#\tTeena Sabu\r\n\r\nimport random\r\n\r\nprint(\"HANGMAN Game\")\r\nprint(\"-------------------------------\")\r\n\r\n\r\ndef startGame(word, attempts, found):\r\n if attempts == 0:\r\n print(\"You Lost! The word is {0}\".format(word))\r\n return\r\n if len(found) == len(word):\r\n print(\"You won! 
The word is {0}\".format(word))\r\n        return\r\n    s = \"\"\r\n\r\n    for i in range(len(word)):\r\n\r\n        if i in found:\r\n            k = word[i] + \" \"\r\n        else:\r\n            k = \"_ \"\r\n        s = s + k\r\n    print(s)\r\n    print(\"Attempts left: {0}\".format(attempts))\r\n    y = input(\"Enter guess: \").upper()\r\n\r\n    c = -1\r\n\r\n    for i in range(len(word)):\r\n        if word[i] == y:\r\n            if i not in found:\r\n                found.append(i)\r\n                print(\"The letter found at {0}th position\".format(i+1))\r\n                c = 0\r\n    if c == -1:\r\n        attempts = attempts - 1\r\n    print()\r\n    print()\r\n    print()\r\n    startGame(word, attempts, found)\r\n\r\n\r\nwords = [\r\n    \"ALPHABET\", \"AEROPLANE\", \"IRREGARDLESS\",\r\n    \"CALCULATOR\", \"ENCYCLOPEDIA\", \"DISINTERESTED\",\r\n    \"MASTERPIECE\", \"TELEVISION\"\r\n]\r\n\r\nx = int(len(words) * random.random())\r\nword = words[x].upper()\r\nattempts = 5\r\nfound = []\r\nprint(\"Length of the word is: {0}\".format(len(word)))\r\nstartGame(word, attempts, found)\r\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"67589537","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nAccess-log middleware: replaces tornado's log_request hook to provide pluggable access-log output.\r\n\"\"\"\r\nimport datetime\r\nimport logging\r\nimport pytz\r\n\r\naccess_log = logging.getLogger('access_log')\r\n\r\n\r\nclass AccessLogMiddleware(object):\r\n    def process_init(self, application):\r\n        application.settings['log_function'] = self.log\r\n\r\n    def log(self, handler):\r\n        utc_now = datetime.datetime.now(pytz.timezone('UTC'))\r\n        message = {\r\n            'remote_ip': handler.request.remote_ip,\r\n            'utc_created_at': str(utc_now),\r\n            'host': handler.request.headers.get('host', ''),\r\n            'method': handler.request.method,\r\n            'uri': handler.request.uri,\r\n            'version': handler.request.version,\r\n            'status_code': handler.get_status(),\r\n            'content_length': handler.request.headers.get('Content-Length', ''),\r\n            'referer': handler.request.headers.get('Referer', ''),\r\n            'user_agent': handler.request.headers.get('User-Agent', ''),\r\n            'request_time': 1000.0 * handler.request.request_time(),\r\n            'id': handler.get_argument('id', ''),\r\n            'client': handler.get_argument('client', ''),\r\n            'token': handler.get_argument('token', ''),\r\n        }\r\n        access_log.info(message)\r\n","sub_path":"applications/core/middleware/accesslog.py","file_name":"accesslog.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36512132","text":"import numpy as np\nimport sys\nimport re\npattern = re.compile(\"([.?!,()\\[\\]\\-\\\"':;\\d])\") # to split into words\n\nclass io(object):\n\n    '''/**\n     * [separate description]\n     * @param  {[type]} string [description] a sentence\n     * @return {[type]} list [description] splitted words\n     */'''\n    def separate(self, sentence):\n        words = []\n        frags = sentence.split() # split with space\n        for frag in frags:\n            words.extend(re.split(pattern, frag))\n        words = [w.lower() for w in words if w]\n        # set all the chars to lower case & remove empty elements\n        return words\n\n\n    '''/**\n     * [count_words description]\n     * @param  {[type]} list [description] splitted words\n     * @return {[type]} dict [description] dict (k: word, v: n of each word)\n     */'''\n    def count_words(self, words):\n        for word in words:\n            if word in self.dic_n_word:\n                
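# word already counted: bump its frequency\n                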
self.dic_n_word[word] += 1\n else:\n self.dic_n_word[word] = 1\n\n \n '''/**\n * [create_vocab description]\n * @param {[type]} int [description] vocaburary size\n * @param {[type]} filename [description] translation data\n */'''\n def create_vocab(self, vocab_size, filename, top=-1):\n self.dic_n_word = {}\n with open(filename) as lines:\n i = 1\n for line in lines:\n words = self.separate(line)\n self.count_words(words)\n if i % 10000 == 0:\n sys.stderr.write('\\r\\033' +\n 'Processed {0:,d} sentences.'.format(i))\n sys.stderr.flush()\n if i == top: break\n i += 1\n self.UNK = vocab_size + 0 # unknown word\n self.EOS = vocab_size + 1 # end marker\n self.BOS = vocab_size + 2 # begin marker\n self.vocab_size = vocab_size + 3\n n_word = sorted(self.dic_n_word.items(), key=lambda x: x[1], reverse=True)\n self.dict_id_w = [key for (key, value) in n_word[:vocab_size]]\n self.dict_id_w.extend(['', ''])\n self.dict_w_id = dict(zip(self.dict_id_w, range(len(self.dict_id_w))))\n\n\n '''/**\n * [words_to_ids description]\n * @param {[type]} list [description] word-splitted sentence\n * @return {[type]} list [description] sentence in ID\n */'''\n def words_to_ids(self, words):\n ids = []\n for word in words:\n w_id = self.dict_w_id.get(word)\n if w_id is None:\n ids.append(self.UNK)\n else:\n ids.append(w_id)\n ids.append(self.EOS)\n return ids\n\n\n '''/**\n * [sentence_to_ids description]\n * @param {[type]} string [description] sentence\n * @return {[type]} list [description] sentence in ID\n */'''\n def sentence_to_ids(self, sentence):\n words = self.separate(sentence)\n ids = self.words_to_ids(words)\n return ids\n\n\n '''/**\n * [ids_to_sentence description]\n * @param {[type]} list [description] sentence in ID\n * @return {[type]} string [description] sentence\n */'''\n def ids_to_sentence(self, ids):\n words = [self.dict_id_w[w_id] for w_id in ids]\n sentence = ' '.join(words)\n return sentence\n\n\n '''/**\n * [create_train_data description]\n * @param {[type]} filename [description] translation data\n * @return {[type]} lists [description] sentences in ID\n */'''\n def create_train_data(self, filename, top=-1):\n train_data = []\n with open(filename) as lines:\n i = 1\n for line in lines:\n ids = self.sentence_to_ids(line)\n train_data.append(ids)\n if i % 10000 == 0:\n sys.stderr.write('\\r\\033' +\n 'Processed {0:,d} sentences.'.format(i))\n sys.stderr.flush()\n if i == top: break\n i += 1\n return train_data\n","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"601321287","text":"import os\nfrom os.path import split\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils.plot import conv_plots\n\nif __name__ == '__main__':\n base = split(split(os.getcwd())[0])[0]\n skip = 0\n for poly_order in [1, 2]:\n full_path = os.path.join(base, f\"build/src/stokes/errors-o{poly_order}.csv\")\n\n head = list(map(str.strip, open(full_path).readline().split(\",\")))\n data = np.genfromtxt(full_path, delimiter=\",\", skip_header=True)\n data = data[skip:, :]\n\n conv_plots(data, head, title=r\"$\\textrm{Stokes: polynomial order: (\" + str(poly_order + 1) + \", \" + str(poly_order) + \")}$\")\n\n hs = data[:, 0]\n l2 = data[:, 1]\n h1 = data[:, 2]\n eoc_l2 = np.log(l2[:-1] / l2[1:]) / np.log(hs[:-1] / hs[1:])\n eoc_h1 = np.log(h1[:-1] / h1[1:]) / np.log(hs[:-1] / hs[1:])\n print()\n print(\"========================================\")\n 
print(\"EOC (L2): \", eoc_l2)\n print(\"EOC (H1): \", eoc_h1)\n print(\"========================================\")\n plt.savefig(f\"figure-o{poly_order}.pdf\")\n plt.show()\n","sub_path":"src/stokes/conv_plot.py","file_name":"conv_plot.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"219879826","text":"\"\"\"Tests of fiona.env\"\"\"\n\nimport pytest\n\nimport fiona\nimport fiona.env\nfrom fiona.session import AWSSession\n\n\ndef test_nested_credentials(monkeypatch):\n \"\"\"Check that rasterio.open() doesn't wipe out surrounding credentials\"\"\"\n\n @fiona.env.ensure_env_with_credentials\n def fake_opener(path):\n return fiona.env.getenv()\n\n with fiona.env.Env(session=AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar')):\n assert fiona.env.getenv()['AWS_ACCESS_KEY_ID'] == 'foo'\n assert fiona.env.getenv()['AWS_SECRET_ACCESS_KEY'] == 'bar'\n\n monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'lol')\n monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'wut')\n gdalenv = fake_opener('s3://foo/bar')\n assert gdalenv['AWS_ACCESS_KEY_ID'] == 'foo'\n assert gdalenv['AWS_SECRET_ACCESS_KEY'] == 'bar'\n","sub_path":"tests/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319605716","text":"# coding:utf8\nimport copy\nimport json\nimport time\n\nimport requests\nfrom pymongo import MongoClient\n\nuname = 'songs_user'\npasswd = 'songs2018'\nhost = '127.0.0.1'\nport = 27017\nthe_db = 'songs'\nmc = MongoClient(host, port)\ndb = mc[the_db]\ndb.authenticate(uname, passwd)\n\n\ndef crawl_song(_url):\n \"\"\"\n 抓取歌曲详情和歌词\n :param _url:\n :return:\n \"\"\"\n\n _headers = {\n 'method': 'GET',\n 'authority': 'c.y.qq.com',\n 'scheme': 'https',\n 'Referer': _url,\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh,en-US;q=0.9,en;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6',\n }\n\n print(_url)\n mid = _url.split('/')[-1].split('.html')[0]\n\n # 抓取歌曲详情\n path = '/v8/fcg-bin/fcg_play_single_song.fcg?songmid={}&tpl=yqq_song_detail&format=jsonp&callback=getOneSongInfoCallback&g_tk=5381&jsonpCallback=getOneSongInfoCallback&loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0'.format(\n mid)\n url = 'https://c.y.qq.com{}'.format(path)\n headers = copy.deepcopy(_headers)\n headers['path'] = path\n\n res0 = requests.get(url, headers=headers)\n # print(res0.text)\n\n rt = res0.text.replace('getOneSongInfoCallback(', '').replace(')', '')\n rj = json.loads(rt)\n print(rj)\n\n music_id = rj['data'][0]['id']\n name = rj['data'][0]['name']\n singers = rj['data'][0]['singer']\n singers = [s['name'] for s in singers]\n\n # 抓取歌曲详情\n path = '/lyric/fcgi-bin/fcg_query_lyric.fcg?nobase64=1&musicid={}&callback=jsonp1&g_tk=5381&jsonpCallback=jsonp1&loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0'.format(\n music_id)\n url = 'https://c.y.qq.com{}'.format(path)\n headers = copy.deepcopy(_headers)\n headers['path'] = path\n\n res1 = requests.get(url, headers=headers)\n # print(res1.text)\n\n rt = res1.text.replace('jsonp1(', '').replace(')', '')\n rj = json.loads(rt)\n print(rj)\n lyric = rj['lyric']\n\n song = {\n 'name': name,\n 'music_id': 
music_id,\n 'url': _url,\n 'mid': mid,\n 'singers': singers,\n 'lyric': lyric,\n }\n return song\n\n\ndef crawl_singer(_url):\n \"\"\"\n\n :param _url:\n :return:\n \"\"\"\n singermid = _url.split('/')[-1].split('.')[0]\n _t = int(time.time() * 1000)\n path = '/splcloud/fcgi-bin/fcg_get_singer_desc.fcg?singermid={}&utf8=1&outCharset=utf-8&format=xml&r={}'.format(\n singermid, _t)\n\n headers = {\n 'method': 'GET',\n 'authority': 'c.y.qq.com',\n 'scheme': 'https',\n 'path': path,\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'accept': '*/*',\n 'referer': _url,\n 'accept-encoding': '',\n 'accept-language': 'zh,en-US;q=0.9,en;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6',\n }\n\n url = 'https://c.y.qq.com{}'.format(_url)\n\n res0 = requests.get(url, headers=headers)\n\n\n return\n\ndef crawl_album(self):\n \"\"\"抓取专辑\"\"\"\n return\n\ndef crawl_album_list(self):\n \"\"\"抓取专辑列表\"\"\"\n return\n\ndef run():\n urls = [\n 'https://y.qq.com/n/yqq/song/003FFWnA3AIczD.html',\n 'https://y.qq.com/n/yqq/song/001Qu4I30eVFYb.html',\n ]\n for url in urls:\n song = crawl_song(url)\n print(song)\n _url = 'http://127.0.0.1:5000/insert_data?data={}'.format(song)\n res = requests.get(_url)\n print(res.text)\n # if not db['new_songs'].find_one({'url': song['url']}):\n # db['new_songs'].insert_one(song)\n\n for item in db['new_songs'].find():\n print(item['name'])\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"simple_music_spider_v1/qq_spider.py","file_name":"qq_spider.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587235112","text":"#!/usr/bin/env python2.7\n\n# Delete the current webhook and create new webhook to include all affilates with surveys listed in survey monkey\n\nfrom pprint import pprint\nimport json\nimport requests\nimport sys\n\nwith open(\".cred.json\") as credentials:\n creds = json.load(credentials)\n\n# get information from survey monkey\ndef survey_monkey(url):\n\n # survey monkey access token\n token = creds[\"sm\"][\"token\"]\n headers = {\n 'content-type': \"application/json\",\n 'authorization': \"bearer %s\" % (token)}\n\n response = requests.request(\"GET\", url, headers=headers)\n data = response.json()\n\n return data\n\n# get list of all survey ids\ndef get_surveyids():\n\n data = survey_monkey(\"https://api.surveymonkey.net/v3/surveys\")\n # id of surveys to ignore\n ignore = []\n ids = []\n\n for survey in data[\"data\"]:\n if survey[\"id\"] not in ignore:\n ids.append(survey[\"id\"])\n\n return ids\n\ndef sm_session():\n\n token = creds[\"sm\"][\"token\"]\n\n s = requests.session()\n\n s.headers.update({\n \"Authorization\": \"Bearer %s\" % token,\n \"Content-Type\": \"application/json\"\n })\n\n return s\n\ndef get_hookid():\n\n hooks = []\n\n s = sm_session()\n\n url = \"https://api.surveymonkey.net/v3/webhooks\"\n\n data = s.get(url)\n data = data.content\n data = json.loads(data)\n\n for hook in data[\"data\"]:\n hooks.append(hook[\"id\"])\n\n return hooks\n\ndef create_hook(hookids):\n\n s = sm_session()\n\n payload = {\n \"name\": \"SurveyMonkey Webhook Test\",\n \"event_type\": \"response_completed\",\n \"object_type\": \"survey\",\n \"object_ids\": hookids,\n #\"object_ids\": [\"83770439\", \"84765436\"], #keyprinn / batesmotel\n \"subscription_url\": creds[\"hook\"][\"subscription_url\"]\n }\n\n print(payload[\"subscription_url\"])\n\n print(\"hookids: %s\" % hookids)\n\n url = 
\"https://api.surveymonkey.net/v3/webhooks\"\n\n data = s.post(url, json=payload)\n\n return data.content\n\ndef delhook(hookid):\n\n token = creds[\"sm\"][\"token\"]\n\n s = requests.session()\n\n s.headers.update({\n \"Authorization\": \"Bearer %s\" % token,\n \"Content-Type\": \"application/json\"\n })\n \n url = \"https://api.surveymonkey.net/v3/webhooks/%s\" % hookid\n\n data = s.delete(url)\n\n pprint(data.content)\n\ndef main():\n\n # get list of all affiliates in SM\n affiliates = get_surveyids()\n\n # get list of current webhook(s)\n hookid = get_hookid()\n\n # delete current webhook(s)\n if len(hookid) >= 1:\n for hook in hookid:\n print(\"Deleing webhook: %s\" % hook)\n delhook(hook)\n \n # re-create webhook tracking list of all affiliates\n newhook = create_hook(affiliates)\n\n print(\"Creating webhook: %s\" % newhook)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/hookgen.py","file_name":"hookgen.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185829738","text":"#!/usr/bin/python3\n\nimport pickle\nimport numpy as np\nfrom numpy.random import choice, randint\nfrom tqdm import tqdm\nimport simplejson as json\n\nRACES = [\n 'Dwarf', 'Elf', 'Gnome', 'Halfling', 'Human', 'Half-Elf', 'Half-Orc',\n 'Orc', 'Aasimer', 'Catfolk', 'Changeling', 'Dhampir', 'Drow', 'Duergar',\n 'Fetchling', 'Gillman', 'Goblin', 'Grippli', 'Hobgoblin', 'Ifrit',\n 'Kitsune', 'Kobold', 'Lizardfolk', 'Merfolk', 'Nagaji', 'Oread', 'Ratfolk',\n 'Samsarans', 'Strix', 'Suli', 'Svirfneblin', 'Sylph', 'Tengu', 'Tiefling',\n 'Undine', 'Vanara', 'Vishkanya', 'Wayangs'\n]\n\nsettings = None\nglobal_pop = None\n\n\ndef load_settings():\n \"\"\" Settings\n 1: Base population: see lists above\n 2: Population Size: [1, Infinte)\n 3: Core Population Variance: [0, 100] | 0 = No population Varience, 100 = Every Diverse\n 4: Exotic Populations: [0, 32] | 0 = No exotic, 32 = All Exotic\n \"\"\"\n global settings\n settings = json.loads(open('settings.json', 'r').read())\n\n if settings is None: # Check for Illegal Settings\n print(\"Unable to open settings\")\n exit()\n elif settings[\"Race\"] not in RACES:\n print(\"Invalid Base Race\")\n exit()\n elif settings[\"Population\"] <= 0:\n print(\"Invalid Population size\")\n exit()\n elif settings[\"Variance\"] < 0 or settings[\"Variance\"] > 100:\n print(\"Invalid Core Population Variance\")\n exit()\n elif settings[\"Exotic\"] < 0 or settings[\"Exotic\"] > 38:\n print(\"Invalid Exotic Race Count\")\n exit()\n\n\ndef custom_settings(ra, po, va, ex):\n global settings\n settings = {\n 'Race': ra,\n 'Population': po,\n 'Variance': va,\n 'Exotic': ex,\n }\n\n\ndef create_variance():\n global settings\n global global_pop\n if settings is None:\n load_settings()\n if global_pop is not None:\n return global_pop\n pop = {}\n if settings['Variance'] == 0:\n pop[settings['Race']] = 1.0\n else: # Create Variance\n # Prime race\n base_pop = settings['Population'] - round(\n settings['Population'] * (settings['Variance'] / 100))\n pop[settings['Race']] = base_pop\n\n # Add Exotics\n races = RACES\n races.remove(settings['Race'])\n choices = choice(races, settings['Exotic'], replace=False)\n for i in choices:\n pop[i] = round(settings['Population'] *\n (settings['Variance'] / 100) / settings['Exotic'])\n\n global_pop = normalize_dict(pop)\n return global_pop\n\n\ndef create(l):\n v = np.array(l)\n if len(v.shape) < 2:\n print(\"Too little or irregular dimensions\")\n 
exit()\n d = {}\n for x in v:\n d[x[0]] = int(x[1])\n return d\n\n\ndef normalize_dict(v):\n d = {}\n total = sum(v.values())\n for x in v.keys():\n # print(x, v[x])\n d[x] = v[x] / total\n return d\n","sub_path":"variance.py","file_name":"variance.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615296333","text":"import contextlib\nimport typing\nimport binascii\nimport asyncio\nfrom torba.testcase import AsyncioTestCase\nfrom tests import dht_mocks\nfrom lbrynet.conf import Config\nfrom lbrynet.dht import constants\nfrom lbrynet.dht.node import Node\nfrom lbrynet.dht.peer import PeerManager\nfrom lbrynet.dht.blob_announcer import BlobAnnouncer\nfrom lbrynet.extras.daemon.storage import SQLiteStorage\n\n\nclass TestBlobAnnouncer(AsyncioTestCase):\n async def setup_node(self, peer_addresses, address, node_id):\n self.nodes: typing.Dict[int, Node] = {}\n self.advance = dht_mocks.get_time_accelerator(self.loop, self.loop.time())\n self.conf = Config()\n self.storage = SQLiteStorage(self.conf, \":memory:\", self.loop, self.loop.time)\n await self.storage.open()\n self.peer_manager = PeerManager(self.loop)\n self.node = Node(self.loop, self.peer_manager, node_id, 4444, 4444, 3333, address)\n await self.node.start_listening(address)\n self.blob_announcer = BlobAnnouncer(self.loop, self.node, self.storage)\n for node_id, address in peer_addresses:\n await self.add_peer(node_id, address)\n self.node.joined.set()\n\n async def add_peer(self, node_id, address, add_to_routing_table=True):\n n = Node(self.loop, PeerManager(self.loop), node_id, 4444, 4444, 3333, address)\n await n.start_listening(address)\n self.nodes.update({len(self.nodes): n})\n if add_to_routing_table:\n await self.node.protocol.add_peer(\n self.peer_manager.get_kademlia_peer(\n n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port\n )\n )\n\n @contextlib.asynccontextmanager\n async def _test_network_context(self, peer_addresses=None):\n self.peer_addresses = peer_addresses or [\n (constants.generate_id(2), '1.2.3.2'),\n (constants.generate_id(3), '1.2.3.3'),\n (constants.generate_id(4), '1.2.3.4'),\n (constants.generate_id(5), '1.2.3.5'),\n (constants.generate_id(6), '1.2.3.6'),\n (constants.generate_id(7), '1.2.3.7'),\n (constants.generate_id(8), '1.2.3.8'),\n (constants.generate_id(9), '1.2.3.9'),\n ]\n try:\n with dht_mocks.mock_network_loop(self.loop):\n await self.setup_node(self.peer_addresses, '1.2.3.1', constants.generate_id(1))\n yield\n finally:\n self.blob_announcer.stop()\n self.node.stop()\n for n in self.nodes.values():\n n.stop()\n\n async def chain_peer(self, node_id, address):\n previous_last_node = self.nodes[len(self.nodes) - 1]\n await self.add_peer(node_id, address, False)\n last_node = self.nodes[len(self.nodes) - 1]\n peer = last_node.protocol.get_rpc_peer(\n last_node.protocol.peer_manager.get_kademlia_peer(\n previous_last_node.protocol.node_id, previous_last_node.protocol.external_ip,\n previous_last_node.protocol.udp_port\n )\n )\n await peer.ping()\n return peer\n\n async def test_announce_blobs(self):\n blob1 = binascii.hexlify(b'1' * 48).decode()\n blob2 = binascii.hexlify(b'2' * 48).decode()\n\n async with self._test_network_context():\n await self.storage.add_blobs((blob1, 1024), (blob2, 1024), finished=True)\n await self.storage.db.execute(\n \"update blob set next_announce_time=0, should_announce=1 where blob_hash in (?, ?)\",\n (blob1, blob2)\n )\n to_announce = await 
self.storage.get_blobs_to_announce()\n            self.assertEqual(2, len(to_announce))\n            self.blob_announcer.start()\n            await self.advance(61.0)\n            to_announce = await self.storage.get_blobs_to_announce()\n            self.assertEqual(0, len(to_announce))\n            self.blob_announcer.stop()\n\n            # test that we can route from a poorly connected peer all the way to the announced blob\n\n            await self.chain_peer(constants.generate_id(10), '1.2.3.10')\n            await self.chain_peer(constants.generate_id(11), '1.2.3.11')\n            await self.chain_peer(constants.generate_id(12), '1.2.3.12')\n            await self.chain_peer(constants.generate_id(13), '1.2.3.13')\n            await self.chain_peer(constants.generate_id(14), '1.2.3.14')\n\n            last = self.nodes[len(self.nodes) - 1]\n            search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)\n            search_q.put_nowait(blob1)\n\n            _, task = last.accumulate_peers(search_q, peer_q)\n            found_peers = await peer_q.get()\n            task.cancel()\n\n            self.assertEqual(1, len(found_peers))\n            self.assertEqual(self.node.protocol.node_id, found_peers[0].node_id)\n            self.assertEqual(self.node.protocol.external_ip, found_peers[0].address)\n            self.assertEqual(self.node.protocol.peer_port, found_peers[0].tcp_port)\n","sub_path":"tests/unit/dht/test_blob_announcer.py","file_name":"test_blob_announcer.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"69959025","text":"import metric_learn\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_iris\n\n# visualisation imports\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n# loading our dataset\n\niris_data = load_iris()\n# this is our data\nX = iris_data['data']\nX = pd.read_csv('./result_py/matrixCountAdj_cortrim.csv',sep=',').as_matrix()\n# these are our constraints\nY = iris_data['target']\nY = np.squeeze(np.asarray( pd.read_csv('./result_py/aml.csv',sep=',').as_matrix() ))\n\n\n\n# function to plot the results\ndef plot(X, Y):\n    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n    plt.figure(2, figsize=(8, 6))\n\n    # clean the figure\n    plt.clf()\n\n    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)\n    plt.xlabel('Sepal length')\n    plt.ylabel('Sepal width')\n\n    plt.xlim(x_min, x_max)\n    plt.ylim(y_min, y_max)\n    plt.xticks(())\n    plt.yticks(())\n\n    plt.show()\n\n\n# plotting the dataset as is.\n\nplot(X, Y)\n\n\n# ---------------------------------------\n\n# setting up LMNN\nlmnn = metric_learn.LMNN(k=5, learn_rate=1e-6)\n# fit the data!\nlmnn.fit(X, Y)\n# transform our input space\nX_lmnn = lmnn.transform()\nplot(X_lmnn, Y)\n\n# ITML; http://www.cs.utexas.edu/users/pjain/pubs/metriclearning_icml.pdf\nitml = metric_learn.ITML_Supervised(num_constraints=200)\nX_itml = itml.fit_transform(X, Y)\nplot(X_itml, Y)\n\n# SDML; http://lms.comp.nus.edu.sg/sites/default/files/publication-attachments/icml09-guojun.pdf\nsdml = metric_learn.SDML_Supervised(num_constraints=200)\nX_sdml = sdml.fit_transform(X, Y, random_state = np.random.RandomState(1234))\nplot(X_sdml, Y)\n\n# LSML; http://web.cs.ucla.edu/~weiwang/paper/ICDM12.pdf\nlsml = metric_learn.LSML_Supervised(num_constraints=200)\nX_lsml = lsml.fit_transform(X, Y)\nplot(X_lsml, Y)\n\n# NCA; https://papers.nips.cc/paper/2566-neighbourhood-components-analysis.pdf\n\nnca = metric_learn.NCA(max_iter=1000, 
learning_rate=0.01)\nnca.fit(X, Y)\nX_nca = nca.transform()\nplot(X_nca, Y)\n\n# LFDA; http://www.machinelearning.org/proceedings/icml2006/114_Local_Fisher_Discrim.pdf\nlfda = metric_learn.LFDA(k=2, dim=2)\nX_lfda = lfda.fit_transform(X, Y)\nplot(X_lfda, Y)\n\n# RCA; https://www.aaai.org/Papers/ICML/2003/ICML03-005.pdf\nrca = metric_learn.RCA_Supervised(num_chunks=30, chunk_size=2)\nX_rca = rca.fit_transform(X, Y)\nplot(X_rca, Y)\n\n\n\n\n\n","sub_path":"flowtype_flowcap_pipeline-master/201704/02_nca.py","file_name":"02_nca.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557696988","text":"# jl\n# 2018-01-12\n# to list series in LaCie/PHRC\n\nimport os,io\n\npathIn = '/Volumes/LaCie/PHRC'\n\nlist_dir = os.listdir(pathIn)\n\nfile_results = '/Users/jessica/Documents/PHRC/liste_phrc_with_series.txt'\nfirst_line = 'Patient_ID|Patient_Name|Acquisition_Date|Serie\\n'\n\nf = io.open(file_results,'wb')\nf.write(first_line)\n\nfor d in list_dir:\n\tdir = os.path.join(pathIn,d)\n\tif not os.path.isdir(dir):\n\t\tprint(dir + ' not exists')\n\telse:\n\t\tprint('##################\\n' + d + '\\n##################\\n')\n\t\tsubject = d.split('-')\n\t\tsubject_id = int(subject[0][1:] + subject[1])\n\t\tif len(subject) == 3:\n\t\t\tif '.' in subject[2]:\n\t\t\t\tsubject_name = subject[2].replace('.',' ')\n\t\t\telse:\n\t\t\t\tsubject_name = subject[2]\n\t\telif len(subject) == 4:\n\t\t\tsubject_name = subject[2] + ' ' + subject[3]\n\t\tsubject_dir = str(subject_id) + '_' + subject_name.replace(' ','')\n\t\tlist_studies = os.listdir(dir)\n\t\tfor d_study in list_studies:\n\t\t\tstudy_dir = os.path.join(dir,d_study)\n\t\t\tif os.path.isdir(study_dir):\n\t\t\t\tstudy = d_study.split('-')\n\t\t\t\tstudy_name = study[0]\n\t\t\t\tacquisition_date = int(study[1] + study[2] + study[3])\n\t\t\t\tsub_study_dir = study_name + '_' + str(acquisition_date)\n\t\t\t\tlist_series = os.listdir(study_dir)\n\t\t\t\tfor d_serie in list_series:\t\t\t\t\n\t\t\t\t\tserie_dir = os.path.join(study_dir,d_serie)\n\t\t\t\t\tif os.path.isdir(serie_dir):\n\t\t\t\t\t\tline = subject_dir.split('_')[0] + '|' + subject_dir.split('_')[1] + '|' + sub_study_dir + '|' + d_serie + '\\n'\n\t\t\t\t\t\tf.write(line)\nf.close()","sub_path":"liste_phrc.py","file_name":"liste_phrc.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"184159622","text":"# IF Statements:\n\nages = {\n 'kevin': 12,\n 'marcus': 9,\n 'evan': 28,\n 'Biko': 30\n}\n\nnew_ages = {key:value for (key, value) in ages.items() if value > 25}\nprint(new_ages)\n\n# Adding Multiple IF Statements:\n\nages = {\n 'bob': 58,\n 'lee': 20,\n 'ibra': 23,\n 'dave': 19,\n 'moh': 28,\n 'jane': 24,\n 'oti': 33\n}\n\nyounger = {key: value for (key, value) in ages.items() if value < 25 if value % 2 == 0}\nprint(younger)\n\n# If Else:\n\nages = {\n 'Yusuf': 32,\n 'Sikujua': 25,\n 'Machio': 30,\n 'Ojasi': 28,\n 'Alvo': 21,\n 'Nuri': 33\n}\n\noddeven = {key: ('odd' if value % 2 == 1 else 'even') for (key, value) in ages.items()}\nprint(oddeven)\n\n# Nested Dictionary Comprehension:\n\nmutiples = {\n key1: {\n key2: key1 * key2\n for key2 in range(1, 6)\n }\n for key1 in range(10, 60, 10)\n}\n\nprint(mutiples)\n\n# Filtering Items:\na_dict = {'Moja': 1, 'Mbili': 2, 'Tatu': 3, 'Nne': 4}\nnew_dict = {k: v for k, v in a_dict.items() if v <= 2}\nprint(new_dict)\n\n# Conditionals to Dictionary Comprehension:\n# 
If Condition:\ndict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\n# Check for items greater than 2:\ndict1_cond = {k:v for (k, v) in dict1.items() if v>2}\nprint(dict1_cond)\n\n# Multiple If Conditions:\n# items greater than 2 & they are multiples of 2 at the same time.\n\ndict1_doubleCond = {k:v for (k,v) in dict1.items() if v>2 if v%2 == 0}\nprint(dict1_doubleCond)\n\ndict2 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f':6}\n\ndict2_tripleCond = {k:v for (k, v) in dict2.items() if v>2 if v%2 == 0 if v%3 == 0}\nprint(dict2_tripleCond)\n\n# If-Else Conditions:\ndict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f':6}\n\n# Identify odd and even entries:\ndict1_tripleCond = {k: ('even' if v%2 == 0 else 'odd') for (k,v) in dict1.items()}\n\nprint(dict1_tripleCond)\n\n# Nested Dictionary Comprehension:\nnested_dict = {'first': {'a': 1}, 'second': {'b': 2}}\n# the inner comprehension must map inner_k to float(inner_v) to build a dict (not a set)\nfloat_dict = {outer_k: {inner_k: float(inner_v) for (inner_k, inner_v) in outer_v.items()}\n              for (outer_k, outer_v) in nested_dict.items()}\n\nprint(float_dict)\n","sub_path":"comprehensions/dictionaries/conditional-dictionaries.py","file_name":"conditional-dictionaries.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"578475786","text":"import cv2\r\nimport socket\r\nimport time\r\nimport os\r\n\r\n\r\nprint(1)\r\na = 0\r\nday= '-'.join('_'.join(str(time.ctime()).split(' ')).split(':'))\r\nos.mkdir(day)\r\nfile = \"test0.jpg\"\r\nReserv = \"Reserv.jpg\"\r\nerr = 0\r\nsock = socket.socket()\r\nim=cv2.imread('1.jpg')\r\nwhile err == 0:\r\n    try:\r\n        sock.connect(('192.168.1.101', 9001))\r\n        err = 1\r\n        print('connect')\r\n\r\n    except:\r\n        pass\r\n\r\n\r\nimage_result = open(file, 'wb')\r\n\r\nimage = bytes('', 'utf-8')\r\nprint(2)\r\n\r\ndef b_left():\r\n    print('left')\r\n    sock.send(bytes('left', 'utf-8'))\r\n\r\ndef b_light():\r\n    print('light')\r\n    sock.send(bytes('light', 'utf-8'))\r\n\r\ndef b_shut():\r\n    print('shut')\r\n    sock.send(bytes('shut', 'utf-8'))\r\n\r\n\r\ndef b_right():\r\n    print('right')\r\n    sock.send(bytes('right', 'utf-8'))\r\n\r\n\r\ndef b_up():\r\n    print('up')\r\n    sock.send(bytes('up', 'utf-8'))\r\n\r\n\r\ndef b_stop():\r\n    print('stop')\r\n    sock.send(bytes('stop', 'utf-8'))\r\n\r\n\r\ndef b_down():\r\n    print('down')\r\n    sock.send(bytes('down', 'utf-8'))\r\n\r\n\r\ndef b_plus():\r\n    print('+')\r\n    sock.send(bytes('+', 'utf-8'))\r\n\r\n\r\ndef b_minus():\r\n    print('-')\r\n    sock.send(bytes('-', 'utf-8'))\r\n\r\n\r\ndata_1 = bytes('', 'utf-8')\r\ni = 0\r\nnum=0\r\nfile='test3.jpg'\r\nwhile True:\r\n    #cv2.imshow('l',im)\r\n    #num = cv2.waitKey(33)\r\n    #print(num)\r\n    if num == 52:  # left arrow\r\n        b_left()\r\n    if num == 54:  # right arrow\r\n        b_right()\r\n    if num == 56:  # up arrow\r\n        b_up()\r\n    if num == 50:  # down arrow\r\n        b_down()\r\n    if num == 53:  # central button\r\n        b_stop()\r\n    if num == 45:  # -\r\n        b_minus()\r\n    if num == 43:  # +\r\n        b_plus()\r\n    if num == 27:  # escape\r\n        b_shut()\r\n    if num == 32:\r\n        b_light()\r\n    try:\r\n        image_result = open(file, 'wb')\r\n    except:\r\n        image_result = open('err.jpg', 'wb')\r\n    image = b''\r\n    data = b''\r\n    # print(1)\r\n\r\n    # i+=1\r\n    while b'stop' not in data:\r\n\r\n        data += sock.recv(2**15)\r\n        #image_result.write(data)\r\n        print('not ')\r\n        if b'stop' in data:\r\n            image_result.write(data[:data.index(b'stop')])\r\n\r\n            data_1 = data[data.index(b'stop') + 4:]\r\n\r\n    image_result.close()\r\n    if os.path.getsize(file) > 500:\r\n        print(file,2)\r\n        img = 
cv2.putText(cv2.imread(file,0), str(i) ,(30,100),cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,0),2)\r\n\r\n try :\r\n print('>500 Ok')\r\n cv2.imshow('Transport', img)\r\n num = cv2.waitKey(33)\r\n\r\n except Exception as e:\r\n print(e)\r\n #os.remove(file)\r\n\r\n else:\r\n print(\"STOP______________\")\r\n #os.remove(file)\r\n file = day+'/'+ str(i) + \".jpg\"\r\n image_result = open(file, 'wb')\r\n data = b''\r\n i+=1\r\n image_result.write(data_1)\r\n data_1 = b''\r\n a = 0\r\n","sub_path":"local/client_comp-v9.py","file_name":"client_comp-v9.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491661414","text":"\"\"\"\n Cleaning methods.\n\"\"\"\n\n###########################################################################\n# Clean signature samples.\n###########################################################################\ndef clean():\n\n # Import libraries.\n import pandas as pd\n import numpy as np\n\n # Disable pandas warnings.\n pd.options.mode.chained_assignment = None\n\n # Clean each user's samples.\n for user in range(1, 41):\n\n # Log user.\n print('Cleaning user ' + str(user) + '...')\n\n # Load data.\n print('\\tLoading data...')\n data = pd.read_csv('./Task1/Data/Input/user' + str(user) + '.csv')\n\n # Separate the samples.\n samples = []\n for i in range(1, 41):\n samples.append(data.loc[data['signature'] == i])\n\n # Zero x, y, and time.\n print('\\tCleaning data...')\n for sample in samples:\n sample['time'] = sample['time'] - sample['time'].min() \n sample['x'] = sample['x'] - sample['x'].min()\n sample['y'] = sample['y'] - sample['y'].min()\n\n # Find the max number of rows in any sample.\n sample_totals = []\n\n for sample in samples:\n sample_totals.append(sample['time'].count())\n\n series_sample_totals = pd.Series(sample_totals)\n max_rows = series_sample_totals.max()\n \n # Add rows to each sample so they're even.\n print('\\tAdding rows...')\n for i in range(0, len(samples)):\n \n # Get last time value.\n end_time = samples[i]['time'].max() + 10\n \n # Set genuine label.\n genuine = 0\n if i < 20:\n genuine = 1\n\n # Insert rows.\n for j in range(0, max_rows - samples[i]['time'].count()):\n df = pd.DataFrame([[0, 0, end_time, 1, user, i+1, genuine]], columns=['x', 'y', 'time', 'button', 'user', 'signature', 'genuine'])\n samples[i] = samples[i].append(df)\n end_time += 10\n\n # Output cleaned sample to file.\n print('\\tWriting data to file...')\n for i in range(0, len(samples)):\n if i == 0:\n samples[i].to_csv('./Task1/Data/Cleaning/clean_user' + str(user) + '.csv', mode='w', header=True, index=False)\n else:\n samples[i].to_csv('./Task1/Data/Cleaning/clean_user' + str(user) + '.csv', mode='a', header=False, index=False)\n \n return\n","sub_path":"src/Task1/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"422164106","text":"import musicbox\n\n\n# The list of tuples holding the notes and integers\nNOTES = [(\"C\", 60),(\"D\", 62), (\"E\", 64), (\"F\", 65), (\"G\", 67), (\"A\", 69), (\"B\", 71)]\nMAJOR_INTERVALS = [2,2,1,2,2,2,1]\nMINOR_INTERVALS = [2,1,2,2,1,2,2]\n# Sets the import of musicbox to variable in order\n# to be used later on to play the notes\nmy_music = musicbox.MusicBox()\n\n# Gets the letters of the note from the list of tuples\nlist_notes = [x[0] for x in NOTES]\n\n# Turns the note given into the correct integer\ndef 
note_to_int(note):\n\n octave_number = note.rfind(\"^\")+1\n for let in NOTES:\n if let[0] == note[octave_number]:\n num = let[1]\n if len(note[octave_number:]) == 2:\n if note[octave_number+1] == \"#\":\n flat_orsharp = 1\n elif note[octave_number + 1] == \"b\":\n flat_orsharp = -1\n else:\n flat_orsharp = 0\n return_list = (num + 12 * octave_number + flat_orsharp)\n\n return return_list\n\n\n\n# Just prints out the menu, nothing special\ndef print_menu():\n print(\"Main Menu: \\n 1. Play Scale \\n 2. Play Song \\n 3. Quit\")\n\n\n# Gets and validates the users option at menu screen\ndef get_menu_choice():\n selection = int(input(\"Make a selection: \"))\n while (selection < 1) or (selection > 3):\n print(\"Please enter a valid option\")\n selection = int(input(\"Make a selection: \"))\n return selection\n\n\n# Get's the notes from the user\n# Used if else for the invalid note ex. P\ndef get_scale():\n scale = str(input(\"Please input a scale:\\n\"))\n while (\"minor\" not in scale ) and (\"major\"not in scale):\n scale=str(input(\"Enter scale name:\\n\"))\n separated = scale.split(\" \")\n scale = [note_to_int((separated[0])), (separated[1])]\n return scale\n\n\n\n# Plays the notes after getting the int\ndef scale_to_ints(scale):\n if scale[1] == \"major\":\n notes = [scale[0]]\n for i in MAJOR_INTERVALS:\n notes.append(i+notes[-1])\n return notes\n else:\n notes = [scale[0]]\n for i in MINOR_INTERVALS:\n notes.append(i+notes[-1])\n return notes\n\n\n\n# Glues everything together in order to play notes\ndef menu_play_scale():\n g_s = get_scale()\n s_to_i = scale_to_ints(g_s)\n for i in s_to_i:\n my_music.play_note(i, 500)\n return\n\n\n# Get's the name of the text file from the user as a string\ndef get_song_file():\n song_file = input(\"Please enter a the song file name including the extension: \")\n return song_file\n\n\n# Play the song file given by the user\ndef play_song(file):\n # Opens up the file and reads the lines\n for line in open(file):\n separate = line.split(\" \")\n\n # Variable is used when notes have a # or b\n find_suffix = separate[0]\n\n # If the lines starts with //, it's skipped\n if line.startswith(\"//\"):\n continue\n\n # This variable gets the duration from every note and chord in the given files\n duration = int(separate[-1].strip('\"').strip('\\n'))\n\n # Used for seperate lines with single notes to multiple\n if len(separate) > 2:\n chord_notes = []\n for x in separate:\n chord_checker = x.strip('^#b')\n if chord_checker.isalpha():\n chords = note_to_int(x)\n chord_notes.append(chords)\n my_music.play_chord(chord_notes, duration)\n\n # This else section is used for the lines with single notes\n else:\n # Used only when note is vanilla\n # Ex. 
C, G, B\n            if separate[0] in list_notes:\n                note_orig = note_to_int(separate[0])\n\n                my_music.play_note(note_orig, duration)\n\n            # If the note has a ^ at the beginning, figures out how many octaves\n            # it was raised by\n            elif separate[0].find('^') == 0:\n                octave_increase = note_to_int(separate[0])\n                my_music.play_note(octave_increase, duration)\n\n            # These next two elif statements are used when the note\n            # has a suffix\n            # This one is used when it's a sharp note\n            elif find_suffix[-1] == '#':\n                sharp_note = note_to_int(separate[0])\n                my_music.play_note(sharp_note, duration)\n\n            # This one is used when it's a flat note\n            elif find_suffix[-1] == 'b':\n                flat_note = note_to_int(separate[0])\n                # note_to_int returns a single integer, so play it directly\n                my_music.play_note(flat_note, duration)\n\n            # If there are invalid notes, they are treated as a pause\n            elif separate[0] not in separate:\n                my_music.pause(duration)\n\n\ndef menu_play_song():\n    song = get_song_file()\n    play_song(song)\n\n\n# This is where all the magic happens ;)\ndef main():\n    while True:\n        print_menu()\n        menu_choice = get_menu_choice()\n        if menu_choice == 1:\n            menu_play_scale()\n        if menu_choice == 2:\n            menu_play_song()\n        if menu_choice == 3:\n            break\n\n\nmain()\nmy_music.close()\n","sub_path":"Homework 3 (2).py","file_name":"Homework 3 (2).py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"548751688","text":"#square divider\nn0 = n = int(input())\n\n# remove every square factor of 2\nwhile n % 4 == 0:\n    n //= 4\n\nd = 3\nwhile d*d <= n:\n    while n%(d*d) == 0:\n        n //= d*d\n\n    d += 2\nprint(n0//n)\n","sub_path":"square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106862023","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# https://leetcode-cn.com/problems/maximum-subarray/\n# Given an integer array nums, find the contiguous subarray (containing at least one number)\n# which has the largest sum and return its sum.\n# For example, Given Input = [-2, 1, -3, 4, -1, 2, 1, -5, 4],\n# Output will be: 6, Because [4,-1,2,1] has the largest sum = 6.\n# If you have figured out the O(n) solution, try coding another solution using the divide\n# and conquer approach, which is more subtle.\n#\nfrom Utils.timer_decorater import timer\n\n\nclass Solution(object):\n    @timer\n    def maxSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        maxSum = nums[0]\n        maxContinuousSum = 0\n        for num in nums:\n            if maxContinuousSum > 0:\n                maxContinuousSum += num\n            else:\n                maxContinuousSum = num\n            maxSum = max(maxContinuousSum, maxSum)\n        return maxSum\n\n\nif __name__ == \"__main__\":\n    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n    print(Solution().maxSubArray(nums))\n","sub_path":"Python/DP/53. maximum_subarray.py","file_name":"53. maximum_subarray.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348914038","text":"from exts import db\nfrom datetime import datetime\n\n\nclass Permission(object):\n    # 255 in binary; in Python the 0b prefix marks a binary literal\n    ALL_PERMISSION = 0b11111111\n    # 1. Visitor permission: may only view pages, cannot modify or add articles\n    VISITOR = 0b00000001\n    # 2. Permission for users who may enter articles\n    OPERATOR = 0b00000010\n    # 3. Permission for users who may set article scores\n    SYSTEMMANAGER = 0b00000100\n    # 4. Permission for users who may modify articles in every area; only used in addarticle, may edit everyone's articles\n    ADMINISTRATOR = 0b00001000\n\n\ncms_role_user = db.Table(\n    'cms_role_user',\n    db.Column('cms_role_id', db.Integer, db.ForeignKey('role.id'), primary_key=True),\n    db.Column('cms_user_id', db.String(100), db.ForeignKey('user.user_login'), primary_key=True)\n)\n\n\n# Role (permission) table\nclass CMSRole(db.Model):\n    __tablename__ = 'role'\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    name = db.Column(db.String(50), nullable=False)\n    desc = db.Column(db.String(200), nullable=True)\n    create_time = db.Column(db.DateTime, default=datetime.now)\n    permissions = db.Column(db.Integer, default=Permission.VISITOR)\n    users = db.relationship('User', secondary=cms_role_user, backref='roles')\n\n\n# Basic information of a parent media category\nclass Uppermedia(db.Model):\n    __tablename__ = 'uppermedia'\n    # Attribute id\n    id = db.Column(db.String(20), primary_key=True)\n    # Category name\n    uppermedia_name = db.Column(db.String(50))\n    # Enabled/disabled status\n    status = db.Column(db.Integer)\n\n\n# Basic information of a media outlet (a media belongs to a parent category, uppermedia)\nclass Media(db.Model):\n    __tablename__ = 'media'\n    # Attribute id\n    id = db.Column(db.String(20), primary_key=True)\n    # Media name\n    media_name = db.Column(db.String(50))\n    # Cascade relation to the parent category\n    uppermedia_id = db.Column(db.String(20), db.ForeignKey(\"uppermedia.id\"))\n    # One-to-many ORM relationship\n    uppermedia = db.relationship(\"Uppermedia\", backref=\"medias\")\n    # Enabled/disabled status\n    status = db.Column(db.Integer)\n\n\n# Structure of the board an article belongs to (a board belongs to a media)\nclass Board(db.Model):\n    __tablename__ = 'board'\n    # Attribute id\n    id = db.Column(db.String(20), primary_key=True)\n    # Board name\n    board_name = db.Column(db.String(100))\n    # Board points\n    points = db.Column(db.Integer)\n    # Relation to the media\n    media_id = db.Column(db.String(20), db.ForeignKey(\"media.id\"))\n    # One-to-many ORM relationship\n    media = db.relationship(\"Media\", backref=\"boards\")\n    # Enabled/disabled status\n    status = db.Column(db.Integer)\n    # Per-word pricing standard\n    calc_price = db.Column(db.Float)\n    # Built-in fee standard\n    price = db.Column(db.Float)\n\n\n# Basic author information, used for recording article authors and for login\nclass User(db.Model):\n    __tablename__ = 'user'\n    # 6-digit unit code, e.g. Nantong is 320601\n    unit_id = db.Column(db.Integer)\n    # Unit name\n    unit_name = db.Column(db.String(100))\n    # ID of the user's department\n    dept_id = db.Column(db.Integer)\n    # Name of the user's department (possibly a sub-department; if there is none, it matches the root department name)\n    dept_name = db.Column(db.String(100))\n    # ID of the user's parent department\n    root_dept_id = db.Column(db.Integer)\n    # Name of the user's parent department\n    root_dept_name = db.Column(db.String(100))\n    # User's name\n    user_name = db.Column(db.String(100))\n    # User's job title\n    user_job = db.Column(db.String(100))\n    # User login ID\n    user_login = db.Column(db.String(100), primary_key=True)\n    # User password\n    user_password = db.Column(db.String(100))\n    # User role type\n    role_name = db.Column(db.String(100))\n    # User creation time\n    create_time = db.Column(db.DateTime, default=datetime.now)\n\n    def checkpassword(self, rawpassword):\n        if self.user_password == rawpassword:\n            return True\n        else:\n            return False\n\n    @property\n    def permissions(self):\n        if not self.roles:\n            return 0\n        all_permissions = 0\n        for role in self.roles:\n            permissions = role.permissions\n            all_permissions |= permissions\n        return all_permissions\n\n    def has_permission(self, permission):\n        return (self.permissions & permission) == permission\n\n    @property\n    def is_systemmanager(self):\n        return self.has_permission(Permission.SYSTEMMANAGER)\n\n    @property\n    def is_visitor(self):\n        return self.has_permission(Permission.VISITOR)\n\n    @property\n    def is_operator(self):\n        return self.has_permission(Permission.OPERATOR)\n\n    @property\n    def is_administrator(self):\n        return self.has_permission(Permission.ADMINISTRATOR)\n\n    def __init__(self,unit_name,\n                 dept_id,\n                 dept_name,\n                 root_dept_id,\n                 root_dept_name,\n                 user_name,\n                 user_job,\n                 user_login,\n                 user_password,\n                 role_name,\n                 create_time):\n        self.unit_name = unit_name\n        self.dept_id = dept_id\n        self.dept_name = dept_name\n        self.root_dept_name = root_dept_name\n        self.root_dept_id = root_dept_id\n        self.user_name = user_name\n        self.user_job = user_job\n        self.user_login = user_login\n        self.user_password = user_password\n        self.role_name = role_name\n        self.create_time = create_time\n\n\n# Main article table\nclass CMSArticle(db.Model):\n    __tablename__ = 'article'\n    # Article ID, primary key\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    # 6-digit unit code, e.g. Nantong is 320601\n    unit_id = db.Column(db.Integer)\n    # Unit name\n    unit_name = db.Column(db.String(100))\n    # ID of the user's department\n    dept_id = db.Column(db.Integer)\n    # Name of the user's department (possibly a sub-department; if there is none, it matches the root department name)\n    dept_name = db.Column(db.String(100))\n    # ID of the user's parent department\n    root_dept_id = db.Column(db.Integer)\n    # Name of the user's parent department\n    root_dept_name = db.Column(db.String(100))\n    # User's name\n    user_name = db.Column(db.String(100))\n    # User's job title\n    user_job = db.Column(db.String(100))\n    # Article title\n    article_title = db.Column(db.String(200))\n    # Month of the article\n    month = db.Column(db.String(20))\n    # ID of the board this article belongs to, foreign key\n    board_id = db.Column(db.String(20), db.ForeignKey(\"board.id\"))\n    # Name of the board this article belongs to\n    board_name = db.Column(db.String(100))\n    # Media ID of the article's board, foreign key\n    media_id = db.Column(db.String(20), db.ForeignKey(\"media.id\"))\n    # Media name of the article's board\n    media_name = db.Column(db.String(100))\n    # Parent category ID of the article's media, foreign key\n    uppermedia_id = db.Column(db.String(20), db.ForeignKey(\"uppermedia.id\"))\n    # Name of the article's parent media category (the top-level media classification, e.g. national media, provincial media)\n    uppermedia_name = db.Column(db.String(100))\n    # Points awarded to the article\n    points = db.Column(db.Integer)\n    # Second author's name\n    secend_user = db.Column(db.String(100))\n    # Entry time\n    create_time = db.Column(db.DateTime)\n    # User login ID, foreign key linking to the user table\n    user_login = db.Column(db.String(100), db.ForeignKey(\"user.user_login\"))\n    # One-to-many ORM relationship\n    board = db.relationship(\"Board\", backref=\"articles\")\n    # Status\n    status = db.Column(db.Integer)\n    # Ordering\n    order = db.Column(db.Integer)\n    # Word count\n    article_number = db.Column(db.Integer)\n    # Fee\n    article_price = db.Column(db.Float)\n\n# Model for the intermediate user view\nclass CMSV_pas_unit(db.Model):\n    __tablename__='v_pas_unit'\n    UNIT_ID = db.Column(db.String(10))\n    UNIT_DESC = db.Column(db.String(50))\n    alldept = db.Column(db.String(10))\n    DEPT_ID = db.Column(db.String(10))\n    DEPT_NAME = db.Column(db.String(60))\n    EMP_NAME = db.Column(db.String(50))\n    TITLE_NAME = db.Column(db.String(100))\n    EMP_LOGIN = db.Column(db.String(30), primary_key=True)\n    EMP_PWD = db.Column(db.String(30))\n    ROLE_NAME = db.Column(db.String(40))\n    dept = db.Column(db.String(60))\n\n","sub_path":"apps/cms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"251159297","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# gazebo world上に指定した物体をランダムな位置に配置するコード\n\n#import sys\nimport rospy\nimport rospkg\nfrom gazebo_msgs.srv import DeleteModel\nfrom gazebo_msgs.srv import SpawnModel\n#from std_msgs.msg import Header, Float64, Bool, String, Int16\nfrom geometry_msgs.msg import Pose, Quaternion\nfrom __init__ import *\nimport tf.transformations as tft\nimport random\n#import argparse\n\nclass DropModel:\n # @staticmethod\n def __init__(self):\n pass\n\n def delete_model(self, model_name):\n rospy.loginfo(\"Delete_model: \" + model_name)\n # Delete the old model if it's stil around\n self.delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n self.delete_model_prox(model_name)\n rospy.loginfo(\"Delete_model\")\n\n # @staticmethod\n def spawn_model(self, model_name):\n rospy.loginfo(\"Spawn_model: \" + model_name)\n\n # 物体の種類を決める\n # pig_doll\n #o = random.randint(0, len(objects)-1)\n o = 2\n \n\n # 物体の位置を決める (主観)\n ## doll : living, bedroom \n ## bottle : living, kitchen\n ## cup : kitchen, toilet\n if o == 0 or o == 1 or o == 2:\n i = random.randrange(2)\n if i == 0:\n p = 0\n else:\n p = 2\n \n elif o == 3 or o == 4 or o == 5:\n i = random.randrange(2)\n if i == 0:\n p = 0\n else:\n p = 1\n \n else:\n i = random.randrange(2)\n if i == 0:\n p = 1\n else:\n p = 3\n\n \n #p = random.randint(0, len(places)-1)\n \n\n self.initial_pose = Pose()\n self.initial_pose.position.x = places[p][0]\n self.initial_pose.position.y = places[p][1]\n self.initial_pose.position.z = places[p][2]\n roll = places[p][3]\n pitch = places[p][4]\n yaw = places[p][5]\n tmpq = tft.quaternion_from_euler(roll, pitch, yaw)\n q = Quaternion(tmpq[0], tmpq[1], tmpq[2], tmpq[3])\n self.initial_pose.orientation = q\n\n # Spawn the new model #\n self.model_path = rospkg.RosPack().get_path('nrp_gazebo_worlds')+'/models/{}/'.format(objects[o])\n self.model_xml = ''\n rospy.loginfo(model_name)\n rospy.loginfo(self.model_path)\n rospy.loginfo(\"FILEPATH: \" + self.model_path + model_name + '.sdf')\n with open (self.model_path + model_name + '.sdf', 'r') as xml_file:\n self.model_xml = xml_file.read().replace('\\n', '')\n self.spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)\n self.spawn_model_prox('training_model', self.model_xml, '', self.initial_pose, 'world')\n rospy.loginfo(\"Spawn_model\")\n\n\nif __name__ == '__main__':\n rospy.init_node('spawn_model_naito')\n\n drop_model = DropModel()\n #rospy.logwarn(\"Customer entering the environment\")\n #human_action = sys.argv[1]\n drop_model.spawn_model(\"model\")\n","sub_path":"src/spawn_model_pos_fix.py","file_name":"spawn_model_pos_fix.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259909692","text":"\"\"\"\nStep 1 test based on largest singular vector. \n\nThis is the test described in `Kac Rice`_ for $X=I$ and the penalty being the nuclear norm\n\n.. math::\n\n {\\cal P}(\\beta) = \\sim_{i=1}^{\\text{min(n,p)}} \\sigma_i(\\beta)\n\nfor $\\beta \\in \\mathbb{R}^{n \\times p}$.\n\n.. 
_Kac Rice: http://arxiv.org/abs/1308.3020\n\"\"\"\n\nimport numpy as np\nfrom .pvalue import general_pvalue\n\ndef pvalue(X, sigma=1, nsim=5000):\n n, p = X.shape\n D = np.linalg.svd(X)[1] / sigma\n m = n+p-2\n H = np.zeros(m)\n \n nonzero = np.hstack([D[1:],-D[1:]])\n H[:nonzero.shape[0]] = nonzero\n \n return max(0, min(general_pvalue(D[0], D[1], np.inf, H, nsim=nsim), 1))\n","sub_path":"selection/algorithms/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"295041924","text":"import os, glob, csv, zipfile\n\ndef parse_RAST(tsvfile):\n with open(tsvfile, 'r') as tsv:\n features = [line for line in csv.reader(tsv, dialect = 'excel-tab')][1:]\n \n peg = list(filter(lambda f: f[2] == 'peg', features))\n repeat = list(filter(lambda f: f[2] == 'repeat', features))\n rna = list(filter(lambda f: f[2] == 'rna', features))\n tRNA = list(filter(lambda f: 'tRNA' in f[7], rna))\n rRNA = list(filter(lambda f: 'rRNA' in f[7], rna))\n \n return peg, repeat, rna, tRNA, rRNA\n\nif __name__ == '__main__':\n with open('strains_list.csv') as f:\n reader = csv.reader(f)\n with open('output.csv', 'w', newline = '') as g:\n writer = csv.writer(g)\n for row in reader:\n if row[0] == 'Name':\n new_row = row\n else:\n peg, repeat, rna, tRNA, rRNA = parse_RAST(glob.glob(row[1] + '/RAST/' + '*.txt')[0])\n new_row = row\n new_row[5] = len(peg)\n new_row[6] = len(repeat)\n new_row[7] = len(rna)\n new_row[8] = len(tRNA)\n new_row[9] = len(rRNA)\n writer.writerow(new_row)","sub_path":"process_RAST.py","file_name":"process_RAST.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617619713","text":"import numpy as np\n\ndef load_text(text_path):\n '''\n 导入文本,进行数据处理,返回正反向字典库,返回所有英文单词组成的列表\n :param text_path:\n :return:\n '''\n\n with open(text_path, 'r') as f:\n\n count = f.read()\n words = count.split()\n # print(words)\n vocb = np.unique(words)\n # print(vocb)\n # print(len(words))\n word2id, id2word = {}, {}\n id2word.update({\n i: vocb[i] for i in range(len(vocb))\n })\n word2id.update({\n vocb[i]:i for i in range(len(vocb))\n })\n # print(word2id)\n # print(len(id2word))\n\n\n return word2id, id2word, vocb, words","sub_path":"Python基础/文本处理.py","file_name":"文本处理.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391360188","text":"#!/usr/bin/env python3\nfrom math import sqrt\n\n\ndef square_root_int(x):\n return int(sqrt(x))\n\n\ndef is_prime(number):\n if number == 1:\n return False\n upper_bound = square_root_int(number) + 1\n for x in range(2, upper_bound):\n if x == number:\n next\n if number % x == 0:\n return False\n return True\n\n\ndef factors_of(number):\n factors = []\n upper_bound = square_root_int(number) + 1\n for x in range(2, upper_bound):\n if number % x == 0:\n factors.append(x)\n factors.append(number / x)\n return factors\n\n\ndef solve():\n factors = factors_of(600851475143)\n return max(x for x in factors if is_prime(x))\n\n\nif __name__ == '__main__':\n result = solve()\n print(result)\n","sub_path":"project-euler/003.py","file_name":"003.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171819131","text":"#!/usr/bin/python\n\nimport tempfile\nimport os\nimport random\nimport 
string\nimport codecs\nimport shutil\nimport sys\nimport MySQLdb\nfrom redis import Redis\n\nfrom subprocess32 import Popen, PIPE\n\n\nclass CompileErrorException(Exception):\n pass\n\n\nclass JudgeClientException(Exception):\n pass\n\n\nclass ExecutionRecord:\n FETCH_QUERY = \"SELECT executionRecord.id, executionRecord.status, user1.id AS attackerUserId, sc1.language AS attackerLanguage, sc1.code AS attackerCode, user2.id AS defenderUserId, sc2.language AS defenderLanguage, sc2.code AS defenderCode , attackerId, defenderId , executionRecord.tag AS tag FROM executionRecord, program AS p1 , program AS p2 , sourceCode AS sc1 , sourceCode AS sc2 , user AS user1 , user AS user2 WHERE executionRecord.attackerId = p1.id AND p1.id = sc1.programId AND executionRecord.defenderId = p2.id AND p2.id = sc2.programId AND p1.userId = user1.id AND p2.userId = user2.id AND executionRecord.id = %s\"\n UPDATE_QUERY = \"UPDATE executionRecord SET status = %s , winner = %s , replay = %s , log = %s WHERE id = %s\"\n UPDATE_STATUS_QUERY = \"UPDATE executionRecord SET status = %s WHERE id = %s\"\n\n TRANSACTION_BEGIN_QUERY = \"BEGIN TRANSACTION\"\n TRANSACTION_COMMIT_QUERY = \"COMMIT\"\n\n GET_MAX_SCORE_QUERY = \"SELECT MAX(rating) FROM userScore\"\n GET_USER_SCORE_QUERY = \"SELECT rating FROM userScore WHERE userId = %s FOR UPDATE\"\n CHECK_EXISTENT_QUERY = 'SELECT COUNT(*) FROM executionRecord, program WHERE executionRecord.id <> %s AND defenderId = %s AND attackerId = program.id AND program.userId = %s AND winner = 1 AND status <> 7'\n UPDATE_USER_SCORE_QUERY = \"UPDATE userScore SET rating = rating + (%s) WHERE userId = %s\"\n\n STATUS_PENDING = 0\n STATUS_RUNNING = 1\n STATUS_FINISHED = 2\n STATUS_RUNTIME_ERROR = 3\n STATUS_TLE = 4\n STATUS_ILLEGAL_MOVE = 5\n STATUS_BAD_FORMAT = 6\n STATUS_COMPILE_ERROR = 7\n STATUS_INTERNAL_ERROR = -1\n STATUS_REJECTED = -2\n\n WINNER_ATTACKER = 1\n WINNER_DEFENDER = 2\n\n def __init__(self, program_id):\n self.connection = None\n self.connection = MySQLdb.connect(\n db='ovs',\n user='ovs',\n passwd='online-vs-platform',\n charset='utf8',\n use_unicode=True\n )\n cur = self.connection.cursor()\n cur.execute(self.FETCH_QUERY, [program_id])\n result = cur.fetchone()\n cur.close()\n (self.id, self.status,\n self.attacker_id, self.attacker_lang, self.attacker_code,\n self.defender_id, self.defender_lang, self.defender_code, self.attacker_program_id,\n self.defender_program_id, self.tag) = result\n self.log = []\n self.winner = 0\n self.replay = []\n\n def update_status(self, status):\n self.status = status\n cur = self.connection.cursor()\n cur.execute(self.UPDATE_STATUS_QUERY, (self.status, self.id))\n cur.close()\n self.connection.commit()\n\n def attacker_wins(self):\n self.winner = self.WINNER_ATTACKER\n\n def defender_wins(self):\n self.winner = self.WINNER_DEFENDER\n\n def ranking_change(self):\n if self.winner == self.WINNER_ATTACKER and self.status != ExecutionRecord.STATUS_COMPILE_ERROR: # we don't count on compile errors\n cur = self.connection.cursor()\n cur.execute(self.CHECK_EXISTENT_QUERY, (self.id, self.defender_program_id, self.attacker_id,))\n result = cur.fetchone()\n if not result[0]:\n cur.execute(self.GET_USER_SCORE_QUERY, (self.attacker_id,))\n attacker_score = cur.fetchone()[0]\n cur.execute(self.GET_USER_SCORE_QUERY, (self.defender_id,))\n defender_score = cur.fetchone()[0]\n cur.execute(self.GET_MAX_SCORE_QUERY)\n max_score = cur.fetchone()[0]\n delta = min(((attacker_score - defender_score) / (1.0 * max_score)) ** 2, max_score / 4.0)\n delta = max(1, 
delta)\n                cur.execute(self.UPDATE_USER_SCORE_QUERY, (delta, self.attacker_id,))\n            cur.close()\n\n    def save_to_database(self):\n        cur = self.connection.cursor()\n        cur.execute(self.UPDATE_QUERY,\n                    (self.status, self.winner, '\\n'.join(self.replay), '\\n'.join(self.log), self.id))\n        cur.close()\n        self.connection.commit()\n\n    def __del__(self):\n        # close the database connection when the record object is garbage-collected\n        if self.connection is not None:\n            self.connection.close()\n\n\ndef random_string(length=32):\n    return ''.join(random.choice(string.letters + string.digits) for i in range(length))\n\n\nclass Execution:\n    COMPILE_TIMEOUT = 10\n\n    def __init__(self, record):\n        \"\"\"\n\n        :type record: ExecutionRecord\n        \"\"\"\n        self.record = record\n        self.base_dir = os.path.join(tempfile.gettempdir(), 'judge-' + random_string())\n        os.mkdir(self.base_dir)\n        self.log = []\n\n    def __del__(self):\n        shutil.rmtree(self.base_dir, ignore_errors=True)\n\n    def run_compiler(self, language, filename, executable_name):\n        args = [\"g++\" if language else \"gcc\", \"-static\", \"-w\", \"-O2\", filename, \"-o\",\n                executable_name]\n        self.log += ['Running: ' + ' '.join(args)]\n        proc = Popen(args,\n                     cwd=self.base_dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n        output = proc.communicate(timeout=self.COMPILE_TIMEOUT)\n        self.log += [str(output[1])]\n        if proc.poll() is None:\n            try:\n                self.log += ['Compile timeout.']\n                proc.kill()\n            except Exception:\n                pass\n        self.log += [\"Compiler returns %d.\" % proc.returncode]\n        if proc.returncode:\n            raise CompileErrorException()\n\n    def compile(self):\n        try:\n            attacker_code_path = os.path.join(self.base_dir,\n                                              'attacker-' + random_string(8) + ('.cpp' if self.record.attacker_lang else '.c'))\n            attacker_code_file = codecs.open(attacker_code_path, 'w', 'utf8')\n            attacker_code_file.write(self.record.attacker_code)\n            attacker_code_file.close()\n            self.run_compiler(self.record.attacker_lang, os.path.basename(attacker_code_path), 'attacker')\n        except Exception as e:\n            self.record.defender_wins()\n            raise e\n        try:\n            defender_code_path = os.path.join(self.base_dir,\n                                              'defender-' + random_string(8) + ('.cpp' if self.record.defender_lang else '.c'))\n            defender_code_file = codecs.open(defender_code_path, 'w', 'utf-8')\n            defender_code_file.write(self.record.defender_code)\n            defender_code_file.close()\n            self.run_compiler(self.record.defender_lang, os.path.basename(defender_code_path), 'defender')\n        except Exception as e:\n            self.record.attacker_wins()\n            raise e\n\n    def copy_assets(self):\n        if os.path.exists('./assets/'):\n            path = os.path.realpath('./assets/')\n            self.log += ['Copying assets files.']\n            for file in os.listdir(path):\n                file_path = os.path.join(path, file)\n                if os.path.isfile(file_path):\n                    target = os.path.join(self.base_dir, file)\n                    self.log += ['Copying ' + file_path + ' to ' + target]\n                    shutil.copyfile(file_path, target)\n            self.log += ['Assets files copied.']\n\n    def run_judge_client(self):\n        self.copy_assets()\n        args = [\"python\", os.path.realpath('judge-client.py'), './attacker', './defender']\n        self.log += ['Running: ' + ' '.join(args)]\n        proc = Popen(args, cwd=self.base_dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n        output = proc.communicate()\n        self.log += [str(output[1])]\n        if proc.returncode:\n            self.log += [\"Judge client crashed with return code %d.\" % proc.returncode]\n            raise JudgeClientException(\"judge client crashed.\")\n        result = output[0].split('\\n')\n        winner = result[0]\n        if winner == \"attacker\":\n            self.record.attacker_wins()\n        elif winner == \"defender\":\n            self.record.defender_wins()\n        else:\n            self.log += [\"Judge client return unknown 
winner %s.\" % winner]\n raise JudgeClientException(\"unknown winner.\")\n reason = result[1]\n if reason == \"Finished\":\n self.record.status = ExecutionRecord.STATUS_FINISHED\n elif reason == \"IllegalMovement\":\n self.record.status = ExecutionRecord.STATUS_ILLEGAL_MOVE\n elif reason == \"IllegalOutput\":\n self.record.status = ExecutionRecord.STATUS_BAD_FORMAT\n elif reason == \"TLE\":\n self.record.status = ExecutionRecord.STATUS_TLE\n elif reason == \"Crashed\":\n self.record.status = ExecutionRecord.STATUS_RUNTIME_ERROR\n else:\n self.log += [\"Judge client return unknown reason %s.\" % reason]\n raise JudgeClientException(\"unknown reason.\")\n self.record.replay = result[2:]\n\n def save_to_database(self):\n self.save_log()\n self.record.save_to_database()\n\n def save_log(self):\n self.record.log = self.log\n\n\ndef judge_by_id(program_id):\n record = ExecutionRecord(program_id)\n execution = Execution(record)\n try:\n execution.record.update_status(ExecutionRecord.STATUS_RUNNING)\n execution.compile()\n execution.run_judge_client()\n except CompileErrorException:\n execution.record.status = ExecutionRecord.STATUS_COMPILE_ERROR\n execution.log += ['Judge process terminated due to a compile error.']\n except Exception as e:\n execution.record.status = ExecutionRecord.STATUS_INTERNAL_ERROR\n execution.log += ['Exception caught in judge daemon.', e.message]\n finally:\n execution.record.ranking_change()\n execution.save_to_database()\n\n\ndef run_daemon():\n redis = Redis()\n while True:\n try:\n to_do = redis.blpop(['judge_queue'])\n program_id = to_do[1]\n judge_by_id(int(program_id))\n except Exception as e:\n print >> sys.stderr, e\n\n\nos.environ['LANG'] = 'C'\nos.environ['LD_LIBRARY_PATH'] = os.path.realpath(os.path.curdir)\nrun_daemon()\n","sub_path":"judge/judge-daemon.py","file_name":"judge-daemon.py","file_ext":"py","file_size_in_byte":10299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467243107","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# %load_ext autoreload\n# %autoreload 2\n\n# In[1]:\n\n\nimport pandas as pd\nimport json\nimport pickle\nimport os\nimport itertools\nfrom ast import literal_eval\nimport numpy as np\nimport sys\nfrom datetime import datetime\nfrom IPython.display import clear_output, display\nimport subprocess\nfrom datetime import datetime\npd.options.display.float_format = '{:.0f}'.format\n\n\n# In[2]:\n\n\ndef LoadJsonFile(filename): \n with open(filename, 'r') as f:\n DicConfig = json.load(f)\n return DicConfig\n\n\ndef GlobalDicDeplier(OneDic):\n for k,v in OneDic.items():\n exec('globals()[k] = v')\n return None\n\n\n# In[3]:\n\n\nDicConfig = LoadJsonFile(os.path.join(os.getcwd(),\"config.json\"))\nGlobalDicDeplier(DicConfig)\nsys.path.append(Root)\nfrom fun import *\nprint(\"Load Config variables\")\n\n\n# In[4]:\n\n\npath = os.path.join(Root,FolderProject,\"RefRT.pkl\")\nRefRT = LoadPickleOrInit(path)\n\npath = os.path.join(Root,FolderProject,\"RefFam.pkl\")\nRefFam = LoadPickleOrInit(path)\n\npath = os.path.join(Root,FolderProject,\"RefInf.pkl\")\nRefInf = LoadPickleOrInit(path)\n\n\npath = os.path.join(Root,FolderProject,\"BatchRT.pkl\")\nBatchRT = LoadPickleOrInit(path)\n\npath = os.path.join(Root,FolderProject,\"BatchFamousTweet.pkl\")\nBatchFam = LoadPickleOrInit(path)\n\npath = os.path.join(Root,FolderProject,\"BatchInf.pkl\")\nBatchInf = LoadPickleOrInit(path)\n\nprint(\"Load Dataframes\")\n\n\n# # RT Part\n\n# In[5]:\n\n\nRefRT = RefRT.append(BatchRT,ignore_index = 
True)\nRefRT.reset_index(inplace = True,drop = True)\nRefRT.drop_duplicates(subset=[\"TWEETID\",\"USERID\"],inplace=True)\n\nPickleDump(os.path.join(Root,FolderProject,\"RefRT.pkl\"),RefRT)\n\npath = os.path.join(Root,FolderProject,\"RefRT.pkl\")\n\nRefRT_memory = RetrieveSize(path)\nRefRT_rows = len(RefRT)\nRefRT_tweets = len(np.unique(RefRT.AUTHORTWEETID))\nRefRT_users = len(np.unique(RefRT.USERID))\nRefRT_authors = len(np.unique(RefRT.AUTHORID))\nRefRT_datemin = RefRT.TWEETUNIXEPOCH.min()\nRefRT_datemax = RefRT.TWEETUNIXEPOCH.max()\n\nprint(\"RT Part\")\n\n\n# # Fam Part\n\n# In[6]:\n\n\nRefFam = RefFam.append(BatchFam,ignore_index = True)\nRefFam.reset_index(inplace = True,drop = True)\nRefFam.drop_duplicates(inplace=True)\nRefFam.drop_duplicates(subset = \"AUTHORTWEETID\", inplace = True)\n\nPickleDump(os.path.join(Root,FolderProject,\"RefFam.pkl\"),RefFam)\n\npath = os.path.join(Root,FolderProject,\"RefFam.pkl\")\n\nRefFam_memory = RetrieveSize(path)\nRefFam_rows = len(RefFam)\nRefFam_authors = len(np.unique(RefFam.AUTHORID))\nRefFam_tweets = len(np.unique(RefFam.AUTHORTWEETID))\nRefFam_datemin = RefFam.AUTHORTWEETUNIXEPOCH.min()\nRefFam_datemax = RefFam.AUTHORTWEETUNIXEPOCH.max()\n\nprint(\"Fam Part\")\n\n\n# # Inf Part\n\n# In[7]:\n\n\nRefInf = RefInf.append(BatchInf,ignore_index = True)\nRefInf.reset_index(inplace = True,drop = True)\nRefInf = RefInf.groupby(\"AUTHORID\").first().reset_index()\nPickleDump(os.path.join(Root,FolderProject,\"RefInf.pkl\"),RefInf)\n\npath = os.path.join(Root,FolderProject,\"RefInf.pkl\")\n\nRefInf_memory = RetrieveSize(path)\nRefInf_rows = len(RefInf)\nRefInf_authors = len(np.unique(RefInf.AUTHORID))\n\nprint(\"Inf Part\")\n\n\n# # Logs\n\n# In[8]:\n\n\ndef FormatNumber(Size):\n res = f'{Size:,}'\n return res\n\n\n# In[9]:\n\n\nRefInf_rows = FormatNumber(RefInf_rows)\nRefInf_authors = FormatNumber(RefInf_authors)\n\nRefFam_rows = FormatNumber(RefFam_rows)\nRefFam_authors = FormatNumber(RefFam_authors)\nRefFam_tweets = FormatNumber(RefFam_tweets)\n\nRefRT_rows = FormatNumber(RefRT_rows)\nRefRT_tweets = FormatNumber(RefRT_tweets)\nRefRT_users = FormatNumber(RefRT_users)\nRefRT_authors = FormatNumber(RefRT_authors)\n\nRefFam_datemin = str(pd.to_datetime(RefFam_datemin,unit=\"s\"))\nRefFam_datemax = str(pd.to_datetime(RefFam_datemax,unit=\"s\"))\nRefRT_datemin = str(pd.to_datetime(RefRT_datemin,unit=\"s\"))\nRefRT_datemax = str(pd.to_datetime(RefRT_datemax,unit=\"s\"))\n\nprint(\"Format values\")\n\n\n# In[10]:\n\n\nRefInfDic = {\"RefInf_memory\":RefInf_memory,\n\"RefInf_rows\":RefInf_rows,\n\"RefInf_authors\":RefInf_authors}\n\nRefFamDic = {\"RefFam_memory\" : RefFam_memory,\n\"RefFam_rows\" : RefFam_rows,\n\"RefFam_authors\" : RefFam_authors,\n\"RefFam_tweets\" : RefFam_tweets,\n\"RefFam_datemin\" : RefFam_datemin,\n\"RefFam_datemax\" : RefFam_datemax}\n\nRefRTDic = {\"RefRT_memory\" : RefRT_memory,\n\"RefRT_rows\" : RefRT_rows,\n\"RefRT_tweets\" : RefRT_tweets,\n\"RefRT_users\" : RefRT_users,\n\"RefRT_authors\" : RefRT_authors,\n\"RefRT_datemin\" : RefRT_datemin,\n\"RefRT_datemax\" : RefRT_datemax}\n\nRefLogs = {\"Fam\":RefFamDic,\n \"RT\":RefRTDic,\n \"Inf\":RefInfDic}\nfilename = os.path.join(Root,FolderProject,\"Ref.log\")\nAppendStringToFile(filename,RefLogs)\n\nprint(\"Write logs\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"trackIran/BuildRef.py","file_name":"BuildRef.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
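The BuildRef.py record above grows its reference frames with DataFrame.append, which pandas deprecated in 1.4 and removed in 2.0. A minimal sketch of the same merge-and-deduplicate step written with pd.concat instead (the RefRT/BatchRT names and the TWEETID/USERID key columns are taken from that record; the toy frames here are only illustrative):

import pandas as pd

# Toy stand-ins for the reference and batch frames handled in BuildRef.py.
RefRT = pd.DataFrame({"TWEETID": [1], "USERID": [10]})
BatchRT = pd.DataFrame({"TWEETID": [1, 2], "USERID": [10, 20]})

# pd.concat replaces the removed DataFrame.append; the duplicate drop and
# index reset mirror the record's RefRT update step.
RefRT = pd.concat([RefRT, BatchRT], ignore_index=True)
RefRT.drop_duplicates(subset=["TWEETID", "USERID"], inplace=True)
RefRT.reset_index(inplace=True, drop=True)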
+{"seq_id":"480353402","text":"apple_price = 1000\r\ngrape_price = 3000\r\npear_price = 2000\r\norange_price = 500\r\napple_num = 0\r\ngrape_num = 0\r\npear_num = 0\r\norange_num = 0\r\ntotal = 0\r\nsale = False\r\nwhile True:\r\n fruit = input(\"구입 하려는 과일의 이름을 입력하세요 (입력을 종료하려면 q를 입력) : \")\r\n if fruit == \"q\":\r\n break\r\n else:\r\n num = int(input(\"과일의 개수를 입력해주세요\"))\r\n if fruit == \"사과\":\r\n apple_num += num\r\n total += apple_price * num\r\n elif fruit == \"포도\":\r\n grape_num += num\r\n total += grape_price * num\r\n elif fruit == \"배\":\r\n pear_num += num\r\n total += pear_price * num\r\n elif fruit == \"귤\":\r\n orange_num += num\r\n total += orange_price * num\r\nif grape_num >= 3:\r\n total *= 0.7\r\nprint(\"귀하는 \", end=\"\")\r\nif apple_num > 0:\r\n print(\"사과 -> %d 개 \" % apple_num, end=\"\")\r\nif grape_num > 0:\r\n print(\"포도 -> %d 개 \" % grape_num, end=\"\")\r\nif pear_num > 0:\r\n print(\"배 -> %d 개 \" % pear_num, end=\"\")\r\nif orange_num > 0:\r\n print(\"귤 -> %d 개 \" % orange_num, end=\"\")\r\nprint(\"총 가격은 %d원 입니다. \" % total)\r\n","sub_path":"python/2_2_afternoon/추가문제.py","file_name":"추가문제.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545455942","text":"# logistic regression model for Titanic survivor prediction\n\nimport pandas as pd\nimport os\nimport numpy as np\nfrom pandas import Series, DataFrame\nimport matplotlib.pyplot as plt\nfrom data_missing import set_missing_ages_mean, set_missing_ages_rf, set_Cabin_type\nimport sklearn.preprocessing as preprocessing\nfrom sklearn import linear_model\nfrom sklearn import cross_validation\nfrom sklearn.utils import shuffle\nfrom plt_learning_curve import plot_learning_curve\n\nprint('training code is processing : ')\n# os.system(\"pause\")\n\ndata_train = pd.read_csv('J:/Code/kaggle/Titanic/train.csv')\ndata_train.info()\n\n#### data analysis\n\nfig = plt.figure()\nfig.set(alpha = 0.2)\n\n############################ 画图分析\nplt.subplot2grid((2, 3), (0, 0))\ndata_train.Survived.value_counts().plot(kind = 'bar')\nplt.title('survivor_distribution')\nplt.ylabel('population')\n\nplt.subplot2grid((2, 3), (0, 1))\ndata_train.Pclass.value_counts().plot(kind = 'bar')\nplt.title('people_class')\nplt.ylabel('distribution form Pclass')\n\nplt.subplot2grid((2, 3), (0, 2))\nplt.scatter(data_train.Survived, data_train.Age)\n# data_train.Survived.value_counts().plot(kind = 'bar')\nplt.grid(b = True, which = 'major', axis = 'y')\nplt.title('distribution from age')\nplt.ylabel('age')\n\nplt.subplot2grid((2, 3), (1, 0), colspan = 2)\ndata_train.Age[data_train.Pclass == 1].plot(kind = 'kde')\ndata_train.Age[data_train.Pclass == 2].plot(kind = 'kde')\ndata_train.Age[data_train.Pclass == 3].plot(kind = 'kde')\nplt.xlabel('age')\nplt.ylabel('density')\nplt.title('class-age distribution')\nplt.legend(('first', 'second', 'third'), loc = 'best')\n\nplt.subplot2grid((2, 3), (1, 2))\ndata_train.Embarked.value_counts().plot(kind = 'bar')\nplt.title('terminal')\nplt.ylabel('population')\n# plt.show()\n\n# fig2 = plt.figure()\n# fig2.set(alpha = 0.2)\n\n# fig = plt.figure()\n# fig.set(alpha = 0.2)\n# ax1=fig.add_subplot(121)\nSurvived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()\ndf = pd.DataFrame({'survived' : Survived_1, 'not survived' : Survived_0})\ndf.plot(kind = 
'bar')\nplt.title('class-surviving')\nplt.xlabel('passenger-class')\nplt.ylabel('population')\n# plt.show()\n\n# ax2=fig.add_subplot(122)\nSurvived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()\nSurvived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()\ndf = pd.DataFrame({'male' : Survived_m, 'female' : Survived_f})\ndf.plot(kind = 'bar', stacked = True)\nplt.title('sex-surviving')\nplt.xlabel('sex')\nplt.ylabel('population')\n# plt.show()\n\nfig = plt.figure()\nplt.title(\"class-sex surviving\")\n\nax1=fig.add_subplot(141)\ndata_train.Survived[data_train.Sex == 'female'][data_train.Pclass != 3].value_counts().plot(kind='bar', label=\"female high class\", color='#FA2479')\nax1.set_xticklabels([\"not survived\", \"survived\"], rotation=0)\nax1.legend([\"female/highclass\"], loc='best')\n\nax2=fig.add_subplot(142, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='female, low class', color='pink')\nax2.set_xticklabels([\"not survived\", \"survived\"], rotation=0)\nplt.legend([\"female/low class\"], loc='best')\n\nax3=fig.add_subplot(143, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'male'][data_train.Pclass != 3].value_counts().plot(kind='bar', label='male, high class',color='lightblue')\nax3.set_xticklabels([\"not survived\", \"survived\"], rotation=0)\nplt.legend([\"male/highclass\"], loc='best')\n\nax4=fig.add_subplot(144, sharey=ax1)\ndata_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind='bar', label='male low class', color='steelblue')\nax4.set_xticklabels([\"not survived\", \"survived\"], rotation=0)\nplt.legend([\"male/low class\"], loc='best')\n# plt.show()\n\nSurvived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()\nSurvived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()\ndf=pd.DataFrame({'survived':Survived_1, 'not survived':Survived_0})\ndf.plot(kind='bar', stacked=True)\nplt.title(\"terminal-surviving\")\nplt.xlabel(\"terminal\") \nplt.ylabel(\"population\") \n\n# plt.show()\n##################################################\n\n\n######################## featrue engineering\n\ndata_train = set_missing_ages_mean(data_train)\n\n# print(rfr)\n\ndata_train = set_Cabin_type(data_train)\n\nprint(data_train)\n\n# 将需要的类目属性全部转换成0,1的数值\n\ndummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix = 'Cabin')\n\ndummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix = 'Embarked')\n\ndummies_Sex = pd.get_dummies(data_train['Sex'], prefix = 'Sex')\n\ndummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix = 'Pclass')\n\ndf = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis = 1)\nprint('df : \\n', df)\n\ndf.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis = 1, inplace = True)\nprint('df : \\n', df)\nprint(pd.concat([df['Age'], df['Fare']], axis = 1))\n\nscaler = preprocessing.StandardScaler()\n\n# 标准化,调整为均值为0方差为1的标准正太分布\nage_scale_param = scaler.fit(df[['Age']])\ndf['Age_scaled'] = scaler.fit_transform(df[['Age']], age_scale_param)\nprint('df[\\'Age_scaled\\'] : \\n', df['Age_scaled'])\n\nfare_scale_param = scaler.fit(df[['Fare']])\ndf['Fare_scaled'] = scaler.fit_transform(df[['Fare']], age_scale_param)\nprint('df[\\'Fare_scaled\\'] : \\n', df['Fare_scaled'])\nprint(pd.concat([df['Age_scaled'], df['Fare_scaled']], axis = 1))\n\n### building the logistic-regression model\n\n# 正则表达式取出想要的属性值\ntrain_df = df.filter(regex = 
'PassengerId|Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.\\D|Embarked_[^0]|Sex_.\\D|Pclass_[1-3]')\n\n### training_set : validation_set = 7 : 3\nsplit_train_df, split_cv_df = cross_validation.train_test_split(train_df, test_size = 0.3, random_state = 0)\n\n# pd.set_option('display.max_columns',None)\nprint(train_df)\n# os.system(\"pause\")\n\ntrain_np = train_df.as_matrix()\nprint(train_np)\n\nsplit_train_df = split_train_df.as_matrix()\n\n# label : survived\ny = split_train_df[:, 1]\n\n# featrue : Parch, Cabin_No, Cabin_Yes...\nX = split_train_df[:, 2:]\n\nllr = linear_model.LogisticRegression(C = 1.0, penalty = 'l1', tol = 1e-6)\n\nllr.fit(X, y)\n\nprint(llr)\n\nprint(pd.DataFrame({\"columns\" : list(train_df.columns)[2:], \"coef\" : list(llr.coef_.T)}))\n\nos.system(\"pause\")\n########## validation set/cross validation\n\n### training_set : validation_set = 7 : 3\n\n# llr.fit(split_train_df.as_matrix()[:, 2:], split_train_df.as_matrix()[:, 1]) # 这里是因为加入了passengerid所以从2开始\n\n### predict validation set\n# split_cv_df = split_cv_df.filter(regex = 'Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.\\D|Embarked_[^0]|Sex_.\\D|Pclass_[1-3]')\nprediction_cv = llr.predict(split_cv_df.as_matrix()[:, 2:])\n\n### 预测失败的validation\n\nprint(prediction_cv)\nprint(split_cv_df)\nprint(split_cv_df[prediction_cv != split_cv_df.values[:, 1]])#['PassengerId'].values)\n\n\nori_data_train = pd.read_csv('J:/Code/kaggle/Titanic/train.csv')\n\nbad_cases = ori_data_train.loc[ori_data_train['PassengerId'].isin(split_cv_df[prediction_cv != split_cv_df.values[:, 1]]['PassengerId'].values)]\n\nbad_cases = bad_cases.filter(regex = 'PassengerId|Survived|Name|Age|SibSp|Parch|Fare|Cabin|Embarked|Sex|Pclass')\n\npd.set_option('display.max_columns',None)\n\nprint('bad_cases : \\n', bad_cases)\nprint(bad_cases.iloc[:, 0].size)\n\n########### analysis of bad_cases\nbad_cases_male = bad_cases.Survived[bad_cases.Sex == 'male'].value_counts()\nprint(bad_cases_male)\nbad_cases_female = bad_cases.Survived[bad_cases.Sex == 'female'].value_counts()\nprint(bad_cases_female)\n# survived_0 = bad_cases.Survived[bad_cases.Survived == 0].value_counts()\n# survived_1 = bad_cases.Survived[bad_cases.Survived == 1].value_counts()\n# df = pd.DataFrame({'survived' : survived_1, 'not survived' : survived_0})\n\n## 该图发现bad_case里面很多男的活了下来,女的死了,没有遵循模型内部的女士优先原则\n\ndf = pd.DataFrame({'bad_cases_female' : bad_cases_female, 'bad_cases_male' : bad_cases_male})\ndf.plot(kind = 'bar')\nplt.title('bad_cases-surviving')\n# plt.xlabel('survived or not')\nplt.xlabel('sex')\nplt.ylabel('population')\n# plt.show()\nprint(\"X_size :\",X[:, 0].size)\nprint(\"y_size :\",y[:].size)\nX_shuf, y_shuf = shuffle(X, y)\nplot_learning_curve(llr, \"learning curve\", X_shuf, y_shuf)\n# plot_learning_curve(llr, \"learning curve\", X, y)","sub_path":"logistic_regressor_4_train.py","file_name":"logistic_regressor_4_train.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"402907942","text":"#! Writing term into dictionary, where key - x^n... 
; value - a\n\n# Parsing polynom on terms\ndef get_term(poly):\n term = list()\n dictionary = dict()\n flag = 1 # for \"=\" detection and reverse sign if we`re on right side of polynom\n start = 0\n end = len(poly)\n sign = \"+\"\n if poly[0] == \"-\":\n sign = \"-\"\n start = 1\n for i in range(start, end):\n if poly[i-1] == \"=\" and poly[i] == \"-\":\n sign = \"-\"\n elif poly[i] == \"-\" or poly[i] == \"+\":\n key, value = read_term(term, sign, flag)\n if key in dictionary:\n dictionary[key] += value\n else:\n dictionary[key] = value\n term.clear()\n sign = poly[i]\n elif poly[i] == \"=\":\n key, value = read_term(term, sign, flag)\n if key in dictionary:\n dictionary[key] += value\n else:\n dictionary[key] = value\n term.clear()\n flag = -1\n sign = \"+\"\n else:\n term += poly[i]\n key, value = read_term(term, sign, flag)\n if key in dictionary:\n dictionary[key] += value\n else:\n dictionary[key] = value\n term.clear()\n\n return dictionary\n\n# Adding terms into dictionary\ndef read_term(term, sign, flag): \n dictionary = dict()\n temp_value = \"\"\n temp_key = \"\"\n multiplier_flag = True # to detect end of multiplier for not confuse with degree\n\n for char in term:\n if multiplier_flag and ((char > \"0\" and char <= \"9\") or char == \".\"):\n temp_value = temp_value + char\n else:\n multiplier_flag = False\n temp_key = temp_key + char\n try:\n if temp_value == \"\":\n value = 1\n else:\n try:\n value = int(temp_value)\n except:\n value = float(temp_value)\n if temp_key == \"\":\n key = \"free_digit\"\n else:\n key = str(temp_key)\n except Exception as exp:\n print(\"Somethink wrong\", exp)\n\n #adding right sign to value\n if flag == 1:\n if sign == \"-\":\n value = value*(-1)\n elif flag == -1:\n if sign == \"+\":\n value = value*(-1)\n\n return key, value","sub_path":"CanonizeTerm.py","file_name":"CanonizeTerm.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43072668","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('/detail/', views.detail, name='detail'),\n path('listup/', views.listup, name='listup'),\n path('listup_detail/', views.listup_detail, name='listup_detail'),\n path('detail///', views.playlist_first, name='playlist_first'),\n path('detail///clicked/', views.playlist_clicked, name='playlist_clicked'),\n]\n","sub_path":"lectures/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"636320696","text":"n = int(input())\na = list(map(int,input().split()))\ncount = 0\ni = 0\nwhile True:\n if i+2 < len(a) and a[i+2] == 0:\n i += 2\n count += 1\n elif i+2 < len(a) and a[i+2] == 1:\n i += 1\n count += 1\n if i >= len(a) - 1:\n break\n elif i == len(a) - 2:\n count += 1\n break\nprint(count)","sub_path":"ProblemSolving-Algorithm/Python3/Jumping on clouds p1.py","file_name":"Jumping on clouds p1.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"547530697","text":"import datetime\n\nfrom _lib.lambda_api import LambdaAPI\nfrom _lib.illegal_access_error import IllegalAccessError\nfrom _lib.invalid_request_error import InvalidRequestError\nfrom _lib.no_such_resource_error import NoSuchResourceError\n\ndef lambda_handler(event, context):\n api = LambdaAPI(\"Getting server status\", \"https://my-server/api-errors\", event, context)\n api.set_root_logging_level(10)\n api.log_initial_status()\n\n try:\n api.except_on_missing_value([\n ['body-json']\n ])\n\n error_code = api.fetch_value(['params','querystring', 'error'])\n if error_code is not None and len(error_code)>0:\n if str(error_code) == '500':\n raise Exception('Forced 500')\n elif str(error_code) == '404':\n raise NoSuchResourceError(100,\"Forced 404\")\n elif str(error_code) == '403':\n raise IllegalAccessError(100,\"Forced 403\")\n elif str(error_code) == '400':\n raise InvalidRequestError(100,\"Forced 400\")\n else:\n raise InvalidRequestError(100,\n \"You attempted to force a code {0} but only 500, 403, 404, and 400 are supported\"\n .format(error_code))\n\n now = datetime.datetime.now()\n\n to_output = {\n 'version': '4',\n 'serverTime': int(now.strftime('%s')),\n 'serverTimeFormatted': now.strftime('%Y-%m-%d %H:%M:%S'),\n 'serverStartTime': int(now.strftime('%s')),\n 'serverStartTimeFormatted': now.strftime('%Y-%m-%d %H:%M:%S'),\n 'status': 'ok'\n }\n\n return api.api_success(to_output)\n except Exception as exception:\n api.convert_exception_to_error_response(exception)\n\nif __name__ == '__main__':\n LambdaAPI.run_lambda_function_local(__file__, lambda_handler)\n","sub_path":"V03/api/src/_api/server_status.py","file_name":"server_status.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"144988711","text":"import boto3\nimport click\n\nsession = boto3.Session(profile_name='python')\nec2 = session.resource('ec2')\n\n\n@click.command()\n@click.option('--project', default=None, help=\"Only instance for project\")\ndef list_instances(project):\n instances = []\n if project:\n filters = [{'Name': 'tag:project', 'Values': [project]}]\n instances = ec2.instances.filter(Filters=filters)\n else:\n print(\"in else\")\n instances = ec2.instances.all()\n\n for i in instances:\n tags = {t['Key']: t['Value'] for t in i.tags or []}\n print(', '.join((i.id, tags.get('project','No tag'))))\n\n\nif __name__ 
== \"__main__\":\n list_instances()\n","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"573879531","text":"import json\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.detail import BaseDetailView\nfrom django.views.generic.list import ListView\n\nfrom djangomaster.conf import settings as conf\n\n\nclass MasterView(ListView):\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_superuser:\n raise PermissionDenied\n\n return super(MasterView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(MasterView, self).get_context_data(**kwargs)\n context['menu_item'] = getattr(self, 'menu_item', '')\n context['settings'] = conf\n return context\n\n\nclass JSONResponseMixin(object):\n \"\"\"\n # @see https://docs.djangoproject.com/en/1.4/topics/class-based-views/\n \"\"\"\n\n def render_to_response(self, context):\n \"Returns a JSON response containing 'context' as payload\"\n return self.get_json_response(self.convert_context_to_json(context))\n\n def get_json_response(self, content, **httpresponse_kwargs):\n \"Construct an `HttpResponse` object.\"\n return HttpResponse(content, content_type='application/json',\n **httpresponse_kwargs)\n\n def convert_context_to_json(self, context):\n \"Convert the context dictionary into a JSON object\"\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n\n if 'object' in context:\n return json.dumps(context['object'], cls=DjangoJSONEncoder)\n else:\n return json.dumps(context, cls=DjangoJSONEncoder)\n\n\nclass JSONDetailView(JSONResponseMixin, BaseDetailView):\n pass\n\n\nclass MasterJSONView(JSONResponseMixin, ListView):\n @method_decorator(login_required)\n def dispach(self, *args, **kwargs):\n if not self.request.user.is_admin:\n raise PermissionDenied\n\n return super(JSONDetailView, self).dispach(*args, **kwargs)\n","sub_path":"djangomaster/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"10168654","text":"\"\"\"Plugin replacement for Otter Assign\"\"\"\n\nimport re\nimport yaml\nimport nbformat\n\nfrom .utils import get_source\n\n\nBEGIN = \"# BEGIN PLUGIN\"\nEND = \"# END PLUGIN\"\nBEGIN_EXPORT = \"# BEGIN PLUGIN EXPORT\"\n\n\ndef replace_plugins(lines):\n \"\"\"\n Replaces plugins with calls in ``lines``\n \n Args:\n lines (``list`` of ``str``): cell contents as a list of strings\n\n Returns:\n ``list`` of ``str``: stripped version of lines with plugin calls\n \"\"\"\n starts, ends = [], []\n stripped = [[]]\n exports = []\n plugin = False\n for i, line in enumerate(lines):\n if line.rstrip().endswith(END):\n assert plugin, f\"END PLUGIN without BEGIN PLUGIN found in {lines}\"\n plugin = False\n ends.append(i)\n stripped.append([])\n \n elif line.rstrip().endswith(BEGIN):\n assert not plugin, f\"Nested plugins found in {lines}\"\n starts.append(i)\n 
exports.append(False)\n plugin = True\n \n elif line.rstrip().endswith(BEGIN_EXPORT):\n assert not plugin, f\"Nested plugins found in {lines}\"\n starts.append(i)\n exports.append(True)\n plugin = True\n\n elif plugin:\n stripped[len(starts) - 1].append(line)\n\n assert len(stripped) == len(starts) + 1 == len(ends) + 1 == len(exports) + 1, f\"Error processing plugins in {lines}\"\n assert all(s < e for s, e in zip(starts, ends))\n\n starts.reverse()\n ends.reverse()\n stripped.reverse()\n stripped = stripped[1:]\n\n lines = lines.copy()\n\n for i, (s, e) in enumerate(zip(starts, ends)):\n config = yaml.full_load(\"\\n\".join(stripped[i]))\n export = exports[i]\n pg = config[\"plugin\"]\n args = \", \".join(config.get(\"args\", []))\n kwargs = \", \".join([f\"{k}={v}\" for k, v in config.get(\"kwargs\", {}).items()])\n\n call = (\"run_plugin\", \"add_plugin_files\")[export]\n\n call = f'grader.{call}(\"{pg}\"'\n if args:\n call += f', {args}'\n if kwargs:\n call += f', {kwargs}'\n call += ')'\n\n del lines[s:e+1]\n lines.insert(s, call)\n\n return lines\n\n\ndef replace_plugins_with_calls(nb):\n \"\"\"\n Write a notebook with plugins replaced with calls\n \n Args:\n nb (``nbformat.NotebookNode``): the notebook\n \"\"\"\n for cell in nb['cells']:\n cell['source'] = '\\n'.join(replace_plugins(get_source(cell)))\n \n return nb\n","sub_path":"otter/assign/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"510009363","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nc=[\"red\",\"green\",\"blue\",\"purple\"]\n\ndef plot_data(data, c = c):\n n_samples = data.shape[1]\n n_subj = data.shape[0]\n one = np.ones(n_samples)\n noise = np.random.rand((n_samples))-0.5\n\n fig, ax = plt.subplots(1,3, figsize=(10,5))\n for i in range(n_subj):\n s = i+1\n ax[0].scatter(one*s+noise, data[i,:], color=c[i], label= f\"subject {s}\")\n ax[0].legend()\n ax[0].set_title(\"subject data\")\n\n ax[1].scatter(np.arange(1,n_subj+1), data.mean(axis=1), color=c)\n ax[1].errorbar(np.arange(1,n_subj+1), data.mean(axis=1), yerr=data.std(axis=1), capsize=4, ecolor=\"black\", fmt='none')\n ax[1].scatter(np.arange(1,n_subj+1), data.mean(axis=1), color=c)\n ax[1].set_title(\"error by subject\")\n\n ax[2].errorbar(1, data.mean(), yerr=data.std(), capsize=4, ecolor=\"black\", fmt='o', marker='s', mfc='yellow', mec='black', ms=20, mew=4)\n ax[2].set_title(\"overall error bar\")\n ax[2].set_xticklabels([])\n return fig, ax","sub_path":"hands_on/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"590879815","text":"\"\"\"\nCopyright (c) 2018, salesforce.com, inc.\nAll rights reserved.\nSPDX-License-Identifier: BSD-3-Clause\nFor full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n\"\"\"\n\nfrom crawl import bazel\nfrom crawl import dependency\nimport os\nimport unittest\nimport tempfile\n\n\nclass BazelTest(unittest.TestCase):\n\n def test_parse_maven_install(self):\n fd, path = tempfile.mkstemp()\n with os.fdopen(fd, 'w') as f:\n f.write(MVN_INSTALL_JSON_CONTENT)\n\n result = bazel.parse_maven_install(\"maven\", path)\n\n self.assertEqual(1, len(result))\n dep, transitives, exclusions = result[0]\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"ch.qos.logback:logback-classic:1.2.3\", 
\"maven\"), dep)\n self.assertEqual(2, len(transitives))\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"ch.qos.logback:logback-core:1.2.3\", \"maven\"), transitives[0])\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"org.slf4j:slf4j-api:jar:1.7.30\", \"maven\"), transitives[1])\n self.assertEqual(2, len(exclusions))\n self.assertEqual(\"jakarta.el\", exclusions[0].group_id)\n self.assertEqual(\"jakarta.el-api\", exclusions[0].artifact_id)\n self.assertEqual(\"org.glassfish\", exclusions[1].group_id)\n self.assertEqual(\"jakarta.el\", exclusions[1].artifact_id)\n\n def test_conflict_resolution_is_honored(self):\n \"\"\"\n Verifies that the pinned file's \"conflict_resolution\" attribute is\n handled.\n \"\"\"\n fd, path = tempfile.mkstemp()\n with os.fdopen(fd, 'w') as f:\n f.write(MVN_INSTALL_JSON_CONTENT_CONFLICT_RESOLUTION)\n\n result = bazel.parse_maven_install(\"maven\", path)\n\n self.assertEqual(2, len(result))\n dep, transitives, exclusions = result[0]\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"ch.qos.logback:logback-classic:1.2.3\", \"maven\"), dep)\n self.assertEqual(1, len(transitives))\n transitive_guava = transitives[0]\n # we expect to get the dep from conflict_resolution map\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"com.google.guava:guava:31.0.1-jre\", \"maven\"), transitive_guava)\n guava, transitives, exclusions = result[1]\n # we expect to get the dep from conflict_resolution map\n self.assertEqual(dependency.new_dep_from_maven_art_str(\"com.google.guava:guava:31.0.1-jre\", \"maven\"), guava)\n self.assertEqual(transitive_guava, guava)\n\n def test_target_pattern_to_path(self):\n \"\"\"\n Tests for bazel.target_pattern_to_path.\n \"\"\"\n self.assertEqual(\"foo/blah\", bazel.target_pattern_to_path(\"//foo/blah\"))\n self.assertEqual(\"foo/blah\", bazel.target_pattern_to_path(\"/foo/blah\"))\n self.assertEqual(\"foo/blah\", bazel.target_pattern_to_path(\"foo/blah:target_name\"))\n self.assertEqual(\"foo/blah\", bazel.target_pattern_to_path(\"foo/blah/...\"))\n self.assertEqual(\"foo/blah\", bazel.target_pattern_to_path(\"foo/blah\"))\n\n def test_ensure_unique_deps(self):\n \"\"\"\n Tests for bazel._ensure_unique_deps\n \"\"\"\n self.assertEqual([\"//a\", \"//b\", \"//c\"],\n bazel._ensure_unique_deps([\"//a\", \"//b\", \"//c\", \"//a\"]))\n\n\nMVN_INSTALL_JSON_CONTENT = \"\"\"\n{\n \"dependency_tree\": {\n \"__AUTOGENERATED_FILE_DO_NOT_MODIFY_THIS_FILE_MANUALLY\": -163681522,\n \"conflict_resolution\": {},\n \"dependencies\": [\n {\n \"coord\": \"ch.qos.logback:logback-classic:1.2.3\",\n \"dependencies\": [\n \"ch.qos.logback:logback-core:1.2.3\",\n \"org.slf4j:slf4j-api:jar:1.7.30\"\n ],\n \"directDependencies\": [\n \"org.slf4j:slf4j-api:1.7.30\"\n ],\n \"exclusions\": [\n \"jakarta.el:jakarta.el-api\",\n \"org.glassfish:jakarta.el\"\n ]\n },\n {\n \"coord\": \"ch.qos.logback:logback-classic:jar:sources:1.2.3\",\n \"dependencies\": [\n \"org.slf4j:slf4j-api:jar:sources:1.7.30\",\n \"ch.qos.logback:logback-core:jar:sources:1.2.3\"\n ],\n \"directDependencies\": [\n \"ch.qos.logback:logback-core:jar:sources:1.2.3\",\n \"org.slf4j:slf4j-api:jar:sources:1.7.30\"\n ],\n \"exclusions\": [\n ]\n\n }\n ],\n \"version\": \"0.1.0\"\n }\n}\n\"\"\"\n\nMVN_INSTALL_JSON_CONTENT_CONFLICT_RESOLUTION = \"\"\"\n{\n \"dependency_tree\": {\n \"__AUTOGENERATED_FILE_DO_NOT_MODIFY_THIS_FILE_MANUALLY\": -163681522,\n \"conflict_resolution\": {\n \"com.google.guava:guava:31.0.1-jre\": \"com.google.guava:guava:31.0.1-jre-SNAPSHOT\"\n },\n 
\"dependencies\": [\n {\n \"coord\": \"ch.qos.logback:logback-classic:1.2.3\",\n \"dependencies\": [\n \"com.google.guava:guava:31.0.1-jre-SNAPSHOT\"\n ],\n \"directDependencies\": [\n \"com.google.guava:guava:31.0.1-jre-SNAPSHOT\"\n ]\n },\n {\n \"coord\": \"com.google.guava:guava:31.0.1-jre-SNAPSHOT\",\n \"dependencies\": [],\n \"directDependencies\": [],\n \"exclusions\": [\n \"*:*\"\n ]\n }\n ],\n \"version\": \"0.1.0\"\n }\n}\n\"\"\"\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/bazeltest.py","file_name":"bazeltest.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149922759","text":"# file to contain API keys to use for ticketmaster API\n\n# public API rate limit for normal account\n# 5000 requests every 1 day\n\n# we can use the public APIs which are:\n# Discovery API\n# Commerce API\n\n# Partner APIs are:\n# Partner API\n# Publish API\n# International Discovery API\n\n# API keys\n# maxliu@uchicago.edu\nkey1 = \"hDzVBIQUgOvWcpgizjEuhvuM5TI0qxG1\"\n# max_liu@uchicago.edu\nkey2 = \"gUkTRWqOR2vXMqirOvU9pZ1eRVpRJBqA\"\n# max.liu@uchicago.edu\nkey3 = \"8FYGosHlo8zMPBC5COOdl5IduPs7zazC\"\n# maxxliu@uchicago.edu\nkey4 = \"5Ac8YTsJHkWwwcctUTqOrm0Dq1Ya59CL\"\n# maxx_liu@uchicago.edu\n# maxx.liu@uchicago.edu\n\n# all keys\nAPI_KEYS = [key1, key2, key3, key4]\n\n# keep track of what key im using and how to move to next key\nCUR_KEY = [-1] # key we are currently using\nNO_KEYS = len(API_KEYS) # total number of keys\ndef switch_key():\n '''\n switches the key that the API is using, will switch if current key reaches\n limit or if rate limiting due to too many calls too quickly\n\n outputs:\n key (str) - will return the new key to use, if the keys have been\n rotated through too many times then something is wrong and\n error value of -1 will be returned\n '''\n CUR_KEY[0] += 1\n switch_to = CUR_KEY[0] % NO_KEYS\n # too many rotations means they are probably all exhausted\n if CUR_KEY[0] > (10 * NO_KEYS):\n print(\"WARNING: keys have been rotated through too many times\")\n return -1\n\n return API_KEYS[switch_to]\n","sub_path":"activities_data/ticketmaster/tm_auth.py","file_name":"tm_auth.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426497373","text":"import csv\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import brown\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n#manually set intensity of often-used adverbs\ni1 = ['very','highly','truly','really','quite']\ni2 = ['extremely','exceedingly','super','passing','remarkably']\n\n\n\nclass Capture: #Object for saving intensity-adverb, and modified wordm and it's position relative to adverb\n def __init__(self,adverb,position,word):\n self.adverb = adverb\n self.position = position\n self.word = word\n\nclass Store: #bject for Store the Captures\n def __init__(self):\n self.adjective = {}\n self.adverb = {}\n self.verb = {}\n\ndef is_be_verb(syn): # check wheter it's original form is \"be\" or not\n if 'be' is WordNetLemmatizer().lemmatize(syn,'v'):\n return True\n else :\n return False\n\n\ndef is_sign(syn): #check whether it is symbol or not\n if syn in [',','.','``']:\n return True\n else:\n return False\n\n\ndef store_word(store,sentence,current,position,intensity): #handle for three cases and pass it to \"save\" function\n target = sentence[current+position]\n syns = wn.synsets(target)\n if position == -1 
and current >1 and is_be_verb(sentence[current-2]): # for the case like \"he is praise highly\" , \"be\" is located two step previous than adverb\n for s in syns:\n if not is_sign(target) and not s.pos() == 'n':\n save(store,s,Capture(sentence[current],position,target),intensity)\n\n elif position == 1: # for the next word\n if sentence[current+position] in ['being','been''doing'] and current+20.7:\n if dict[syn1][1]>dict[syn2][1]: #make triples\n result.append(make_triple(cap2.word,cap1))\n else:\n result.append(make_triple(cap1.word,cap2))\n\ndef make_triple(cap_strong,cap_weak): #make triple for outuput\n if cap_weak.position == -1:\n return (cap_strong,cap_weak.word,cap_weak.adverb)\n else:\n return (cap_strong,cap_weak.adverb,cap_weak.word)\n\nresult = []\ncompare_freq(result,store.adverb)\ncompare_freq(result,store.adjective)\ncompare_freq(result,store.verb)\n\n\nf = open('CS372_HW1_output_20170221.csv','w',newline='')\nwr = csv.writer(f)\nwr.writerow(result[:50])\nf.close()\n\n\n","sub_path":"CS372_HW1_code_20170221.py","file_name":"CS372_HW1_code_20170221.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"485103281","text":"from collections import deque\nclass Solution:\n def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n queue = deque([(i, j) for i in range(len(mat)) for j in range(len(mat[0])) if mat[i][j] == 0])\n neighbours = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n visited = set()\n dist = 0\n \n while queue:\n size = len(queue)\n for _ in range(size):\n row, col = queue.popleft()\n mat[row][col] = dist\n for neighbour in neighbours:\n new_row = row + neighbour[0]\n new_col = col + neighbour[1]\n if 0 <= new_row < len(mat) and \\\n 0 <= new_col < len(mat[0]) and \\\n (new_row, new_col) not in visited and \\\n mat[new_row][new_col] != 0:\n queue.append((new_row, new_col))\n visited.add((new_row, new_col))\n dist += 1\n \n return mat\n","sub_path":"DailyInterviewQuestions/01Matrix.py","file_name":"01Matrix.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239026175","text":"from datetime import datetime\n\ndef date_time(time: str) -> str:\n dt = datetime.strptime(time, '%d.%m.%Y %H:%M')\n if dt.hour == 1:\n h = 'hour'\n else:\n h = 'hours'\n if dt.minute == 1:\n m = 'minute'\n else:\n m = 'minutes'\n return dt.strftime(f'{dt.day} %B %Y year {dt.hour} {h} {dt.minute} {m}')\n\n\nif __name__ == \"__main__\":\n print(\"Example:\")\n print(date_time(\"01.01.2000 00:00\"))\n\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert (\n date_time(\"01.01.2000 00:00\") == \"1 January 2000 year 0 hours 0 minutes\"\n ), \"Millenium\"\n assert (\n date_time(\"09.05.1945 06:30\") == \"9 May 1945 year 6 hours 30 minutes\"\n ), \"Victory\"\n assert (\n date_time(\"20.11.1990 03:55\") == \"20 November 1990 year 3 hours 55 minutes\"\n ), \"Somebody was born\"\n print(\"Coding complete? 
Click 'Check' to earn cool rewards!\")\n","sub_path":"Date and Time Converter/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"201688181","text":"\n\n#calss header\nclass _AWKWARD():\n\tdef __init__(self,): \n\t\tself.name = \"AWKWARD\"\n\t\tself.definitions = [u'difficult to use, do, or deal with: ', u'causing problems, worry, or embarrassment: ', u'embarrassed or nervous: ', u'intentionally not helpful: ', u'moving in a way that is not natural, relaxed, or attractive: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_awkward.py","file_name":"_awkward.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647808817","text":"# -*- coding: utf-8 -*-\n\nfrom ..utils import load_yaml_presets\n\n# User role\nUSER = 0\nADMIN = 1\nUSER_ROLE = {\n ADMIN: 'admin',\n USER: 'user',\n}\n\n# User status\nINACTIVE = 0\nACTIVE = 1\nUSER_STATUS = {\n INACTIVE: 'inactive',\n ACTIVE: 'active',\n}\n\n# Resource types\nRESOURCE_TYPES = {\n 0: ('forks', \"Forks the source of\"),\n 1: ('proves', \"Demo or example of\"),\n 2: ('permits', \"License or terms applied\"),\n 3: ('includes', \"Includes this library or resource\"),\n 4: ('uses data', \"Uses this data source\"),\n 5: ('built with', \"Uses this hardware component\"),\n 6: ('inspired by', \"Is inspired by this\"),\n}\n\n\ndef resourceTypeList(verbose=False):\n vb = 1 if verbose else 0\n pl = [(g, RESOURCE_TYPES[g][vb]) for g in RESOURCE_TYPES]\n return sorted(pl, key=lambda x: x[0])\n\n\ndef getResourceType(resource, verbose=False):\n vb = 1 if verbose else 0\n if resource is None:\n return \"\"\n if resource.type_id is None:\n return RESOURCE_TYPES[0][vb]\n if resource.type_id not in RESOURCE_TYPES:\n return \"\"\n return RESOURCE_TYPES[resource.type_id][vb]\n\n\n# Project progress stages\nproject_stages = load_yaml_presets('stages', 'name')\nPR_CHALLENGE = int(project_stages['CHALLENGE']['id'])\nPROJECT_PROGRESS = {}\nPROJECT_PROGRESS_STAGE = {}\nfor ps in project_stages:\n pid = int(project_stages[ps]['id'])\n PROJECT_PROGRESS[pid] = project_stages[ps]['description']\n PROJECT_PROGRESS_STAGE[pid] = project_stages[ps]\n\n\ndef projectProgressList(All=True, WithEmpty=True):\n if not All:\n return [(PR_CHALLENGE, PROJECT_PROGRESS[PR_CHALLENGE])]\n pl = [(g, PROJECT_PROGRESS[g]) for g in PROJECT_PROGRESS]\n if WithEmpty:\n pl.append((-100, ''))\n return sorted(pl, key=lambda x: x[0])\n\n\ndef getProjectStages():\n pl = []\n for ix, g in enumerate(sorted(PROJECT_PROGRESS)):\n stage = PROJECT_PROGRESS_STAGE[g]\n stage['index'] = ix + 1\n pl.append(stage)\n return pl\n\n\ndef getProjectPhase(project):\n if project is None or project.progress is None:\n return \"\"\n if project.progress not in PROJECT_PROGRESS_STAGE:\n return PROJECT_PROGRESS_STAGE[PR_CHALLENGE]['phase']\n return PROJECT_PROGRESS_STAGE[project.progress]['phase']\n\n\ndef getStageByProgress(progress):\n if progress is None:\n return None\n if progress not in PROJECT_PROGRESS_STAGE:\n return PROJECT_PROGRESS_STAGE[PR_CHALLENGE]\n return PROJECT_PROGRESS_STAGE[progress]\n\n\ndef 
isUserActive(user):\n if not user or 'active' not in user.__dir__():\n return False\n return user.active\n\n\ndef validateProjectData(project):\n stage = project.stage\n all_valid = True\n # Collect project data\n project_data = project.data\n # Iterate through the stage conditions\n for v in stage['conditions']['validate']:\n v['valid'] = False\n vf = v['field']\n if vf in project_data:\n pdvf = project_data[vf]\n if (\n ('min' in v and len(pdvf) >= v['min'])\n or ('max' in v and v['min'] <= len(pdvf) <= v['max'])\n or (\n 'test' in v and v['test'] == 'validurl'\n and pdvf.startswith('http')\n )\n ):\n v['valid'] = True\n if not v['valid']:\n all_valid = False\n return stage, all_valid\n\n\ndef getActivityByType(a):\n \"\"\" Returns Activity item representated as a tuple \"\"\"\n author = title = text = icon = None\n # Obtain author if available\n if a.user:\n author = a.user.username\n if not a.user.active:\n return None\n else:\n author = \"?\"\n # Based on action, populate activity fields\n if a.action == 'sync':\n # title = \"Synchronized\"\n text = \"Readme updated\"\n icon = 'taxi'\n elif a.action == 'post' and a.name == 'review':\n text = a.content\n icon = 'comment'\n elif a.action == 'post' and a.content is not None:\n text = a.content\n icon = 'pencil'\n elif a.name == 'star':\n # title = \"Team forming\"\n text = \"Joined the team\"\n icon = 'thumbs-up'\n elif a.name == 'update' and a.action == 'commit':\n # title = \"Code commit\"\n text = a.content\n author = None\n icon = 'random'\n elif a.name == 'update':\n text = \"Worked on the pitch\"\n icon = 'paperclip'\n elif a.name == 'create':\n text = \"Challenge started\"\n icon = 'rocket'\n elif a.name == 'boost':\n title = a.action\n text = a.content\n icon = 'trophy'\n else:\n return None\n return (author, title, text, icon)\n","sub_path":"dribdat/user/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542231754","text":"# -*-coding:gbk*-\n'''\n请用已学过的知识编写程序,找出小甲鱼藏在下边这个长字符串中的密码,\\\n密码的埋藏点符合以下规律:\na) 每位密码为单个小写字母\nb) 每位密码的左右两边均有且只有三个大写字母qH\n'''\nfile1 = open('D:/string2.txt')\nstr1 = file1.read()\n\n\ncountA = 0 # 统计前边的大写字母\ncountB = 0 # 统计小写字母\ncountC = 0 # 统计后边的大写字母\nlength = len(str1)\n\nfor i in range(length):\n if str1[i] == '\\n':\n continue\n\n \"\"\"\n |如果str1[i]是大写字母:\n |-- 如果已经出现小写字母:\n |-- -- 统计后边的大写字母\n |-- 如果未出现小写字母:\n |-- -- 清空后边大写字母的统计\n |-- -- 统计前边的大写字母\n \"\"\"\n if str1[i].isupper():\n if countB:\n countC += 1\n else:\n countC = 0\n countA += 1\n\n \"\"\"\n |如果str1[i]是小写字母:\n |-- 如果小写字母前边不是三个大写字母(不符合条件):\n |-- -- 清空所有记录,重新统计\n |-- 如果小写字母前边是三个大写字母(符合条件):\n |-- -- 如果已经存在小写字母:\n |-- -- -- 清空所有记录,重新统计(出现两个小写字母)\n |-- -- 如果该小写字母是唯一的:\n |-- -- -- countB记录出现小写字母,准备开始统计countC\n \"\"\"\n if str1[i].islower():\n if countA != 3:\n countA = 0\n countB = 0\n countC = 0\n else: \n if countB:\n countA = 0\n countB = 0\n countC = 0\n else:\n countB = 1\n countC = 0\n target = i\n\n \"\"\"\n |如果前边和后边都是三个大写字母:\n |-- 如果后边第四个字母也是大写字母(不符合条件):\n |-- -- 清空记录B和C,重新统计\n |-- 如果后边仅有三个大写字母(符合所有条件):\n |-- -- 打印结果,并清空所有记录,进入下一轮统计\n \"\"\"\n if countA == 3 and countC == 3:\n if i+1 != length and str1[i+1].isupper():\n countB = 0\n countC = 0\n else:\n print(str1[target], end='')\n countA = 3\n countB = 0\n countC = 0","sub_path":"python 
file/python不方便Eclipse在运行的脚本/学习代码/第20章练习2.py","file_name":"第20章练习2.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"317636079","text":"#\n# @lc app=leetcode.cn id=110 lang=python3\n#\n# [110] 平衡二叉树\n#\n# https://leetcode-cn.com/problems/balanced-binary-tree/description/\n#\n# algorithms\n# Easy (45.79%)\n# Total Accepted: 9.4K\n# Total Submissions: 20.4K\n# Testcase Example: '[3,9,20,null,null,15,7]'\n#\n# 给定一个二叉树,判断它是否是高度平衡的二叉树。\n#\n# 本题中,一棵高度平衡二叉树定义为:\n#\n#\n# 一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过1。\n#\n#\n# 示例 1:\n#\n# 给定二叉树 [3,9,20,null,null,15,7]\n#\n# ⁠ 3\n# ⁠ / \\\n# ⁠ 9 20\n# ⁠ / \\\n# ⁠ 15 7\n#\n# 返回 true 。\n#\n# 示例 2:\n#\n# 给定二叉树 [1,2,2,3,3,null,null,4,4]\n#\n# ⁠ 1\n# ⁠ / \\\n# ⁠ 2 2\n# ⁠ / \\\n# ⁠ 3 3\n# ⁠ / \\\n# ⁠4 4\n#\n#\n# 返回 false 。\n#\n#\n# Definition for a binary tree node.\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def isBalanced(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n if -1 == self.getDepth(root):\n return False\n else:\n return True\n\n def getDepth(self, node):\n if node == None:\n return 0\n\n leftDepth = self.getDepth(node.left)\n rightDepth = self.getDepth(node.right)\n\n if leftDepth == -1 or rightDepth == -1:\n return -1\n elif abs(leftDepth - rightDepth) > 1:\n return -1\n else:\n return max(leftDepth, rightDepth) + 1\n\n\n# arr = [3, 9, 20, None, None, 15, 7]\n# root = TreeNode(3)\n# root.left = TreeNode(9)\n# root.right = TreeNode(20)\n# root.right.left = TreeNode(15)\n# root.right.right = TreeNode(7)\n\n\n# s = Solution()\n\n# print(s.isBalanced(root))\n","sub_path":"110.balanced-binary-tree.py","file_name":"110.balanced-binary-tree.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224210881","text":"#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-array/blob/master/LICENSE\n\nimport operator\nimport math\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\n\nimport numpy\nimport numba\nimport numba.typing.arraydecl\n\nimport awkward.array.base\nimport awkward.array.jagged\nfrom .base import NumbaMethods\nfrom .base import AwkwardArrayType\nfrom .base import clsrepr\nfrom .base import ISADVANCED\nfrom .base import NOTADVANCED\nfrom .base import sliceval2\nfrom .base import sliceval3\n\n######################################################################## Numba-accelerated interface\n\nclass JaggedArrayNumba(NumbaMethods, awkward.array.jagged.JaggedArray):\n # @classmethod\n # def offsetsaliased(cls, starts, stops):\n ### base implementation is fine and don't need in Numba\n\n @classmethod\n def counts2offsets(cls, counts):\n return cls._counts2offsets(counts)\n\n @staticmethod\n @numba.njit\n def _counts2offsets(counts):\n offsets = numpy.empty(len(counts) + 1, dtype=numpy.int64)\n offsets[0] = 0\n for i in range(len(counts)):\n offsets[i + 1] = offsets[i] + counts[i]\n return offsets\n\n @classmethod\n def offsets2parents(cls, offsets):\n return cls._offsets2parents(offsets)\n\n @staticmethod\n @numba.njit\n def _offsets2parents(offsets):\n if len(offsets) == 0:\n raise ValueError(\"offsets must have at least one element\")\n parents = numpy.empty(offsets[-1], dtype=numpy.int64)\n j = 0\n k = -1\n for i in offsets:\n while j < i:\n parents[j] = k\n j += 1\n k += 1\n 
return parents\n\n @classmethod\n def startsstops2parents(cls, starts, stops):\n return cls._startsstops2parents(starts, stops)\n\n @staticmethod\n @numba.njit\n def _startsstops2parents(starts, stops):\n out = numpy.full(stops.max(), -1, numpy.int64)\n for i in range(len(starts)):\n out[starts[i]:stops[i]] = i\n return out\n\n @classmethod\n def parents2startsstops(cls, parents, length=None):\n if length is None:\n length = parents.max() + 1\n return cls._parents2startsstops(parents, length)\n\n @staticmethod\n @numba.njit\n def _parents2startsstops(parents, length):\n starts = numpy.zeros(length, numpy.int64)\n stops = numpy.zeros(length, numpy.int64)\n\n last = -1\n for k in range(len(parents)):\n this = parents[k]\n if last != this:\n if last >= 0 and last < length:\n stops[last] = k\n if this >= 0 and this < length:\n starts[this] = k\n last = this\n\n if last != -1:\n stops[last] = len(parents)\n\n return starts, stops\n\n # @classmethod\n # def uniques2offsetsparents(cls, uniques):\n ### base implementation is fine and don't need in Numba\n\n # def __init__(self, starts, stops, content):\n ### base implementation is fine and already exposed in Numba\n\n @classmethod\n def fromiter(cls, iterable):\n import awkward.numba\n return awkward.numba.fromiter(iterable)\n\n # @classmethod\n # def fromoffsets(cls, offsets, content):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromcounts(cls, counts, content):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromparents(cls, parents, content, length=None):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromuniques(cls, uniques, content):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromindex(cls, index, content, validate=True):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromjagged(cls, jagged):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromregular(cls, regular):\n ### base implementation is fine and don't need in Numba\n\n # @classmethod\n # def fromfolding(cls, content, size):\n ### base implementation is fine and don't need in Numba\n\n # def copy(self, starts=None, stops=None, content=None):\n ### base implementation is fine and don't need in Numba\n\n # def deepcopy(self, starts=None, stops=None, content=None):\n ### base implementation is fine and don't need in Numba\n\n # def empty_like(self, **overrides):\n ### base implementation is fine and don't need in Numba\n\n # def zeros_like(self, **overrides):\n ### base implementation is fine and don't need in Numba\n\n # def ones_like(self, **overrides):\n ### base implementation is fine and don't need in Numba\n\n # def __awkward_persist__(self, ident, fill, prefix, suffix, schemasuffix, storage, compression, **kwargs):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def starts(self):\n ### base implementation is fine and already exposed in Numba\n\n # @starts.setter\n # def starts(self, value):\n ### base implementation is fine and don't need in Numba\n \n # @property\n # def stops(self):\n ### base implementation is fine and already exposed in Numba\n\n # @stops.setter\n # def stops(self, value):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def content(self):\n ### base implementation is fine and already exposed in Numba\n\n # @content.setter\n # def content(self, value):\n ### base 
implementation is fine and don't need in Numba\n\n # @property\n # def offsets(self):\n ### base implementation is fine and already exposed in Numba\n\n # @offsets.setter\n # def offsets(self, value):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def counts(self):\n ### base implementation is fine and already exposed in Numba\n\n # @counts.setter\n # def counts(self, value):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def parents(self):\n ### base implementation is fine and already exposed in Numba\n\n # @parents.setter\n # def parents(self, value):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def index(self):\n ### base implementation is fine and already exposed in Numba\n\n # def __len__(self):\n ### base implementation is fine and already exposed in Numba\n\n # def _gettype(self, seen):\n ### base implementation is fine and don't need in Numba\n\n def _valid(self):\n pass # do validation in place from now on\n\n # @staticmethod\n # def _validstartsstops(starts, stops):\n ### base implementation is fine and don't need in Numba\n\n # def __iter__(self, checkiter=True):\n ### base implementation is fine and already exposed in Numba\n\n def __getitem__(self, where):\n if not isinstance(where, tuple):\n where = (where,)\n if len(where) == 0:\n return self\n\n newwhere = ()\n for x in where:\n if isinstance(x, Iterable) and not isinstance(x, (numpy.ndarray, awkward.array.base.AwkwardArray)):\n newwhere = newwhere + (numpy.array(x),)\n else:\n newwhere = newwhere + (x,)\n\n if len(newwhere) == 1:\n newwhere = newwhere[0]\n \n return self._getitem_impl(newwhere)\n\n @numba.njit\n def _getitem_impl(self, newwhere):\n return self[newwhere]\n\n # def __setitem__(self, where, what):\n\n @numba.generated_jit(nopython=True)\n def tojagged(self, data):\n assert not isinstance(data, JaggedArrayType)\n\n if isinstance(data, AwkwardArrayType):\n def impl(self, data):\n if len(self.starts) != len(data):\n raise ValueError(\"cannot broadcast AwkwardArray to match JaggedArray with a different length\")\n if len(self.starts.shape) != 1:\n raise ValueError(\"cannot broadcast AwkwardArray to match JaggedArray that has len(starts.shape) != 1; call jagged.structure1d() first\")\n index = numpy.empty(len(self.content), numpy.int64)\n for i in range(len(self.starts)):\n index[self.starts[i]:self.stops[i]] = i\n return _JaggedArray_new(self, self.starts, self.stops, data[index], self.iscompact)\n return impl\n\n elif isinstance(data, numba.types.Array):\n def impl(self, data):\n if self.starts.shape != data.shape:\n raise ValueError(\"cannot broadcast Numpy array to match a JaggedArray with a different length (or more generally, starts.shape)\")\n content = numpy.empty(len(self.content), data.dtype)\n flatstarts = self.starts.reshape(-1)\n flatstops = self.stops.reshape(-1)\n flatdata = data.reshape(-1)\n for i in range(len(flatstarts)):\n content[flatstarts[i]:flatstops[i]] = flatdata[i]\n return _JaggedArray_new(self, self.starts, self.stops, content, self.iscompact)\n return impl\n\n else:\n def impl(self, data):\n content = numpy.full(len(self.content), data)\n return _JaggedArray_new(self, self.starts, self.stops, content, self.iscompact)\n return impl\n\n # def _tojagged(self, starts=None, stops=None, copy=True):\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if \"out\" in kwargs:\n raise NotImplementedError(\"in-place operations not supported\")\n if method != \"__call__\":\n return 
NotImplemented\n\n first = None\n inputs = list(inputs)\n for i in range(len(inputs)):\n if isinstance(inputs[i], awkward.array.jagged.JaggedArray):\n inputs[i] = inputs[i].compact()\n shift = inputs[i].starts[0]\n if shift != 0:\n inputs[i] = inputs[i].copy(inputs[i].starts - shift, inputs[i].stops - shift, inputs[i].content[shift:])\n if first is None:\n first = inputs[i]\n elif first.starts[0] != inputs[i].starts[0] or not numpy.array_equal(first.stops, inputs[i].stops):\n raise ValueError(\"JaggedArrays in Numpy ufunc have incompatible structure\")\n\n assert first is not None\n\n for i in range(len(inputs)):\n if isinstance(inputs[i], awkward.array.jagged.JaggedArray):\n pass\n elif isinstance(inputs[i], awkward.array.base.AwkwardArray):\n inputs[i] = first.tojagged(inputs[i])\n elif isinstance(inputs[i], Iterable):\n inputs[i] = first.tojagged(numpy.array(inputs[i], copy=False))\n else:\n inputs[i] = first.tojagged(inputs[i])\n\n for i in range(len(inputs)):\n inputs[i] = inputs[i]._content\n\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if isinstance(result, tuple):\n return tuple(self.Methods.maybemixin(type(x), self.JaggedArray)(first.starts, first.stops, x) if isinstance(x, (numpy.ndarray, awkward.array.base.AwkwardArray)) else x for x in result)\n else:\n return self.Methods.maybemixin(type(result), self.JaggedArray)(first.starts, first.stops, result)\n\n @numba.njit\n def regular(self):\n return self.regular()\n\n ### FIXME: this whole section can't be done until we have Tables\n\n # def _argpairs(self):\n\n # def _argdistincts(self, absolute):\n\n # def argdistincts(self, nested=False):\n\n # def distincts(self, nested=False):\n\n # def argpairs(self, nested=False):\n\n # def pairs(self, nested=False):\n\n # def _argcross(self, other):\n\n # def argcross(self, other, nested=False):\n\n # def cross(self, other, nested=False):\n\n # def _canuseoffset(self):\n ### base implementation is fine and don't need in Numba\n\n # @property\n # def iscompact(self):\n ### base implementation is fine and already exposed in Numba\n\n @numba.njit\n def compact(self):\n return self.compact()\n\n def flatten(self, axis=0):\n if not self._util_isinteger(axis) or axis < 0:\n raise TypeError(\"axis must be a non-negative integer (can't count from the end)\")\n if axis > 0:\n if isinstance(self._content, JaggedArray):\n counts = self.JaggedArray.fromcounts(self.counts, self._content.counts).sum()\n return self.JaggedArray.fromcounts(counts, self._content.flatten(axis=axis - 1))\n\n if len(self) == 0:\n return self._content[0:0]\n elif self.iscompact:\n return self._content[self._starts[0]:self.stops[-1]] # no underscore in stops\n else:\n out = self.compact()\n return out._content[out._starts[0]:out.stops[0]] # no underscore in stops\n\n # def structure1d(self, levellimit=None):\n ### base implementation is fine and can't(?) 
be exposed in Numba (type manipulation is hard!)\n\n # def _hasjagged(self):\n ### base implementation is fine\n\n # def _reduce(self, ufunc, identity, dtype):\n\n @numba.njit\n def argmin(self):\n return self.argmin()\n\n @numba.njit\n def argmax(self):\n return self.argmax()\n\n def _argminmax(self, ismin):\n raise RuntimeError(\"helper function not needed in JaggedArrayNumba\")\n\n def _argminmax_general(self, ismin):\n raise RuntimeError(\"helper function not needed in JaggedArrayNumba\")\n\n # def _concatenate_axis0(isclassmethod, cls_or_self, arrays):\n # if isinstance(arrays, (numpy.ndarray, awkward.array.base.AwkwardArray)):\n # arrays = (arrays,)\n # else:\n # arrays = tuple(arrays)\n # if isclassmethod:\n # cls = cls_or_self\n # if not all(isinstance(x, awkward.array.jagged.JaggedArray) for x in arrays):\n # raise TypeError(\"cannot concatenate non-JaggedArrays with JaggedArray.concatenate\")\n # else:\n # self = cls_or_self\n # cls = self.__class__\n # if not isinstance(self, awkward.array.jagged.JaggedArray) or not all(isinstance(x, awkward.array.jagged.JaggedArray) for x in arrays):\n # raise TypeError(\"cannot concatenate non-JaggedArrays with JaggedArray.concatenate\")\n # arrays = (self,) + arrays\n # if len(arrays) == 0:\n # raise TypeError(\"concatenate requires at least one array\")\n # return _JaggedArray_concatenate_njit(arrays, axis)\n ### FIXME: left unfinished\n\n # @awkward.util.bothmethod\n # def zip(isclassmethod, cls_or_self, columns1={}, *columns2, **columns3):\n ### FIXME: can't do this one until we have Tables\n\n # def pad(self, length, maskedwhen=True, clip=False):\n\n######################################################################## register types in Numba\n\n@numba.extending.typeof_impl.register(awkward.array.jagged.JaggedArray)\ndef _JaggedArray_typeof(val, c):\n return JaggedArrayType(numba.typeof(val.starts), numba.typeof(val.stops), numba.typeof(val.content), special=type(val))\n\nclass JaggedArrayType(AwkwardArrayType):\n def __init__(self, startstype, stopstype, contenttype, special=awkward.array.jagged.JaggedArray):\n super(JaggedArrayType, self).__init__(name=\"JaggedArrayType({0}, {1}, {2}{3})\".format(startstype.name, stopstype.name, contenttype.name, \"\" if special is awkward.array.jagged.JaggedArray else clsrepr(special)))\n if startstype.ndim != stopstype.ndim:\n raise ValueError(\"JaggedArray.starts must have the same number of dimensions as JaggedArray.stops\")\n if startstype.ndim == 0:\n raise ValueError(\"JaggedArray.starts and JaggedArray.stops must have at least one dimension\")\n self.startstype = startstype\n self.stopstype = stopstype\n self.contenttype = contenttype\n self.special = special\n\n def getitem(self, wheretype):\n if self.startstype.ndim > 1 and not any(isinstance(x, (numba.types.Array, JaggedArrayType)) for x in wheretype.types[:self.startstype.ndim]):\n headtype = numba.types.Tuple(wheretype.types[:self.startstype.ndim])\n tailtype = numba.types.Tuple(wheretype.types[self.startstype.ndim:])\n\n outstartstype = numba.typing.arraydecl.get_array_index_type(self.startstype, headtype).result\n outstopstype = numba.typing.arraydecl.get_array_index_type(self.stopstype, headtype).result\n if isinstance(self.contenttype, JaggedArrayType):\n outcontenttype = self.contenttype.getitem(tailtype)\n else:\n outcontenttype = numba.typing.arraydecl.get_array_index_type(self.contenttype, tailtype).result\n\n assert isinstance(outstartstype, numba.types.Array) == isinstance(outstopstype, numba.types.Array)\n if 
isinstance(outstartstype, numba.types.Array):\n return JaggedArrayType(outstartstype, outstopstype, outcontenttype, special=self.special)\n else:\n return outcontenttype\n\n else:\n headtype = wheretype.types[0]\n tailtype = numba.types.Tuple(wheretype.types[1:])\n\n if isinstance(headtype, JaggedArrayType) and len(tailtype.types) == 0:\n return _JaggedArray_typer_getitem_jagged(self, headtype)\n\n else:\n fake = _JaggedArray_typer_getitem(JaggedArrayType(JaggedArrayNumba.NUMBA_INDEXTYPE[:], JaggedArrayNumba.NUMBA_INDEXTYPE[:], self), wheretype, NOTADVANCED)\n if isinstance(fake, numba.types.Array):\n return fake.dtype\n else:\n return fake.contenttype\n\n @property\n def len_impl(self):\n return _JaggedArray_lower_len\n\n @property\n def getitem_impl(self):\n return lambda context, builder, sig, args: _JaggedArray_lower_getitem_integer(context, builder, sig, args, checkvalid=False)\n\ndef _JaggedArray_typer_getitem_jagged(arraytype, headtype):\n if isinstance(headtype.contenttype, JaggedArrayType):\n if not isinstance(arraytype.contenttype, JaggedArrayType):\n raise TypeError(\"index (in square brackets) is more deeply jagged than array (before square brackets)\")\n contenttype = _JaggedArray_typer_getitem_jagged(arraytype.contenttype, headtype.contenttype)\n elif isinstance(headtype.contenttype, numba.types.Array) and headtype.contenttype.ndim == 1 and isinstance(headtype.contenttype.dtype, (numba.types.Boolean, numba.types.Integer)):\n contenttype = arraytype.contenttype\n else:\n raise TypeError(\"jagged indexing must be boolean or integers with 1-dimensional content\")\n\n return JaggedArrayType(arraytype.startstype, arraytype.stopstype, contenttype, special=arraytype.special)\n\ndef _JaggedArray_typer_getitem(arraytype, wheretype, advancedtype):\n if len(wheretype.types) == 0:\n return arraytype\n\n if arraytype.startstype.ndim != 1 or arraytype.stopstype.ndim != 1:\n raise NotImplementedError(\"multidimensional starts and stops not supported; call jagged.structure1d() first\")\n\n isarray = (isinstance(wheretype.types[0], numba.types.Array) and wheretype.types[0].ndim == 1)\n\n contenttype = _JaggedArray_typer_getitem(arraytype.contenttype, numba.types.Tuple(wheretype.types[1:]), ISADVANCED if isarray else advancedtype)\n\n if isinstance(wheretype.types[0], numba.types.Integer) or (isarray and advancedtype == ISADVANCED):\n return contenttype\n elif isinstance(wheretype.types[0], numba.types.SliceType) or (isarray and advancedtype == NOTADVANCED):\n return JaggedArrayType(arraytype.startstype, arraytype.stopstype, contenttype, special=arraytype.special)\n elif isinstance(wheretype.types[0], JaggedArrayType):\n raise TypeError(\"cannot use jagged indexing in a tuple\")\n else:\n raise TypeError(\"cannot be used for indexing: {0}\".format(wheretype.types[0]))\n\ndef _JaggedArray_getitem_next(array, where):\n pass\n\n@numba.extending.type_callable(_JaggedArray_getitem_next)\ndef _JaggedArray_type_getitem_next(context):\n return _JaggedArray_typer_getitem\n\n######################################################################## model and boxing\n\n@numba.extending.register_model(JaggedArrayType)\nclass JaggedArrayModel(numba.datamodel.models.StructModel):\n def __init__(self, dmm, fe_type):\n members = [(\"starts\", fe_type.startstype),\n (\"stops\", fe_type.stopstype),\n (\"content\", fe_type.contenttype),\n (\"iscompact\", JaggedArrayNumba.NUMBA_BOOLTYPE)]\n super(JaggedArrayModel, self).__init__(dmm, fe_type, members)\n\nnumba.extending.make_attribute_wrapper(JaggedArrayType, 
\"starts\", \"starts\")\nnumba.extending.make_attribute_wrapper(JaggedArrayType, \"stops\", \"stops\")\nnumba.extending.make_attribute_wrapper(JaggedArrayType, \"content\", \"content\")\nnumba.extending.make_attribute_wrapper(JaggedArrayType, \"iscompact\", \"iscompact\")\n\n@numba.extending.unbox(JaggedArrayType)\ndef _JaggedArray_unbox(typ, obj, c):\n starts_obj = c.pyapi.object_getattr_string(obj, \"starts\")\n stops_obj = c.pyapi.object_getattr_string(obj, \"stops\")\n content_obj = c.pyapi.object_getattr_string(obj, \"content\")\n iscompact_obj = c.pyapi.object_getattr_string(obj, \"iscompact\")\n\n array = numba.cgutils.create_struct_proxy(typ)(c.context, c.builder)\n array.starts = c.pyapi.to_native_value(typ.startstype, starts_obj).value\n array.stops = c.pyapi.to_native_value(typ.stopstype, stops_obj).value\n array.content = c.pyapi.to_native_value(typ.contenttype, content_obj).value\n array.iscompact = c.pyapi.to_native_value(JaggedArrayNumba.NUMBA_BOOLTYPE, iscompact_obj).value\n\n c.pyapi.decref(starts_obj)\n c.pyapi.decref(stops_obj)\n c.pyapi.decref(content_obj)\n c.pyapi.decref(iscompact_obj)\n\n is_error = numba.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())\n return numba.extending.NativeValue(array._getvalue(), is_error)\n\n@numba.extending.box(JaggedArrayType)\ndef _JaggedArray_box(typ, val, c):\n array = numba.cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n starts_obj = c.pyapi.from_native_value(typ.startstype, array.starts, c.env_manager)\n stops_obj = c.pyapi.from_native_value(typ.stopstype, array.stops, c.env_manager)\n content_obj = c.pyapi.from_native_value(typ.contenttype, array.content, c.env_manager)\n\n cls = c.pyapi.unserialize(c.pyapi.serialize_object(typ.special))\n out = c.pyapi.call_function_objargs(cls, (starts_obj, stops_obj, content_obj))\n\n c.pyapi.decref(starts_obj)\n c.pyapi.decref(stops_obj)\n c.pyapi.decref(content_obj)\n\n return out\n\n@numba.extending.type_callable(awkward.array.jagged.JaggedArray)\ndef _JaggedArray_type_init(context):\n def typer(startstype, stopstype, contenttype):\n if isinstance(startstype, numba.types.Array) and isinstance(stopstype, numba.types.Array) and isinstance(contenttype, (numba.types.Array, AwkwardArrayType)):\n return JaggedArrayType(startstype, stopstype, contenttype, special=awkward.array.jagged.JaggedArray)\n return typer\n\n@numba.extending.type_callable(JaggedArrayNumba)\ndef _JaggedArray_type_init(context):\n def typer(startstype, stopstype, contenttype):\n if isinstance(startstype, numba.types.Array) and isinstance(stopstype, numba.types.Array) and isinstance(contenttype, (numba.types.Array, AwkwardArrayType)):\n return JaggedArrayType(startstype, stopstype, contenttype, special=JaggedArrayNumba)\n return typer\n\n@numba.extending.lower_builtin(awkward.array.jagged.JaggedArray, numba.types.Array, numba.types.Array, numba.types.Array)\n@numba.extending.lower_builtin(awkward.array.jagged.JaggedArray, numba.types.Array, numba.types.Array, AwkwardArrayType)\n@numba.extending.lower_builtin(JaggedArrayNumba, numba.types.Array, numba.types.Array, numba.types.Array)\n@numba.extending.lower_builtin(JaggedArrayNumba, numba.types.Array, numba.types.Array, AwkwardArrayType)\ndef _JaggedArray_init_array(context, builder, sig, args):\n startstype, stopstype, contenttype = sig.args\n startsval, stopsval, contentval = args\n\n if context.enable_nrt:\n context.nrt.incref(builder, startstype, startsval)\n context.nrt.incref(builder, stopstype, stopsval)\n context.nrt.incref(builder, contenttype, 
contentval)\n\n array = numba.cgutils.create_struct_proxy(sig.return_type)(context, builder)\n array.starts = startsval\n array.stops = stopsval\n array.content = contentval\n array.iscompact = context.get_constant(JaggedArrayNumba.NUMBA_BOOLTYPE, False) # unless you reproduce that logic here or call out to Python\n return array._getvalue()\n\ndef _JaggedArray_new(array, starts, stops, content, iscompact):\n pass\n\n@numba.extending.type_callable(_JaggedArray_new)\ndef _JaggedArray_type_new(context):\n def typer(arraytype, startstype, stopstype, contenttype, iscompacttype):\n return JaggedArrayType(startstype, stopstype, contenttype, special=arraytype.special)\n return typer\n\n@numba.extending.lower_builtin(_JaggedArray_new, JaggedArrayType, numba.types.Array, numba.types.Array, numba.types.Array, numba.types.Boolean)\n@numba.extending.lower_builtin(_JaggedArray_new, JaggedArrayType, numba.types.Array, numba.types.Array, AwkwardArrayType, numba.types.Boolean)\ndef _JaggedArray_lower_new(context, builder, sig, args):\n arraytype, startstype, stopstype, contenttype, iscompacttype = sig.args\n arrayval, startsval, stopsval, contentval, iscompactval = args\n\n if context.enable_nrt:\n context.nrt.incref(builder, startstype, startsval)\n context.nrt.incref(builder, stopstype, stopsval)\n context.nrt.incref(builder, contenttype, contentval)\n\n array = numba.cgutils.create_struct_proxy(sig.return_type)(context, builder)\n array.starts = startsval\n array.stops = stopsval\n array.content = contentval\n array.iscompact = iscompactval\n return array._getvalue()\n\n######################################################################## utilities\n\ndef _check_startstop_contentlen(context, builder, starttype, startval, stoptype, stopval, contenttype, contentval):\n if isinstance(contenttype, numba.types.Array):\n contentlen = numba.targets.arrayobj.array_len(context, builder, numba.types.intp(contenttype), (contentval,))\n else:\n contentlen = _JaggedArray_lower_len(context, builder, numba.types.intp(contenttype), (contentval,))\n\n with builder.if_then(builder.or_(builder.or_(builder.icmp_signed(\"<\", startval, context.get_constant(starttype, 0)),\n builder.icmp_signed(\"<\", stopval, context.get_constant(stoptype, 0))),\n builder.or_(builder.icmp_signed(\">=\", startval, contentlen),\n builder.icmp_signed(\">\", stopval, contentlen))),\n likely=False):\n context.call_conv.return_user_exc(builder, ValueError, (\"JaggedArray.starts or JaggedArray.stops is beyond the range of JaggedArray.content\",))\n\n######################################################################## lowered len\n\n@numba.extending.lower_builtin(len, JaggedArrayType)\ndef _JaggedArray_lower_len(context, builder, sig, args):\n arraytype, = sig.args\n arrayval, = args\n\n array = numba.cgutils.create_struct_proxy(arraytype)(context, builder, value=arrayval)\n\n return numba.targets.arrayobj.array_len(context, builder, numba.types.intp(arraytype.startstype), (array.starts,))\n\n######################################################################## lowered getitem\n\n@numba.extending.lower_builtin(operator.getitem, JaggedArrayType, numba.types.Integer)\ndef _JaggedArray_lower_getitem_integer(context, builder, sig, args, checkvalid=True):\n arraytype, wheretype = sig.args\n arrayval, whereval = args\n\n array = numba.cgutils.create_struct_proxy(arraytype)(context, builder, value=arrayval)\n\n startstype = arraytype.startstype\n stopstype = arraytype.stopstype\n contenttype = arraytype.contenttype\n\n if startstype.ndim == 1:\n 
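        # [editor's aside] A hedged sketch of what the branch below implements; the
        # array values are hypothetical and JaggedArrayNumba is the subclass defined
        # in this file, assumed importable where this runs. For ndim == 1, an integer
        # index i lowers to the content slice content[starts[i]:stops[i]]:
        #
        #     import numpy, numba
        #
        #     @numba.njit
        #     def first_sublist(jagged):
        #         return jagged[0]          # handled by _JaggedArray_lower_getitem_integer
        #
        #     a = JaggedArrayNumba(numpy.array([0, 2]),           # starts
        #                          numpy.array([2, 3]),           # stops
        #                          numpy.array([1.1, 2.2, 3.3]))  # content
        #     first_sublist(a)              # -> array([1.1, 2.2]) == content[0:2]
        #
        # The else branch instead slices starts/stops one dimension down and returns
        # a shallower JaggedArray over the same content.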
start = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, startstype.dtype(startstype, wheretype), (array.starts, whereval))\n stop = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, stopstype.dtype(stopstype, wheretype), (array.stops, whereval))\n\n if checkvalid:\n _check_startstop_contentlen(context, builder, startstype.dtype, start, stopstype.dtype, stop, contenttype, array.content)\n\n if isinstance(contenttype, numba.types.Array):\n return numba.targets.arrayobj.getitem_arraynd_intp(context, builder, contenttype(contenttype, numba.types.slice2_type), (array.content, sliceval2(context, builder, start, stop)))\n else:\n return _JaggedArray_lower_getitem_slice(context, builder, contenttype(contenttype, numba.types.slice2_type), (array.content, sliceval2(context, builder, start, stop)))\n\n else:\n outstartstype = numba.types.Array(startstype.dtype, startstype.ndim - 1, startstype.layout)\n outstopstype = numba.types.Array(stopstype.dtype, stopstype.ndim - 1, stopstype.layout)\n\n starts = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, outstartstype(startstype, wheretype), (array.starts, whereval))\n stops = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, outstopstype(stopstype, wheretype), (array.stops, whereval))\n\n outtype = JaggedArrayType(outstartstype, outstopstype, contenttype, special=arraytype.special)\n return _JaggedArray_lower_new(context, builder, outtype(arraytype, outstartstype, outstopstype, contenttype, JaggedArrayNumba.NUMBA_BOOLTYPE), (arrayval, starts, stops, array.content, array.iscompact))\n\n@numba.extending.lower_builtin(operator.getitem, JaggedArrayType, numba.types.SliceType)\ndef _JaggedArray_lower_getitem_slice(context, builder, sig, args):\n arraytype, wheretype = sig.args\n arrayval, whereval = args\n\n array = numba.cgutils.create_struct_proxy(arraytype)(context, builder, value=arrayval)\n\n startstype = arraytype.startstype\n starts = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, startstype(startstype, wheretype), (array.starts, whereval))\n\n stopstype = arraytype.stopstype\n stops = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, stopstype(stopstype, wheretype), (array.stops, whereval))\n\n slice = context.make_helper(builder, wheretype, value=whereval)\n iscompact = builder.and_(array.iscompact,\n builder.or_(builder.icmp_signed(\"==\", slice.step, context.get_constant(numba.types.intp, 1)),\n builder.icmp_signed(\"==\", slice.step, context.get_constant(numba.types.intp, numba.types.intp.maxval))))\n\n contenttype = arraytype.contenttype\n return _JaggedArray_lower_new(context, builder, arraytype(arraytype, startstype, stopstype, contenttype, JaggedArrayNumba.NUMBA_BOOLTYPE), (arrayval, starts, stops, array.content, iscompact))\n\n@numba.extending.lower_builtin(operator.getitem, JaggedArrayType, numba.types.Array)\ndef _JaggedArray_lower_getitem_array(context, builder, sig, args):\n arraytype, wheretype = sig.args\n arrayval, whereval = args\n\n array = numba.cgutils.create_struct_proxy(arraytype)(context, builder, value=arrayval)\n\n startstype = arraytype.startstype\n starts = numba.targets.arrayobj.fancy_getitem_array(context, builder, startstype(startstype, wheretype), (array.starts, whereval))\n\n stopstype = arraytype.stopstype\n stops = numba.targets.arrayobj.fancy_getitem_array(context, builder, stopstype(stopstype, wheretype), (array.stops, whereval))\n\n contenttype = arraytype.contenttype\n return _JaggedArray_lower_new(context, builder, arraytype(arraytype, 
startstype, stopstype, contenttype, JaggedArrayNumba.NUMBA_BOOLTYPE), (arrayval, starts, stops, array.content, context.get_constant(JaggedArrayNumba.NUMBA_BOOLTYPE, False)))\n\n@numba.extending.lower_builtin(operator.getitem, JaggedArrayType, JaggedArrayType)\ndef _JaggedArray_lower_getitem_jaggedarray(context, builder, sig, args):\n arraytype, wheretype = sig.args\n\n if isinstance(wheretype, JaggedArrayType) and isinstance(wheretype.contenttype, JaggedArrayType):\n def getitem(array, where):\n return _JaggedArray_new(array, array.starts, array.stops, array.content[where.content], True)\n\n elif isinstance(wheretype, JaggedArrayType) and isinstance(wheretype.contenttype.dtype, numba.types.Boolean) and isinstance(arraytype.contenttype, numba.types.Array):\n def getitem(array, where):\n if len(array) != len(where):\n raise IndexError(\"jagged index must have the same (outer) length as the JaggedArray it indexes\")\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n content = numpy.empty(where.content.astype(numpy.int64).sum(), array.content.dtype)\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n wherei = where[i]\n if len(wherei) > length:\n raise IndexError(\"jagged index is out of bounds in JaggedArray\")\n\n for j in range(len(wherei)):\n if wherei[j]:\n content[k] = array.content[array.starts[i] + j]\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n return _JaggedArray_new(array, starts, stops, content, True)\n\n elif isinstance(wheretype, JaggedArrayType) and isinstance(wheretype.contenttype.dtype, numba.types.Boolean):\n def getitem(array, where):\n if len(array) != len(where):\n raise IndexError(\"jagged index must have the same (outer) length as the JaggedArray it indexes\")\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n index = numpy.empty(len(where.content), numpy.int64)\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n wherei = where[i]\n if len(wherei) > length:\n raise IndexError(\"jagged index is out of bounds in JaggedArray\")\n\n for j in range(len(wherei)):\n if wherei[j]:\n index[k] = array.starts[i] + j\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n return _JaggedArray_new(array, starts, stops, array.content[index[:k]], True)\n\n elif isinstance(wheretype, JaggedArrayType) and isinstance(wheretype.contenttype.dtype, numba.types.Integer) and isinstance(arraytype.contenttype, numba.types.Array):\n def getitem(array, where):\n if len(array) != len(where):\n raise IndexError(\"jagged index must have the same (outer) length as the JaggedArray it indexes\")\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n content = numpy.empty(len(where.content), array.content.dtype)\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n wherei = where[i]\n\n for j in range(len(wherei)):\n norm = wherei[j]\n if norm < 0:\n norm += length\n if norm < 0 or norm >= length:\n raise IndexError(\"jagged index is out of bounds in JaggedArray\")\n content[k] = array.content[array.starts[i] + norm]\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n return _JaggedArray_new(array, starts, stops, content, True)\n\n elif isinstance(wheretype, JaggedArrayType) and isinstance(wheretype.contenttype.dtype, numba.types.Integer):\n def getitem(array, where):\n if len(array) != len(where):\n raise IndexError(\"jagged 
index must have the same (outer) length as the JaggedArray it indexes\")\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n index = numpy.empty(len(where.content), numpy.int64)\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n wherei = where[i]\n\n for j in range(len(wherei)):\n norm = wherei[j]\n if norm < 0:\n norm += length\n if norm < 0 or norm >= length:\n raise IndexError(\"jagged index is out of bounds in JaggedArray\")\n index[k] = array.starts[i] + norm\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n return _JaggedArray_new(array, starts, stops, array.content[index], True)\n\n else:\n raise AssertionError(where)\n\n return context.compile_internal(builder, getitem, sig, args)\n\n@numba.extending.lower_builtin(operator.getitem, JaggedArrayType, numba.types.BaseTuple)\ndef _JaggedArray_lower_getitem_enter(context, builder, sig, args):\n arraytype, wheretype = sig.args\n arrayval, whereval = args\n\n if len(wheretype.types) == 1:\n if isinstance(wheretype.types[0], numba.types.Integer):\n getitem = _JaggedArray_lower_getitem_integer\n elif isinstance(wheretype.types[0], numba.types.SliceType):\n getitem = _JaggedArray_lower_getitem_slice\n elif isinstance(wheretype.types[0], numba.types.Array):\n getitem = _JaggedArray_lower_getitem_array\n return getitem(context, builder, sig.return_type(arraytype, wheretype.types[0]), (arrayval, builder.extract_value(whereval, 0)))\n\n if any(isinstance(x, numba.types.Array) for x in wheretype.types):\n arraylen = numba.cgutils.alloca_once_value(builder, context.get_constant(JaggedArrayNumba.NUMBA_INDEXTYPE, 0))\n for i, whereitemtype in enumerate(wheretype.types):\n if isinstance(whereitemtype, numba.types.Array):\n if isinstance(whereitemtype.dtype, numba.types.Boolean):\n enter_arraylen = lambda whereitem, arraylen: max(arraylen, whereitem.astype(numpy.int64).sum())\n else:\n enter_arraylen = lambda whereitem, arraylen: max(arraylen, len(whereitem))\n\n whereitemval = builder.extract_value(whereval, i)\n arraylenval = context.compile_internal(builder, enter_arraylen, JaggedArrayNumba.NUMBA_INDEXTYPE(whereitemtype, JaggedArrayNumba.NUMBA_INDEXTYPE), (whereitemval, builder.load(arraylen)))\n builder.store(arraylenval, arraylen)\n\n arraylenval = builder.load(arraylen)\n newwheretype = []\n newwherevals = []\n for i, old in enumerate(wheretype.types):\n if isinstance(old, numba.types.Array) and isinstance(old.dtype, numba.types.Boolean):\n toadvanced = lambda whereitem, arraylen: numpy.where(whereitem)[0]\n elif isinstance(old, numba.types.Array):\n toadvanced = lambda whereitem, arraylen: numpy.full(arraylen, whereitem[0], numpy.int64) if len(whereitem) == 1 else whereitem\n elif isinstance(old, numba.types.Integer):\n toadvanced = lambda whereitem, arraylen: numpy.full(arraylen, whereitem, numpy.int64)\n else:\n toadvanced = None\n\n whereitemval = builder.extract_value(whereval, i)\n if toadvanced is None:\n newwheretype.append(old)\n newwherevals.append(whereitemval)\n else:\n new = numba.types.Array(JaggedArrayNumba.NUMBA_INDEXTYPE, 1, \"C\") if isinstance(old, (numba.types.Array, numba.types.Integer)) else old\n newwheretype.append(new)\n newwherevals.append(context.compile_internal(builder, toadvanced, new(old, JaggedArrayNumba.NUMBA_INDEXTYPE), (whereitemval, arraylenval)))\n\n wheretype = numba.types.Tuple(tuple(newwheretype))\n whereval = context.make_tuple(builder, wheretype, tuple(newwherevals))\n\n def fake1(array, where):\n return 
_JaggedArray_getitem_next(awkward.array.jagged.JaggedArray(numpy.array([0], numpy.int64), numpy.array([len(array)], numpy.int64), array), where, None)[0]\n\n def fake2(array, where):\n out = _JaggedArray_getitem_next(awkward.array.jagged.JaggedArray(numpy.array([0], numpy.int64), numpy.array([len(array)], numpy.int64), array), where, None)\n return out.content[out.starts[0]:out.stops[-1]]\n\n fake = fake1 if all(isinstance(x, numba.types.Integer) for x in wheretype.types) else fake2\n return context.compile_internal(builder, fake, sig.return_type(arraytype, wheretype), (arrayval, whereval))\n\n@numba.generated_jit(nopython=True)\ndef _JaggedArray_getitem_enter_toadvanced(whereitem, arraylen):\n if isinstance(whereitem, numba.types.Array) and isinstance(whereitem.dtype, numba.types.Boolean):\n return lambda whereitem, arraylen: numpy.nonzero(whereitem)[0]\n elif isinstance(whereitem, numba.types.Array):\n return lambda whereitem, arraylen: numpy.full(arraylen, whereitem[0], numpy.int64) if len(whereitem) == 1 else whereitem\n elif isinstance(whereitem, numba.types.Integer):\n return lambda whereitem, arraylen: numpy.full(arraylen, whereitem, numpy.int64)\n else:\n return lambda whereitem, arraylen: whereitem\n\n@numba.extending.lower_builtin(_JaggedArray_getitem_next, numba.types.Array, numba.types.BaseTuple, numba.types.NoneType)\n@numba.extending.lower_builtin(_JaggedArray_getitem_next, JaggedArrayType, numba.types.BaseTuple, numba.types.NoneType)\n@numba.extending.lower_builtin(_JaggedArray_getitem_next, numba.types.Array, numba.types.BaseTuple, numba.types.Array)\n@numba.extending.lower_builtin(_JaggedArray_getitem_next, JaggedArrayType, numba.types.BaseTuple, numba.types.Array)\ndef _JaggedArray_lower_getitem_next(context, builder, sig, args):\n arraytype, wheretype, advancedtype = sig.args\n arrayval, whereval, advancedval = args\n\n if len(wheretype.types) == 0:\n if context.enable_nrt:\n context.nrt.incref(builder, arraytype, arrayval)\n return arrayval\n\n headtype = wheretype.types[0]\n tailtype = numba.types.Tuple(wheretype.types[1:])\n headval = numba.targets.tupleobj.static_getitem_tuple(context, builder, headtype(wheretype, JaggedArrayNumba.NUMBA_INDEXTYPE), (whereval, 0))\n tailval = numba.targets.tupleobj.static_getitem_tuple(context, builder, tailtype(wheretype, numba.types.slice2_type), (whereval, slice(1, None)))\n\n if isinstance(headtype, numba.types.Integer):\n if isinstance(arraytype.contenttype, numba.types.Array):\n def getitem(array, head, tail, advanced):\n content = numpy.empty(len(array.starts), array.content.dtype)\n for i in range(len(array.starts)):\n norm = head\n if norm < 0:\n norm += array.stops[i] - array.starts[i]\n j = array.starts[i] + norm\n if j >= array.stops[i]:\n raise ValueError(\"integer index is beyond the range of one of the JaggedArray starts/stops pairs\")\n content[i] = array.content[j]\n return _JaggedArray_getitem_next(content, tail, advanced)\n\n else:\n def getitem(array, head, tail, advanced):\n index = numpy.empty(len(array.starts), numpy.int64)\n for i in range(len(array.starts)):\n norm = head\n if norm < 0:\n norm += array.stops[i] - array.starts[i]\n j = array.starts[i] + norm\n if j >= array.stops[i]:\n raise ValueError(\"integer index is beyond the range of one of the JaggedArray starts/stops pairs\")\n index[i] = j\n return _JaggedArray_getitem_next(array.content[index], tail, advanced)\n\n elif isinstance(headtype, numba.types.SliceType) and headtype.members == 2 and advancedtype == NOTADVANCED and not any(isinstance(x, 
numba.types.Array) for x in tailtype):\n intp_maxval = numba.types.intp.maxval\n\n def getitem(array, head, tail, advanced):\n if (head.start == 0 or head.start == intp_maxval) and head.stop == intp_maxval:\n next = _JaggedArray_getitem_next(array.content, tail, advanced)\n return _JaggedArray_new(array, array.starts, array.stops, next, array.iscompact)\n\n starts = numpy.empty(len(array.starts), numpy.int64)\n stops = numpy.empty(len(array.starts), numpy.int64)\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n a = head.start\n b = head.stop\n\n if a == intp_maxval:\n a = 0\n elif a < 0:\n a += length\n if b == intp_maxval:\n b = length\n elif b < 0:\n b += length\n\n if b <= a:\n a = 0\n b = 0\n if a < 0:\n a = 0\n elif a > length:\n a = length\n if b < 0:\n b = 0\n elif b > length:\n b = length\n\n starts[i] = array.starts[i] + a\n stops[i] = array.starts[i] + b\n\n next = _JaggedArray_getitem_next(array.content, tail, advanced)\n return _JaggedArray_new(array, starts, stops, next, False)\n\n elif isinstance(headtype, numba.types.SliceType):\n intp_maxval = numba.types.intp.maxval\n\n def getitem(array, head, tail, advanced):\n if head.step == 0:\n raise ValueError(\"slice step cannot be zero\")\n\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n index = numpy.empty(len(array.content), numpy.int64)\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n a = head.start\n b = head.stop\n c = head.step\n if c == intp_maxval:\n c = 1\n\n if a == intp_maxval and c > 0:\n a = 0\n elif a == intp_maxval:\n a = length - 1\n elif a < 0:\n a += length\n\n if b == intp_maxval and c > 0:\n b = length\n elif b == intp_maxval:\n b = -1\n elif b < 0:\n b += length\n\n if c > 0:\n if b <= a:\n a = 0\n b = 0\n if a < 0:\n a = 0\n elif a > length:\n a = length\n if b < 0:\n b = 0\n elif b > length:\n b = length\n else:\n if a <= b:\n a = 0\n b = 0\n if a < -1:\n a = -1\n elif a >= length:\n a = length - 1\n if b < -1:\n b = -1\n elif b >= length:\n b = length - 1\n\n for j in range(a, b, c):\n index[k] = array.starts[i] + j\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n next = _JaggedArray_getitem_next(array.content[index[:k]], tail, _spread_advanced(starts, stops, advanced))\n return _JaggedArray_new(array, starts, stops, next, True)\n\n elif isinstance(headtype, numba.types.Array):\n if advancedtype == NOTADVANCED:\n def getitem(array, head, tail, advanced):\n offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n offsets[0] = 0\n index = numpy.empty(len(head)*len(array.starts), numpy.int64)\n nextadvanced = numpy.empty(len(index), numpy.int64)\n\n k = 0\n for i in range(len(array.starts)):\n length = array.stops[i] - array.starts[i]\n\n for j in range(len(head)):\n norm = head[j]\n if norm < 0:\n norm += length\n if norm < 0 or norm >= length:\n raise IndexError(\"advanced index is out of bounds in JaggedArray\")\n index[k] = array.starts[i] + norm\n nextadvanced[k] = j\n k += 1\n offsets[i + 1] = k\n\n starts = offsets[:-1]\n stops = offsets[1:]\n next = _JaggedArray_getitem_next(array.content[index], tail, nextadvanced)\n return _JaggedArray_new(array, starts, stops, next, True)\n\n else:\n def getitem(array, head, tail, advanced):\n index = numpy.empty(len(array.starts), numpy.int64)\n nextadvanced = numpy.empty(len(index), numpy.int64)\n\n for i in range(len(advanced)):\n length = array.stops[i] - array.starts[i]\n if advanced[i] >= len(head):\n raise 
IndexError(\"advanced index lengths do not match\")\n norm = head[advanced[i]]\n if norm < 0:\n norm += length\n if norm < 0 or norm >= length:\n raise IndexError(\"advanced index is out of bounds in JaggedArray\")\n index[i] = array.starts[i] + norm\n nextadvanced[i] = i\n\n next = _JaggedArray_getitem_next(array.content[index], tail, nextadvanced)\n return next\n\n else:\n raise AssertionError(head)\n\n sig = sig.return_type(arraytype, headtype, tailtype, advancedtype)\n args = (arrayval, headval, tailval, advancedval)\n return context.compile_internal(builder, getitem, sig, args)\n\n@numba.generated_jit(nopython=True)\ndef _spread_advanced(starts, stops, advanced):\n if isinstance(advanced, numba.types.NoneType):\n return lambda starts, stops, advanced: advanced\n else:\n def impl(starts, stops, advanced):\n counts = stops - starts\n nextadvanced = numpy.empty(counts.sum(), numpy.int64)\n k = 0\n for i in range(len(counts)):\n length = counts[i]\n nextadvanced[k : k + counts[i]] = advanced[i]\n k += length\n return nextadvanced\n return impl\n\n######################################################################## overloading ufuncs\n\n### See numba.typing.npydecl for typing, and then maybe lower as usual?\n\n### @numba.extending.lower_builtin(numpy.add, JaggedArrayType, JaggedArrayType)\n### @numba.extending.lower_builtin(numpy.add, JaggedArrayType, numba.types.Array)\n### @numba.extending.lower_builtin(numpy.add, numba.types.Array, JaggedArrayType)\n### ???\n\n\n\n\n\n\n######################################################################## other lowered methods in Numba, including reducers\n\n@numba.typing.templates.infer_getattr\nclass _JaggedArrayType_type_methods(numba.typing.templates.AttributeTemplate):\n key = JaggedArrayType\n\n def resolve_reducer(self, arraytype, args, kwargs, endtype):\n if len(args) == 0 and len(kwargs) == 0:\n if isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, JaggedArrayType):\n contenttype = self.resolve_reducer(arraytype.contenttype, args, kwargs, endtype)\n return JaggedArrayType(arraytype.startstype, arraytype.stopstype, contenttype.return_type, special=JaggedArrayNumba)()\n elif isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, numba.types.Array):\n if endtype is None:\n endtype = arraytype.contenttype.dtype\n return numba.types.Array(endtype, 1, arraytype.contenttype.layout)()\n\n @numba.typing.templates.bound_function(\"any\")\n def resolve_any(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, JaggedArrayNumba.NUMBA_BOOLTYPE)\n\n @numba.typing.templates.bound_function(\"all\")\n def resolve_all(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, JaggedArrayNumba.NUMBA_BOOLTYPE)\n\n @numba.typing.templates.bound_function(\"count\")\n def resolve_count(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, JaggedArrayNumba.NUMBA_INDEXTYPE)\n\n @numba.typing.templates.bound_function(\"count_nonzero\")\n def resolve_count_nonzero(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, JaggedArrayNumba.NUMBA_INDEXTYPE)\n\n @numba.typing.templates.bound_function(\"sum\")\n def resolve_sum(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, None)\n\n @numba.typing.templates.bound_function(\"prod\")\n def resolve_prod(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, None)\n\n 
@numba.typing.templates.bound_function(\"min\")\n def resolve_min(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, None)\n\n @numba.typing.templates.bound_function(\"max\")\n def resolve_max(self, arraytype, args, kwargs):\n return self.resolve_reducer(arraytype, args, kwargs, None)\n\n# @numba.typing.templates.bound_function(\"structure1d\")\n# def resolve_structure1d(self, arraytype, args, kwargs):\n# if len(args) == 0:\n# return arraytype()\n# elif len(args) == 1 and isinstance(args[0], numba.types.NoneType):\n# return arraytype(args[0])\n# elif len(args) == 1 and isinstance(args[0], numba.types.Integer):\n# return arraytype(args[0])\n\n# @numba.extending.lower_builtin(\"structure1d\", JaggedArrayType)\n# def _JaggedArray_lower_structure1d_1(context, builder, sig, args):\n# arraytype, = sig.args\n# arrayval, = args\n# return _JaggedArray_lower_structure1d_3(context, builder, sig.return_type(arraytype, JaggedArrayNumba.NUMBA_INDEXTYPE), (arrayval, context.get_constant(JaggedArrayNumba.NUMBA_INDEXTYPE, -1),))\n\ndef _JaggedArray_lower_reduce_descend(which, context, builder, sig, args):\n arraytype, = sig.args\n arrayval, = args\n array = numba.cgutils.create_struct_proxy(arraytype)(context, builder, value=arrayval)\n content = which(context, builder, sig.return_type.contenttype(arraytype.contenttype), (array.content,))\n return _JaggedArray_lower_new(context, builder, sig.return_type(arraytype, arraytype.startstype, arraytype.stopstype, sig.return_type.contenttype, JaggedArrayNumba.NUMBA_BOOLTYPE), (arrayval, array.starts, array.stops, content, array.iscompact))\n\n@numba.extending.lower_builtin(\"any\", JaggedArrayType)\ndef _JaggedArray_lower_any(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_any, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, numpy.bool_)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = False\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]) and array.content[j] != 0:\n flatout[i] = True\n break\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"all\", JaggedArrayType)\ndef _JaggedArray_lower_all(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_all, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, numpy.bool_)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = True\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]) and array.content[j] == 0:\n flatout[i] = False\n break\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"count\", JaggedArrayType)\ndef _JaggedArray_lower_count(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_count, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, numpy.int64)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in 
range(len(flatstarts)):\n flatout[i] = 0\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]):\n flatout[i] += 1\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"count_nonzero\", JaggedArrayType)\ndef _JaggedArray_lower_count_nonzero(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_count_nonzero, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, numpy.int64)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = 0\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]) and array.content[j] != 0:\n flatout[i] += 1\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"sum\", JaggedArrayType)\ndef _JaggedArray_lower_sum(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_sum, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, array.content.dtype)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = 0\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]):\n flatout[i] += array.content[j]\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"prod\", JaggedArrayType)\ndef _JaggedArray_lower_prod(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_prod, context, builder, sig, args)\n def run(array):\n out = numpy.empty(array.starts.shape, array.content.dtype)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = 1\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]):\n flatout[i] *= array.content[j]\n return out\n return context.compile_internal(builder, run, sig, args)\n\n@numba.extending.lower_builtin(\"min\", JaggedArrayType)\ndef _JaggedArray_lower_min(context, builder, sig, args):\n if isinstance(sig.args[0].contenttype, JaggedArrayType):\n return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_min, context, builder, sig, args)\n def run(array, identity):\n out = numpy.empty(array.starts.shape, array.content.dtype)\n flatout = out.reshape(-1)\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n for i in range(len(flatstarts)):\n flatout[i] = identity\n for j in range(flatstarts[i], flatstops[i]):\n if not math.isnan(array.content[j]) and array.content[j] < flatout[i]:\n flatout[i] = array.content[j]\n return out\n datatype = sig.args[0].contenttype.dtype\n if isinstance(datatype, numba.types.Boolean):\n identity = True\n elif isinstance(datatype, numba.types.Integer):\n identity = datatype.maxval\n else:\n identity = numpy.inf\n datatype = numba.types.float64\n return context.compile_internal(builder, run, sig.return_type(sig.args[0], datatype), (args[0], context.get_constant(datatype, identity)))\n\n@numba.extending.lower_builtin(\"max\", JaggedArrayType)\ndef _JaggedArray_lower_max(context, builder, sig, 
args):\n    if isinstance(sig.args[0].contenttype, JaggedArrayType):\n        return _JaggedArray_lower_reduce_descend(_JaggedArray_lower_max, context, builder, sig, args)\n    def run(array, identity):\n        out = numpy.empty(array.starts.shape, array.content.dtype)\n        flatout = out.reshape(-1)\n        flatstarts = array.starts.reshape(-1)\n        flatstops = array.stops.reshape(-1)\n        for i in range(len(flatstarts)):\n            flatout[i] = identity\n            for j in range(flatstarts[i], flatstops[i]):\n                if not math.isnan(array.content[j]) and array.content[j] > flatout[i]:\n                    flatout[i] = array.content[j]\n        return out\n    datatype = sig.args[0].contenttype.dtype\n    if isinstance(datatype, numba.types.Boolean):\n        identity = False\n    elif isinstance(datatype, numba.types.Integer):\n        identity = datatype.minval\n    else:\n        identity = -numpy.inf\n        datatype = numba.types.float64\n    return context.compile_internal(builder, run, sig.return_type(sig.args[0], datatype), (args[0], context.get_constant(datatype, identity)))\n\n
@numba.extending.overload_attribute(JaggedArrayType, \"offsets\")\ndef _JaggedArray_offsets(arraytype):\n    if arraytype.startstype.ndim == 1:\n        def impl(array):\n            offsets = numpy.empty(len(array.starts) + 1, numpy.int64)\n            if len(array.starts) == 0:\n                offsets[0] = 0\n                return offsets\n            offsets[0] = array.starts[0]\n            for i in range(len(array.starts) - 1):\n                if array.starts[i + 1] != array.stops[i]:\n                    raise ValueError(\"starts and stops are not compatible with a single offsets array; call jagged.compact() first\")\n                offsets[i + 1] = array.stops[i]\n            offsets[-1] = array.stops[-1]\n            return offsets\n        return impl\n    else:\n        raise TypeError(\"len(starts.shape) must be 1 to compute offsets; call jagged.structure1d() first\")\n\n
@numba.extending.overload_attribute(JaggedArrayType, \"counts\")\ndef _JaggedArray_counts(arraytype):\n    def impl(array):\n        return array.stops - array.starts\n    return impl\n\n
@numba.extending.overload_attribute(JaggedArrayType, \"parents\")\ndef _JaggedArray_parents(arraytype):\n    def impl(array):\n        out = numpy.full(array.stops.max(), -1, numpy.int64)\n        for i in range(len(array.starts)):\n            out[array.starts[i]:array.stops[i]] = i\n        return out\n    return impl\n\n
@numba.extending.overload_attribute(JaggedArrayType, \"index\")\ndef _JaggedArray_index(arraytype):\n    def impl(array):\n        out = numpy.full(array.stops.max(), -1, numpy.int64)\n        for i in range(len(array.starts)):\n            for j in range(array.starts[i], array.stops[i]):\n                out[j] = j - array.starts[i]\n        return _JaggedArray_new(array, array.starts, array.stops, out, array.iscompact)\n    return impl\n\n
@numba.extending.overload_method(JaggedArrayType, \"regular\")\ndef _JaggedArray_regular(arraytype):\n    if not isinstance(arraytype.contenttype, numba.types.Array):\n        raise TypeError(\"JaggedArray.content must be a Numpy array to use jagged.regular()\")\n    def impl(array):\n        count = -1\n        for i in range(len(array.starts)):\n            if count == -1:\n                count = array.stops[i] - array.starts[i]\n            elif count != array.stops[i] - array.starts[i]:\n                raise ValueError(\"JaggedArray is not regular: different elements have different counts\")\n        return array.content.reshape(array.starts.shape + (count,))\n    return impl\n\n
@numba.extending.overload_method(JaggedArrayType, \"compact\")\ndef _JaggedArray_compact(arraytype):\n    if isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, AwkwardArrayType):\n        def impl(array):\n            if array.iscompact:\n                return array\n            if len(array.starts) == 0:\n                return _JaggedArray_new(array, array.starts, array.stops[0:0], array.content[0:0], True)\n\n            if array.starts.shape != array.stops.shape:\n                raise 
ValueError(\"JaggedArray.starts must have the same shape as JaggedArray.stops\")\n flatstarts = array.starts.reshape(-1)\n flatstops = array.stops.reshape(-1)\n\n offsets = numpy.empty(len(flatstarts) + 1, flatstarts.dtype)\n offsets[0] = 0\n for i in range(len(flatstarts)):\n count = flatstops[i] - flatstarts[i]\n if count < 0:\n raise ValueError(\"JaggedArray.stops[i] must be greater than or equal to JaggedArray.starts[i] for all i\")\n offsets[i + 1] = offsets[i] + count\n\n index = numpy.empty(offsets[-1], numpy.int64)\n k = 0\n for i in range(len(flatstarts)):\n for j in range(flatstarts[i], flatstops[i]):\n index[k] = j\n k += 1\n\n starts = offsets[:-1].reshape(array.starts.shape)\n stops = offsets[1:].reshape(array.starts.shape) # intentional\n content = array.content[index]\n return _JaggedArray_new(array, starts, stops, content, True)\n\n return impl\n\n elif isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, numba.types.Array):\n def impl(array):\n if array.iscompact:\n return array\n if len(array.starts) == 0:\n return _JaggedArray_new(array, array.starts, array.stops[0:0], array.content[0:0], True)\n\n if array.starts.shape != array.stops.shape:\n raise ValueError(\"JaggedArray.starts must have the same shape as JaggedArray.stops\")\n flatstarts = array.starts.ravel()\n flatstops = array.stops.ravel()\n\n offsets = numpy.empty(len(flatstarts) + 1, flatstarts.dtype)\n offsets[0] = 0\n for i in range(len(flatstarts)):\n count = flatstops[i] - flatstarts[i]\n if count < 0:\n raise ValueError(\"JaggedArray.stops[i] must be greater than or equal to JaggedArray.starts[i] for all i\")\n offsets[i + 1] = offsets[i] + count\n\n content = numpy.empty(offsets[-1], array.content.dtype)\n k = 0\n for i in range(len(flatstarts)):\n for j in range(flatstarts[i], flatstops[i]):\n content[k] = array.content[j]\n k += 1\n\n starts = offsets[:-1].reshape(array.starts.shape)\n stops = offsets[1:].reshape(array.starts.shape) # intentional\n return _JaggedArray_new(array, starts, stops, content, True)\n\n return impl\n\n@numba.extending.overload_method(JaggedArrayType, \"flatten\")\ndef _JaggedArray_flatten(arraytype):\n if isinstance(arraytype, JaggedArrayType):\n def impl(array):\n if len(array.starts) == 0:\n return array.content[0:0]\n else:\n a = array.compact()\n return a.content[a.starts[0]:a.stops[-1]]\n return impl\n\n@numba.extending.overload_method(JaggedArrayType, \"argmin\")\ndef _JaggedArray_argmin(arraytype):\n if isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, AwkwardArrayType):\n def impl(array):\n return _JaggedArray_new(array, array.starts, array.stops, _JaggedArray_argmin(array.content), array.iscompact)\n return impl\n elif isinstance(arraytype, JaggedArrayType):\n def impl(array):\n return _JaggedArray_argminmax(array, True)\n return impl\n\n@numba.extending.overload_method(JaggedArrayType, \"argmax\")\ndef _JaggedArray_argmax(arraytype):\n if isinstance(arraytype, JaggedArrayType) and isinstance(arraytype.contenttype, AwkwardArrayType):\n def impl(array):\n return _JaggedArray_new(array, array.starts, array.stops, _JaggedArray_argmax(array.content), array.iscompact)\n return impl\n elif isinstance(arraytype, JaggedArrayType):\n def impl(array):\n return _JaggedArray_argminmax(array, False)\n return impl\n \n@numba.njit\ndef _JaggedArray_argminmax(array, ismin):\n if len(array.content.shape) != 1:\n raise NotImplementedError(\"content is not one-dimensional\")\n\n flatstarts = array.starts.reshape(-1)\n flatstops = 
array.stops.reshape(-1)\n\n    offsets = numpy.empty(len(flatstarts) + 1, numpy.int64)\n    offsets[0] = 0\n    for i in range(len(flatstarts)):\n        if flatstarts[i] == flatstops[i]:\n            offsets[i + 1] = offsets[i]\n        else:\n            offsets[i + 1] = offsets[i] + 1\n\n    starts = offsets[:-1].reshape(array.starts.shape)\n    stops = offsets[1:].reshape(array.stops.shape)\n\n    output = numpy.empty(offsets[-1], dtype=numpy.int64)\n\n    if ismin:\n        k = 0\n        for i in range(len(flatstarts)):\n            if flatstops[i] != flatstarts[i]:\n                best = array.content[flatstarts[i]]\n                bestj = 0\n                for j in range(flatstarts[i] + 1, flatstops[i]):\n                    if array.content[j] < best:\n                        best = array.content[j]\n                        bestj = j - flatstarts[i]\n                output[k] = bestj\n                k += 1\n\n    else:\n        k = 0\n        for i in range(len(flatstarts)):\n            if flatstops[i] != flatstarts[i]:\n                best = array.content[flatstarts[i]]\n                bestj = 0\n                for j in range(flatstarts[i] + 1, flatstops[i]):\n                    if array.content[j] > best:\n                        best = array.content[j]\n                        bestj = j - flatstarts[i]\n                output[k] = bestj\n                k += 1\n\n    return _JaggedArray_new(array, starts, stops, output, True)\n\n
@numba.njit\ndef _JaggedArray_concatenate_njit(arrays, axis):\n    return _JaggedArray_concatenate(arrays, axis)\n\ndef _JaggedArray_concatenate(arrays, axis):\n    pass\n\n
@numba.extending.type_callable(_JaggedArray_concatenate)\ndef _JaggedArray_concatenate(context):\n    return None\n\n
@numba.extending.lower_builtin(_JaggedArray_concatenate, numba.types.Tuple, numba.types.Integer)\ndef _JaggedArray_lower_concatenate(context, builder, sig, args):\n    raise NotImplementedError\n","sub_path":"awkward-numba/awkward/numba/array/jagged.py","file_name":"jagged.py","file_ext":"py","file_size_in_byte":72051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"496094534","text":"import urllib.request\r\n\r\nurl = 'https://raw.githubusercontent.com/linuxacademy/content-elastic-log-samples/master/access.log'\r\naccessFile = urllib.request.urlopen(url)\r\n\r\nipset = set()\r\n\r\nfor line in accessFile:\r\n\r\n    decoded_line = line.decode(\"utf-8\")\r\n    if ' - - ' not in decoded_line:\r\n        continue\r\n    ip = decoded_line.split(' - - ')[0]\r\n\r\n    if ip not in ipset:\r\n        ipset.add(ip)\r\n        print(decoded_line)","sub_path":"accesslog.py","file_name":"accesslog.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"182578796","text":"# Implement a Person class that displays an entry in the contacts book\nimport datetime\n\n\nclass Person:\n    '''Implement a Person class that displays an entry in the contacts book'''\n\n    def __init__(self, surname, first_name, birth_date, nickname=None):\n        self.surname = surname\n        self.first_name = first_name\n        if nickname:\n            self.nickname = nickname\n        else:\n            self.nickname = surname\n        self.birth_date = birth_date\n\n    def get_age(self):\n        today_d = datetime.datetime.today()\n        age_day = self.birth_date.split('-')\n        if today_d.month == int(age_day[1]) and today_d.day >= int(age_day[2]):\n            return today_d.year - int(age_day[0])\n        elif today_d.month > int(age_day[1]):\n            return today_d.year - int(age_day[0])\n        else:\n            return today_d.year - int(age_day[0]) - 1\n\n    def get_fullname(self):\n        return self.surname + ' ' + self.first_name\n\n\nperson1 = Person('Bilinskyy', 'Igor', '1971-12-26', 'Ingvarr')\n\nprint(person1.nickname, person1.surname, person1.first_name, 
person1.birth_date)\nprint(person1.get_age())\nprint(person1.get_fullname())\n","sub_path":"sensey_05_oop_01.py","file_name":"sensey_05_oop_01.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"6673138","text":"\"\"\"\n==========================================================================\nstrategies_test.py\n==========================================================================\nTests for the Hypothesis strategies for built-in data types.\n\nAuthor : Yanghui Ou\n  Date : June 16, 2019\n\"\"\"\n\nimport hypothesis\nimport pytest\n\nfrom pymtl3.datatypes import strategies as pst\nfrom pymtl3.datatypes.bits_import import *\n\n\n
@pytest.mark.parametrize( 'nbits', [1, 3, 4, 8, 16, 32] )\ndef test_unsigned( nbits ):\n  print(\"\")\n  @hypothesis.given(\n    bits = pst.bits(nbits)\n  )\n  @hypothesis.settings( max_examples=16 )\n  def actual_test( bits ):\n    assert bits.nbits == nbits\n    print( bits, bits.uint() )\n  actual_test()\n\n
@pytest.mark.parametrize( 'nbits', [1, 3, 4, 8, 16, 32] )\ndef test_signed( nbits ):\n  print(\"\")\n  @hypothesis.given(\n    bits = pst.bits(nbits, True)\n  )\n  @hypothesis.settings( max_examples=16 )\n  def actual_test( bits ):\n    assert bits.nbits == nbits\n    print( bits, bits.int() )\n  actual_test()\n","sub_path":"pymtl3/datatypes/test/strategies_test.py","file_name":"strategies_test.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"378172646","text":"from django.shortcuts import get_object_or_404, render\nfrom django.core.paginator import Paginator\nfrom .models import Listing\nfrom .choices import *\n\n
def index(request):\n    # pass postgresql database as object\n    # sort by most recent first\n    # paginator - create pagination\n    listings = Listing.objects.order_by('-list_date').filter(is_published=True) # .all()\n    paginator = Paginator(listings, 3) # 6\n    page = request.GET.get('page')\n    page_listings = paginator.get_page(page)\n\n    context = {\n        'listings': page_listings,\n    }\n\n    return render(request, 'listings/listings.html', context)\n\n
def listing(request, listing_id):\n    # display 404 if object (page) doesn't exist\n    listing = get_object_or_404(Listing, pk=listing_id)\n\n    context = {\n        'listing': listing,\n    }\n\n    return render(request, 'listings/listing.html', context)\n\n
def search(request):\n    search_query = Listing.objects.order_by('-list_date')\n\n    # Keyword\n    if 'keywords' in request.GET:\n        # request.GET[name attribute in html]\n        keywords = request.GET['keywords']\n        # check if keywords exist\n        if keywords:\n            # icontains - check if paragraph (description) contains the keyword\n            search_query = search_query.filter(description__icontains=keywords)\n\n    # City\n    if 'city' in request.GET:\n        city = request.GET['city']\n        if city:\n            # iexact - exact city but case-insensitive\n            search_query = search_query.filter(city__iexact=city)\n\n    # State\n    if 'state' in request.GET:\n        state = request.GET['state']\n        if state:\n            search_query = search_query.filter(state__iexact=state)\n\n    # Bedrooms\n    if 'bedrooms' in request.GET:\n        bedrooms = request.GET['bedrooms']\n        if bedrooms:\n            search_query = search_query.filter(bedrooms__iexact=bedrooms) # bedrooms__lte\n\n    # Max Price\n    if 'price' in request.GET:\n        price = request.GET['price']\n        if price:\n            # lte - less than or equal to (up to queried price)\n            search_query = search_query.filter(price__lte=price)\n\n    context = {\n        'state_choices': state_choices,\n        
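        # [editor's aside] Illustrative only (the query values below are
        # hypothetical): each filter() above composes lazily onto one QuerySet,
        # so a request like ?keywords=pool&city=Boston&price=500000 runs a
        # single query equivalent to
        #
        #     Listing.objects.order_by('-list_date') \
        #         .filter(description__icontains='pool') \
        #         .filter(city__iexact='Boston') \
        #         .filter(price__lte=500000)
        #
        # i.e. one WHERE clause combining the conditions with AND, executed
        # only when the template iterates the result.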
'bedroom_choices': bedroom_choices,\n 'price_choices': price_choices,\n 'listings': search_query,\n 'search_values': request.GET, # get all values from GET request\n }\n\n return render(request, 'listings/search.html', context)\n","sub_path":"listings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"449922092","text":"from flask import flash\nfrom dbconnect import connection\nfrom MySQLdb import escape_string as thwart\nimport datetime\nimport gc\n\ntoday_date = datetime.date.today()\ndef convert_date(raw_date):\n\tdate_format='%m/%d/%Y'\n\tif raw_date == None:\n\t\tformatted_date = raw_date\n\telse:\n\t\tformatted_date = datetime.datetime.strptime(str(raw_date), '%Y-%m-%d').strftime(date_format)\n\treturn formatted_date\n\nclass Invoice(object):\n\tdef __init__(self, invoice_number, date_received, associated_parts):\n\t\tself.invoice_number = str(invoice_number)\n\t\tself.date_received = date_received.strftime('%Y-%m-%d')\n\t\tself.associated_parts = associated_parts\n\n\tdef create(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"INSERT INTO invoice (invoice_number, date_received) VALUES ( '%s', '%s' )\" % (thwart(self.invoice_number), thwart(self.date_received) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef import_invoice_from_excel(self):\n\t\tself.create()\n\t\tfor each_part in self.associated_parts:\n\t\t\tinvoice_detail = InvoiceDetail(invoice_number = self.invoice_number, part_number = each_part['part_number'], purchase_order_number = each_part['assoc_po'], shelf_location = None, status = 'New', claimed = 0, claimed_date = None)\n\t\t\tinvoice_detail.create_excel()\n\n\t\t\tpart = Part(part_number = each_part['part_number'], part_description = each_part['part_description'], machine_type = None, part_price = str(each_part['part_price']), image_url = None)\n\t\t\tpart.create_or_update_excel()\n\t\treturn True\n\n\tdef check_if_exist(self):\n\t\tc, conn = connection()\n\t\tcheck = c.execute(\"SELECT * FROM invoice WHERE invoice_number = ('%s')\" % (thwart(self.invoice_number)) )\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\tif int(check) == 0:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\t@staticmethod\n\tdef get_all():\n\t\tall_invoices = []\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT I.*, (SELECT COUNT(*) FROM invoice_detail AS D WHERE D.invoice_number = I.invoice_number) AS number_of_items FROM invoice AS I ORDER BY date_received DESC\")\n\t\tfor i in c:\n\t\t\tall_invoices.append({ \"invoice_number\" : i[0], \"date_received\" : convert_date(i[1]), \"number_of_items\" : i[2]})\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn all_invoices\n\nclass InvoiceDetail(object):\n\tdef __init__(self, invoice_number, part_number, purchase_order_number, shelf_location, status, claimed, claimed_date):\n\t\tself.invoice_number = str(invoice_number)\n\t\tself.part_number = str(part_number)\n\t\tself.purchase_order_number = str(purchase_order_number)\n\t\tself.shelf_location = shelf_location\n\t\tself.status = str(status)\n\t\tself.claimed = str(claimed)\n\t\tself.claimed_date = claimed_date\n\n\tdef create(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"INSERT INTO invoice_detail (invoice_number, part_number, purchase_order_number, shelf_location, status, claimed, claimed_date) VALUES ( '%s', '%s', '%s', '%s', '%s', '%s', '%s' )\" % ( thwart(self.invoice_number), 
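# thwart is MySQLdb.escape_string (see imports above); every value is\n\t\t\t# escaped before being interpolated into the SQL statement.\n\t\t\t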
thwart(self.part_number), thwart(self.purchase_order_number), thwart(self.shelf_location), thwart(self.status), thwart(self.claimed), thwart(self.claimed_date) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef create_excel(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"INSERT INTO invoice_detail (invoice_number, part_number, purchase_order_number, shelf_location, status, claimed) VALUES ( '%s', '%s', '%s', '', '%s', '%s' )\" % ( thwart(self.invoice_number), thwart(self.part_number), thwart(self.purchase_order_number), thwart(self.status), thwart(self.claimed) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\nclass Part(object):\n\tdef __init__(self, part_number, part_description, machine_type, part_price, image_url):\n\t\tself.part_number = part_number\n\t\tself.part_description = part_description\n\t\tself.machine_type = machine_type\n\t\tself.part_price = part_price\n\t\tself.image_url = image_url\n\n\tdef create(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"INSERT INTO part_detail (part_number, part_description, machine_type, part_price, img_url) VALUES ( '%s', '%s', '%s', '%s', '%s')\" % ( thwart(self.part_number), thwart(self.part_description), thwart(self.machine_type), thwart(self.part_price), thwart(self.image_url) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef update(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"UPDATE part_detail SET part_description = '%s', machine_type = '%s', part_price = '%s', img_url = '%s' WHERE part_number = ('%s')\" % ( thwart(self.part_description), thwart(self.machine_type), thwart(self.part_price), thwart(self.image_url), thwart(self.part_number) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef create_excel(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"INSERT INTO part_detail (part_number, part_description, machine_type, part_price) VALUES ( '%s', '%s', 'Other', '%s')\" % ( thwart(self.part_number), thwart(self.part_description), thwart(self.part_price) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef update_excel(self):\n\t\tc, conn = connection()\n\t\tc.execute(\"UPDATE part_detail SET part_description = '%s', part_price = '%s' WHERE part_number = ('%s')\" % ( thwart(self.part_description), thwart(self.part_price), thwart(self.part_number) ) )\n\t\tconn.commit()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn True\n\n\tdef create_or_update_excel(self):\n\t\tif self.check_if_exist():\n\t\t\tself.update_excel()\n\t\telse:\n\t\t\tself.create_excel()\n\n\tdef check_if_exist(self):\n\t\tc, conn = connection()\n\t\tcheck = c.execute(\"SELECT * FROM part_detail WHERE part_number = ('%s')\" % (thwart(self.part_number)) )\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\tif int(check) == 0:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\t@staticmethod\n\tdef get_all():\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT * FROM part_detail\")\n\t\tall_parts = c.fetchall()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn all_parts\n\n\t@staticmethod\n\tdef get_stock_inventory():\n\t\t
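# Fetch the full part list up front: the per-part query below reuses this\n\t\t# cursor, which would otherwise discard the rows still pending iteration.\n\t\t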
c, conn = connection()\n\t\tc.execute(\"SELECT * FROM part_detail\")\n\t\tall_parts = c.fetchall()\n\t\tstock_parts = []\n\t\tfor p in all_parts:\n\t\t\tc.execute(\"SELECT P.part_number, P.part_description, P.machine_type, P.part_price, (SELECT COUNT(*) FROM invoice_detail AS I WHERE I.part_number = P.part_number AND I.status IN ('NEW', 'In Stock - Claimed')) AS total_quantity, (SELECT COUNT(*) FROM invoice_detail AS I WHERE I.part_number = P.part_number AND I.status IN ('NEW', 'In Stock - Claimed') AND I.shelf_location IS NOT NULL AND I.shelf_location NOT IN ('N/A', 'n/a', '')) AS stock_quantity, (SELECT COUNT(*) FROM invoice_detail AS I WHERE I.part_number = P.part_number AND I.status IN ('NEW') AND I.shelf_location IS NOT NULL AND I.shelf_location NOT IN ('N/A', 'n/a', '')) AS claimable_amount FROM part_detail AS P WHERE part_number = '%s'\" % (thwart(p[0])) )\n\t\t\tpart = c.fetchone()\n\t\t\tstock_parts.append({\"part_number\" : part[0], \"part_description\" : part[1], \"machine_type\" : part[2], \"asc_price\" : part[3], \"total_quantity\" : part[4], \"stock_quantity\" : part[5], \"claimable_quanity\" : part[6]})\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn stock_parts\n\n\t@staticmethod\n\tdef get_stock_quantity_for_part(part_number):\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT COUNT(*) FROM invoice_detail WHERE part_number = '%s' AND status IN ('NEW', 'In Stock - Claimed') AND shelf_location IS NOT NULL AND shelf_location NOT IN ('N/A', 'n/a', '')\" % (thwart(part_number) ))\n\t\tresult = c.fetchone()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn result[0]\n\n\t@staticmethod\n\tdef get_by_part_number(part_number):\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT * FROM part_detail WHERE part_number = '%s'\" % (thwart(part_number) ))\n\t\tpart_detail = c.fetchone()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn part_detail\n\n\t@staticmethod\n\tdef get_shelves():\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT DISTINCT shelf_location FROM invoice_detail\")\n\t\tall_shelves = c.fetchall()\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn all_shelves\n\n\t@staticmethod\n\tdef get_shelf_report(shelf_name):\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT ID.invoice_detail_id, ID.invoice_number, ID.part_number, ID.purchase_order_number, ID.shelf_location, ID.status, ID.claimed, ID.claimed_date, P.part_price, I.date_received, P.part_description FROM invoice_detail AS ID JOIN part_detail AS P ON ID.part_number = P.part_number JOIN invoice AS I ON ID.invoice_number = I.invoice_number WHERE ID.status IN ('New', 'In Stock - Claimed') AND ID.shelf_location = '%s'\" % (thwart(shelf_name)))\n\t\tshelf_data = c.fetchall()\n\t\tshelf_data_list = []\n\t\tfor i in shelf_data:\n\t\t\tshelf_data_list.append({\n\t\t\t\t\t\t\t\"invoice_detail_id\" : i[0],\n\t\t\t\t\t\t\t\"invoice_number\" : i[1],\n\t\t\t\t\t\t\t\"part_number\" : i[2],\n\t\t\t\t\t\t\t\"assoc_po\" : i[3],\n\t\t\t\t\t\t\t\"location\" : i[4],\n\t\t\t\t\t\t\t\"status\" : i[5],\n\t\t\t\t\t\t\t\"claimed\" : i[6],\n\t\t\t\t\t\t\t\"claimed_date\" : convert_date(i[7]),\n\t\t\t\t\t\t\t\"part_price\" : i[8],\n\t\t\t\t\t\t\t\"received_date\" : convert_date(i[9]),\n\t\t\t\t\t\t\t\"part_description\" : i[10]\n\t\t\t\t\t\t\t})\n\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn shelf_data_list\n\n\t@staticmethod\n\tdef get_invoice_detail(part_number):\n\t\tc, conn = connection()\n\t\tc.execute(\"SELECT * FROM invoice_detail WHERE part_number = '%s'\" % (thwart(part_number) ))\n\t\tpart_invoice_detail = c.fetchall()\n\t\tresult = []\n\t\tfor i in part_invoice_detail:\n\t\t\t
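# Reshape each invoice_detail row into the dict structure the views and\n\t\t\t# templates consume (dates formatted via convert_date).\n\t\t\t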
result.append(\n\t\t\t\t\t\t\t{\"invoice_detail_id\" : i[0],\n\t\t\t\t\t\t\t\"invoice_number\" : i[1],\n\t\t\t\t\t\t\t\"part_number\" : i[2],\n\t\t\t\t\t\t\t\"assoc_po\" : i[3],\n\t\t\t\t\t\t\t\"location\" : i[4],\n\t\t\t\t\t\t\t\"status\" : i[5],\n\t\t\t\t\t\t\t\"claimed\" : i[6],\n\t\t\t\t\t\t\t\"claimed_date\" : convert_date(i[7])})\n\t\tc.close()\n\t\tconn.close()\n\t\tgc.collect()\n\t\treturn result\n\n","sub_path":"my_hng/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":9630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"313231864","text":"inFile = open('NGLY1-Viruses-mapped2')\nouFile = open('NGLY1-Viruses-mapped2-virus-type', 'w')\nD = {}\nD2 = {}\nwhile True:\n    line1 = inFile.readline().strip()\n    line2 = inFile.readline().strip()\n    if line1:\n        fields = line1.split('\\t')\n        virus = fields[1]\n        virus_pos = virus+':'+fields[8]+':'+fields[9]\n        if virus_pos in D2:\n            pass\n        else:\n            D.setdefault(virus, 0)\n            D[virus] += 1\n            D2[virus_pos] = 1\n    else:\n        break\ninFile.close()\n\nd = sorted(D.items(), key=lambda x: float(x[1]), reverse=True)\nfor x in d:\n    ouFile.write(x[0] + '\\t' + str(x[1]) + '\\n')\nouFile.close()\n","sub_path":"NGLY1/Virus/NGLY1-unmapped-bam-fasta/5-virus-type.py","file_name":"5-virus-type.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"118209215","text":"from flask.ext.wtf import Form\nfrom wtforms import TextField, SubmitField, SelectField, PasswordField, RadioField\nfrom wtforms.widgets import TextArea\nfrom wtforms.validators import Required, ValidationError\n\nclass AdminLogin(Form):\n    username = TextField('username', validators=[Required()])\n    password = PasswordField('password', validators=[Required()])\n\nclass AdminForm(Form):\n    manager_id = TextField('manager_id')\n    accept = TextField('accept')\n    accept_all = SubmitField('accept_all')\n\nclass LoginForm(Form):\n    username = TextField('username', validators=[Required()])\n    password = PasswordField('password', validators=[Required()])\n    action = RadioField('action', validators=[Required()],\n        choices=[('add', 'Add'), ('change', 'Change'), ('remove', 'Remove')])\n\nclass RequestForm(Form):\n    firstname = TextField('firstname', validators=[Required()]) # Required\n    lastname = TextField('lastname', validators=[Required()]) # Required\n    username = TextField('username', validators=[Required()]) # Required\n    effectivedate = TextField('Date', validators=[Required()])\n    manager = SelectField('Supervisor/Manager', validators=[Required()],\n        choices=[(\"\", '-'),\n        ('Brokerage_Ops', 'Anne Byrd'), ('Brokerage_Ops', 'Beth Kirksey'),\n        ('Brokerage_Ops', 'Elisa Kern'), ('Brokerage_Ops', 'Molly McGreal'),\n        ('Brokerage_Ops', 'Mike Gilleland'),\n        ('Compliance', 'Jason Strickland'), ('Compliance', 'Michael Hogan'),\n        ('Contractor_Business_Partners', 'Contractor'),\n        ('CSRs', 'Bill Davis'), ('CSRs', 'Harold Solomon'),\n        ('DBA', 'Barry Callahan'),\n        ('Development', 'Henry Olschofka'), ('Development', 'Qingfeng He'),\n        ('Development', 'Vincent Hsing'),\n        ('Executives', 'Steve Wallman'),\n        ('Finance', 'Jim Fyffe'),\n        ('HR', 'Jill Brown'),\n        ('Prod_Support', 'Chamidu Abeysekera'),\n        ('Marketing', 'Helen Shepro'),\n        ('Product_Management', 'Jon Normile'),\n        ('Sales', 'Greg Vigrass'),\n        ('Tech_Ops', 'Josh Leckner'), ('Tech_Ops', 'Tate DeCray')])\n    title = TextField('title')\n    ############################################################################\n    laptop = SelectField('Laptop', choices=[(\"\", '-'),\n        ('Windows 7', 'Dell - PC'), 
('OSX', 'Apple - OSX'),\n ('Terminal', 'Terminal - Raspberry Py' )])\n desk_phone = SelectField('Phone', choices=[(\"\", '-'), ('Standard', 'Standard')])\n company_cell = SelectField('Cell Phone', choices=[(\"\", '-'), ('Yes', 'Yes'), ('No', 'No')])\n globalrelay = SelectField('globalrelay', choices=[('Yes', 'Yes'), ('No', 'No')])\n jira = SelectField('jira', choices=[('Yes', 'Yes'), ('No', 'No')])\n ############################################################################\n email = SelectField('email', choices=[\n ('folioinvesting.com', '@folioinvesting.com'),\n ('folioinstitutional.com', '@folioinstitutional.com')])\n officelocation = TextField('Office Location', default=\"\")\n home_num = TextField('Home Phone', default=\"\")\n cell_num = TextField('Cell Phone', default=\"\")\n ############################################################################\n salesforce = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n csr = SelectField('action', choices=[(\"\", '-'),\n ('Add PROD CSRegisteredRep', 'Add - PROD CSRegisteredRep'),\n ('Add PROD CSManager', 'Add - PROD CSManager'),\n ('Add EAP CSRegisteredRep', 'Add - EAP CSRegisteredRep'),\n ('Add EAP CSManager', 'Add - EAP CSManager'),\n ('Remove PROD CSRegisteredRep', 'Remove - PROD CSRegisteredRep'),\n ('Remove PROD CSManager', 'Remove - PROD CSManager'),\n ('Remove EAP CSRegisteredRep', 'Remove - EAP CSRegisteredRep'),\n ('Remove EAP CSManager', 'Remove - EAP CSManager')])\n inteliclear = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n fvadmins = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n mosiki_group = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n gps = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n callcenterqueue = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n sql = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n svn = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n level3 = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n vpn = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n noc = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n colo = SelectField('action', choices=[(\"\", '-'), ('Add', 'Add'), ('Remove', 'Remove')])\n ############################################################################\n notes = TextField('notes', widget=TextArea())\n","sub_path":"LDAP/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641188458","text":"import sys\r\nimport json\r\nimport schedule\r\nimport time\r\nimport subprocess\r\nimport _thread\r\n\r\ndef runProgram(prgm):\r\n try:\r\n _thread.start_new_thread(executeThread,(prgm, ))\r\n except:\r\n print(\"Error: unable to start thread\")\r\n\r\ndef executeThread(prgm):\r\n subprocess.run(prgm[\"command\"], cwd=prgm[\"dir\"])\r\n\r\nif len(sys.argv) != 2:\r\n print(\"The program expects a config file as the first and only argument. 
You can find documentation on how to open the config file in the README.txt file in the root directory of the project.\")\r\n exit()\r\n\r\nwith open(sys.argv[1]) as configHandle:\r\n configFile = json.load(configHandle)\r\n\r\nif configFile == None:\r\n print(\"Error loading config file aborting\")\r\n exit()\r\n\r\nfor x in configFile[\"schedule\"]:\r\n runProgram(x)\r\n schedule.every(int(x[\"interval\"])).seconds.do(runProgram, prgm=x)\r\n\r\nwhile True:\r\n schedule.run_pending()\r\n time.sleep(1)\r\n","sub_path":"runtime/redditquery/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619739741","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Code for the paper- Part 1\n\n# This script handles the meta-analyses.\n\n# ### Plotting imports and notebook configuration\n\n# In[ ]:\n\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nilearn import datasets, image, input_data, plotting\n\nimport nimare\n\nFIG_WIDTH = 10\nROW_HEIGHT = 2 # good row height for width of 10\n\n\n# ## Listing 1\n\n# In[ ]:\n\n\nsl_dset1 = nimare.io.convert_sleuth_to_dataset(\n \"data/contrast-CannabisMinusControl_space-talairach_sleuth.txt\"\n)\nsl_dset2 = nimare.io.convert_sleuth_to_dataset(\n \"data/contrast-ControlMinusCannabis_space-talairach_sleuth.txt\"\n)\n\n\n# ## Listing 2\n\n# In[ ]:\n\n\nif os.path.isfile(\"data/neurosynth_dataset.pkl.gz\"):\n ns_dset = nimare.dataset.Dataset.load(\"data/neurosynth_dataset.pkl.gz\")\nelif os.path.isfile(\"data/database.txt\"):\n ns_dset = nimare.io.convert_neurosynth_to_dataset(\n \"data/database.txt\",\n \"data/features.txt\",\n )\n ns_dset.save(\"data/neurosynth_dataset.pkl.gz\")\nelse:\n nimare.extract.fetch_neurosynth(\"data/\", unpack=True)\n ns_dset = nimare.io.convert_neurosynth_to_dataset(\n \"data/database.txt\",\n \"data/features.txt\",\n )\n ns_dset.save(\"data/neurosynth_dataset.pkl.gz\")\n\n\n# ## Listing 3\n\n# In[ ]:\n\n\nmkda_kernel = nimare.meta.kernel.MKDAKernel(r=10)\nmkda_ma_maps = mkda_kernel.transform(sl_dset1, return_type=\"image\")\nkda_kernel = nimare.meta.kernel.KDAKernel(r=10)\nkda_ma_maps = kda_kernel.transform(sl_dset1, return_type=\"image\")\nale_kernel = nimare.meta.kernel.ALEKernel(sample_size=20)\nale_ma_maps = ale_kernel.transform(sl_dset1, return_type=\"image\")\n\n\n# ### Figure 3\n\n# In[ ]:\n\n\nmax_value = np.max(kda_ma_maps[0].get_fdata()) + 1\n\nfig, axes = plt.subplots(nrows=3, figsize=(FIG_WIDTH, ROW_HEIGHT * 3))\nplotting.plot_stat_map(\n mkda_ma_maps[2],\n cut_coords=[54, -46, 12],\n title=\"MKDA Kernel\",\n vmax=max_value,\n axes=axes[0],\n draw_cross=False,\n)\nplotting.plot_stat_map(\n kda_ma_maps[2],\n cut_coords=[54, -46, 12],\n title=\"KDA Kernel\",\n vmax=max_value,\n axes=axes[1],\n draw_cross=False,\n)\nplotting.plot_stat_map(\n ale_ma_maps[2],\n cut_coords=[54, -46, 12],\n title=\"ALE Kernel\",\n axes=axes[2],\n draw_cross=False,\n)\nfig.savefig(\"figures/figure_03.svg\")\n\n\n# ## Listing 4\n\n# In[ ]:\n\n\nmkdad_meta = nimare.meta.cbma.mkda.MKDADensity(null_method=\"analytic\")\nmkdad_results = mkdad_meta.fit(sl_dset1)\n\n\n# ## Listing 5\n\n# In[ ]:\n\n\nijk = ns_dset.coordinates[[\"i\", \"j\", \"k\"]].values\nmeta = nimare.meta.cbma.ale.SCALE(\n n_iters=10000,\n ijk=ijk,\n kernel__sample_size=20,\n)\nscale_results = meta.fit(sl_dset1)\n\n\n# ## Listing 6\n\n# In[ ]:\n\n\nmeta = nimare.meta.cbma.mkda.MKDAChi2()\nmkdac_results = meta.fit(sl_dset1, 
sl_dset2)\n\n\n# ### Figure 4\n\n# In[ ]:\n\n\n# Additional meta-analyses for figures\nmeta = nimare.meta.cbma.mkda.KDA(null_method=\"analytic\")\nkda_results = meta.fit(sl_dset1)\n\nmeta = nimare.meta.cbma.ale.ALE(null_method=\"analytic\")\nale_results = meta.fit(sl_dset1)\n\n# Meta-analytic maps across estimators\nresults = [\n mkdad_results,\n mkdac_results,\n kda_results,\n ale_results,\n scale_results,\n]\nnames = [\"MKDADensity\", \"MKDAChi2\", \"KDA\", \"ALE\", \"SCALE\"]\nfig, axes = plt.subplots(\n figsize=(FIG_WIDTH, ROW_HEIGHT * len(names)),\n nrows=len(names),\n)\nfor i, r in enumerate(results):\n name = names[i]\n if \"z\" in r.maps.keys():\n stat_img = r.get_map(\"z\", return_type=\"image\")\n else:\n stat_img = r.get_map(\"z_desc-consistency\", return_type=\"image\")\n plotting.plot_stat_map(\n stat_img,\n title=name,\n cut_coords=[0, 0, 0],\n draw_cross=False,\n annotate=False,\n axes=axes[i],\n )\nfig.savefig(\"figures/figure_04.svg\")\n\n\n# ## Listing 7\n\n# In[ ]:\n\n\nfrom nimare.tests.utils import get_test_data_path\n\ndset_dir = nimare.extract.download_nidm_pain()\ndset_file = os.path.join(get_test_data_path(), \"nidm_pain_dset.json\")\nimg_dset = nimare.dataset.Dataset(dset_file)\nimg_dset.update_path(dset_dir)\n\n# Calculate missing images\nimg_dset.images = nimare.transforms.transform_images(\n img_dset.images,\n target=\"z\",\n masker=img_dset.masker,\n metadata_df=img_dset.metadata,\n)\nimg_dset.images = nimare.transforms.transform_images(\n img_dset.images,\n target=\"varcope\",\n masker=img_dset.masker,\n metadata_df=img_dset.metadata,\n)\n\nmeta = nimare.meta.ibma.DerSimonianLaird()\ndsl_results = meta.fit(img_dset)\n\n\n# ### Figure 5\n\n# In[ ]:\n\n\n# Additional meta-analyses for figures\nmeta = nimare.meta.ibma.Stouffers(use_sample_size=False)\nstouffers_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.Stouffers(use_sample_size=True)\nweighted_stouffers_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.Fishers()\nfishers_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.PermutedOLS()\nols_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.WeightedLeastSquares()\nwls_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.Hedges()\nhedges_results = meta.fit(img_dset)\n\n# Use atlas for likelihood-based estimators\natlas = datasets.fetch_atlas_harvard_oxford(\"cort-maxprob-thr25-2mm\")\n\n# nilearn's NiftiLabelsMasker cannot handle NaNs at the moment,\n# and some of the NIDM-Results packs' beta images have NaNs at the edge of the\n# brain.\n# So, we will create a reduced version of the atlas for this analysis.\nnan_mask = image.math_img(\n \"~np.any(np.isnan(img), axis=3)\", img=img_dset.images[\"beta\"].tolist()\n)\nnanmasked_atlas = image.math_img(\n \"mask * atlas\",\n mask=nan_mask,\n atlas=atlas[\"maps\"],\n)\nmasker = input_data.NiftiLabelsMasker(nanmasked_atlas)\n\nmeta = nimare.meta.ibma.VarianceBasedLikelihood(method=\"reml\", mask=masker)\nvbl_results = meta.fit(img_dset)\n\nmeta = nimare.meta.ibma.SampleSizeBasedLikelihood(method=\"reml\", mask=masker)\nssbl_results = meta.fit(img_dset)\n\n# Plot statistical maps from IBMAs\nresults = [\n dsl_results,\n stouffers_results,\n weighted_stouffers_results,\n fishers_results,\n ols_results,\n wls_results,\n hedges_results,\n vbl_results,\n ssbl_results,\n]\nnames = [\n \"DerSimonian-Laird\",\n \"Stouffer's\",\n \"Weighted Stouffer's\",\n \"Fisher's\",\n \"Ordinary Least Squares\",\n \"Weighted Least Squares\",\n \"Hedges'\",\n \"Variance-Based Likelihood\",\n \"Sample Size-Based 
Likelihood\",\n]\n\nfig, axes = plt.subplots(\n figsize=(FIG_WIDTH, ROW_HEIGHT * len(results)), nrows=len(results)\n)\nfor i, r in enumerate(results):\n img = r.get_map(\"z\")\n plotting.plot_stat_map(\n img,\n title=names[i],\n annotate=False,\n cut_coords=[5, -15, 10],\n axes=axes[i],\n draw_cross=False,\n )\n\nfig.savefig(\"figures/figure_05.svg\")\n\n\n# ### Save map for future use\n\n# In[ ]:\n\n\ndsl_results.get_map(\"est\").to_filename(\"data/pain_map.nii.gz\")\n\n\n# ## Listing 8\n\n# In[ ]:\n\n\nmc_corrector = nimare.correct.FWECorrector(\n method=\"montecarlo\", n_iters=10000, n_cores=4\n)\nmc_results = mc_corrector.transform(mkdad_meta.results)\n\nfdr_corrector = nimare.correct.FDRCorrector(method=\"indep\")\nfdr_results = fdr_corrector.transform(mkdad_meta.results)\n\n\n# ### Figure 6\n\n# In[ ]:\n\n\nfig, axes = plt.subplots(figsize=(FIG_WIDTH, ROW_HEIGHT * 2), nrows=2)\nplotting.plot_stat_map(\n mc_results.get_map(\"z_level-cluster_corr-FWE_method-montecarlo\"),\n title=\"Cluster-level Monte Carlo\",\n annotate=False,\n cut_coords=[0, 0, 0],\n axes=axes[0],\n draw_cross=False,\n)\nplotting.plot_stat_map(\n fdr_results.get_map(\"z_corr-FDR_method-indep\"),\n title=\"Independent FDR\",\n annotate=False,\n cut_coords=[0, 0, 0],\n axes=axes[1],\n draw_cross=False,\n)\nfig.savefig(\"figures/figure_06.svg\")\n\n\n# ## Listing 9\n\n# In[ ]:\n\n\nkern = nimare.meta.kernel.ALEKernel()\nmeta = nimare.meta.cbma.ale.ALESubtraction(\n kernel_transformer=kern,\n n_iters=10000,\n)\nsubtraction_results = meta.fit(sl_dset1, sl_dset2)\n\n\n# ### Figure 7\n\n# In[ ]:\n\n\nstat_img = subtraction_results.get_map(\n \"z_desc-group1MinusGroup2\",\n return_type=\"image\",\n)\nfig, ax = plt.subplots(figsize=(FIG_WIDTH, ROW_HEIGHT))\nplotting.plot_stat_map(\n stat_img,\n title=\"ALE Subtraction\",\n cut_coords=[0, 0, 0],\n draw_cross=False,\n annotate=False,\n axes=ax,\n)\nfig.savefig(\"figures/figure_07.svg\")\n\n\n# ## Listing 10\n\n# In[ ]:\n\n\n# Create amygdala mask for MACMs\natlas = datasets.fetch_atlas_harvard_oxford(\"sub-maxprob-thr25-2mm\")\namyg_val = atlas[\"labels\"].index(\"Right Amygdala\")\namygdala_mask = image.math_img(f\"img == {amyg_val}\", img=atlas[\"maps\"])\namygdala_mask.to_filename(\"data/amygdala_roi.nii.gz\")\n\namygdala_ids = ns_dset.get_studies_by_mask(\"data/amygdala_roi.nii.gz\")\ndset_amygdala = ns_dset.slice(amygdala_ids)\n\nsphere_ids = ns_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)\ndset_sphere = ns_dset.slice(sphere_ids)\n\n\n# ## Listing 11\n\n# In[ ]:\n\n\nmeta_amyg = nimare.meta.cbma.ale.ALE(kernel__sample_size=20)\nresults_amyg = meta_amyg.fit(dset_amygdala)\n\nmeta_sphere = nimare.meta.cbma.ale.ALE(kernel__sample_size=20)\nresults_sphere = meta_sphere.fit(dset_sphere)\n\n\n# ### Figure 8\n\n# In[ ]:\n\nfig, axes = plt.subplots(figsize=(FIG_WIDTH, ROW_HEIGHT * 2), nrows=2)\nplotting.plot_stat_map(\n results_amyg.get_map(\"z\"),\n title=\"Amygdala ALE MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[0],\n)\nplotting.plot_stat_map(\n results_sphere.get_map(\"z\"),\n title=\"Sphere ALE MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[1],\n)\nfig.savefig(\"figures/figure_08.svg\")\n\n\n# In[ ]:\n\n\nmeta_amyg = nimare.meta.cbma.mkda.MKDADensity(null_method=\"analytic\")\nresults_amyg = meta_amyg.fit(dset_amygdala)\n\nmeta_sphere = nimare.meta.cbma.mkda.MKDADensity(null_method=\"analytic\")\nresults_sphere = meta_sphere.fit(dset_sphere)\n\nfig, axes = plt.subplots(figsize=(FIG_WIDTH, 
ROW_HEIGHT * 2), nrows=2)\nplotting.plot_stat_map(\n results_amyg.get_map(\"z\"),\n title=\"Amygdala MKDA MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[0],\n)\nplotting.plot_stat_map(\n results_sphere.get_map(\"z\"),\n title=\"Sphere MKDA MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[1],\n)\nfig.savefig(\"figures/figure_08a.svg\")\n\n\n# In[ ]:\n\n\nmeta_amyg = nimare.meta.cbma.mkda.KDA()\nresults_amyg = meta_amyg.fit(dset_amygdala)\n\nmeta_sphere = nimare.meta.cbma.mkda.KDA()\nresults_sphere = meta_sphere.fit(dset_sphere)\n\nfig, axes = plt.subplots(figsize=(FIG_WIDTH, ROW_HEIGHT * 2), nrows=2)\nplotting.plot_stat_map(\n results_amyg.get_map(\"z\"),\n title=\"Amygdala KDA MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[0],\n)\nplotting.plot_stat_map(\n results_sphere.get_map(\"z\"),\n title=\"Sphere KDA MACM\",\n cut_coords=[24, -2, -20],\n draw_cross=False,\n annotate=False,\n axes=axes[1],\n)\nfig.savefig(\"figures/figure_08b.svg\")\n\n\n# ## Listing 12\n\n# In[ ]:\n\n\n# ### Figure 9\n\n# In[ ]:\n","sub_path":"listings_figures_and_tables_part1.py","file_name":"listings_figures_and_tables_part1.py","file_ext":"py","file_size_in_byte":10888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162160333","text":"from util import *\nimport Runnables\nfrom Runnables.Runnable import Runnable\n\nclass OracleLoginMenu(Runnable):\n \"\"\"\n Oracle Login Module\n\n Description:\n Generic login menu for an Oracle Database. Will provide user with\n three attempts to authenticate after it will exit.\n\n Expects:\n \"hostname\": hostname of Database\n \"next\": next Runnable to give initialized Database object to\n\n update returns:\n True: Authentication failed\n (1, None, {}): Maximum authentication attempts reached\n (1, next, {{\"db\" : db}): Authentication passed\n \"\"\"\n def __init__(self, params={}):\n self._commands = {}\n self._host = params[\"hostname\"]\n self._next = params[\"next\"]\n self._atempts = 0;\n print(\"Please log in to your Oracle account.\")\n\n def update(self):\n db = Database()\n user = input(\"Username: \")\n pw = getpass.getpass()\n if db.connect(user, pw, self._host):\n print(\"Authenticated!\")\n return (1, self._next, {\"db\" : db})\n \n self._atempts += 1\n\n if self._atempts >= 3:\n print(\"Maximum failed attempts. 
Goodbye.\")\n return (1, None, {})\n \n print(\"Authentication failed, please try again.\")\n return True\n\nRunnables.runnable_types[\"OracleLoginMenu\"] = OracleLoginMenu","sub_path":"Runnables/OracleLoginMenu.py","file_name":"OracleLoginMenu.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"625249955","text":"import requests\nfrom django.conf import settings\n\ndef geocode(address):\n print(address)\n r = requests.get(\"https://geocode-maps.yandex.ru/1.x\", params={\n \"geocode\": address,\n \"apikey\": settings.YANDEX_API_KEY,\n \"format\": \"json\"\n })\n r.raise_for_status()\n return r.json().get('response').get('GeoObjectCollection').get('featureMember')[0].get('GeoObject').get('Point').get('pos').split(' ')","sub_path":"apps/attractions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"275692715","text":"from django.contrib import admin\nfrom alterapp.models import Tag, Article\n\n# Register your models here.\n\nclass ArticleAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t(None, {'fields':['title', 'description','tags']}),\n\t\t('Image', {'fields':['image']}),\n\t\t('Date Information', {'fields':['created_date', 'published_date']}),\n\t]\n\nadmin.site.register(Tag)\nadmin.site.register(Article, ArticleAdmin)\n\n","sub_path":"alteration/alterapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"480713056","text":"\r\nprint('\\033[36m-=\\033[m'*30)\r\nqnt = int(input('Qual é a quantidade total de veículos da empresa: '))\r\nprint('\\033[36m-=\\033[m'*30)\r\nult = []\r\nseg = 0\r\ntec = 0\r\nquart = 0\r\nquint = 0\r\nsext = 0\r\nprint()\r\nfor i in range(qnt):\r\n placa = (input('Qual é o número da placa do veículo: '))\r\n ult.append(placa[-1])\r\n if placa[-1] == '1' or placa[-1] == '2':\r\n seg += 1\r\n elif placa[-1] == '3' or placa[-1] == '4':\r\n tec += 1\r\n elif placa[-1] == '5' or placa[-1] == '6':\r\n quart += 1\r\n elif placa[-1] == '7' or placa[-1] == '8':\r\n quint += 1\r\n elif placa[-1] == '9' or placa[-1] == '0':\r\n sext += 1\r\n\r\n\r\nsoma = quint + sext\r\nsoma1 = seg + tec + quart + quint + sext\r\nperc = (quart * 100) / soma1\r\nprint()\r\nprint('\\033[36m-=\\033[m'*40)\r\nprint(f'A quantidade de veículos com restrição de saída nas quintas e sextas foi de {soma}.')\r\nprint('\\033[36m-=\\033[m'*40)\r\nprint(f'O percentual de veículos com permissão de saída às quartas foi {perc:.2f}%.')\r\nprint('\\033[36m-=\\033[m'*40)\r\nif seg > tec and seg > quart and seg > quint and seg > sext:\r\n print('O dia da semana com maior quantidade de veículos com restrição de saída foi segunda.')\r\nelif tec > seg and tec > quart and tec > quint and tec > sext:\r\n print('O dia da semana com maior quantidade de veículos com restrição de saída foi terça.')\r\nelif quart > seg and quart > tec and quart > quint and quart > sext:\r\n print('O dia da semana com maior quantidade de veículos com restrição de saída foi quarta.')\r\nelif quint > seg and quint > tec and quint > quart and quint > sext:\r\n print('O dia da semana com maior quantidade de veículos com restrição de saída foi quinta.')\r\nelif sext > seg and sext > tec and sext > quart and sext > quint:\r\n print('O dia da semana com maior quantidade de veículos com restrição de 
saída foi sexta.')\r\nprint('\\033[36m-=\\033[m'*40)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Exercícios 01/Quest 36.py","file_name":"Quest 36.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"56145069","text":"#!/usr/bin/env python3\n\n'''\n:author: aleksu\n:python: python 3.6.5\n'''\n\nimport time\nfrom enum import Enum\n\nclass TestStatus(Enum):\n\tNOT_RUN \t = 0,\n\tCOMPLETE \t = 1,\n\tNOT_COMPLETE = 2\n\n# TODO: переименовать (проверить, как типы тестов называются)\nclass TestTypes(Enum):\n\tFIRST_TYPE \t= 0,\n\tSECOND_TYPE = 1\n\nclass Commands(Enum):\n\tSWITCH_MODE \t\t= 0,\n\tSET_RESISTANCE\t\t= 1,\n\tPUSH_BLUE_PEDAL \t= 2,\n\tPUSH_YELLOW_PEDAL \t= 3,\n\tGET_CURRENT_VALUE \t= 4,\n\tGET_VOLTAGE_VALUE \t= 5,\n\tSET_REGULAR_POS\t\t= 6 # ПРИМ.: не реализована обработка в ПО стенда\n\n'''\n\tПример именования тестов зависимости мощности от сопротивления:\n\t\tcut_100_to_2000_70\n\tгде:\n\t\tcut - режим резания\n\t\t100 - минимальное сопротивление, (Ом)\n\t\t2000 - максимальное сопротивление, (Ом)\n\t\t70 - рабочая мощность, (Вт)\n\tПример именования тестов зависимости мощности от положения регулятора\n\t\tcut_300\n\tгде:\n\t\tcut - режим резания\n\t\t300 - сопротивление, (Ом)\n'''\nTEST_NAMES = [\n\t\"bi_10_to_2000_35\",\t\t# 0\n\t\"bi_10_to_2000_70\",\t\t# 1\n\t\"cut_100_to_2000_35\",\t# 2\n\t\"cut_100_to_2000_70\",\t# 3\n\t\"mix_100_to_2000_20\",\t# 4\n\t\"mix_100_to_2000_35\",\t# 5\n\t\"mix_100_to_2000_40\",\t# 6\n\t\"mix_100_to_2000_70\",\t# 7\n\t\"mono_100_to_2000_15\",\t# 8\n\t\"mono_100_to_2000_20\",\t# 9\n\t\"mono_100_to_2000_30\",\t# 10\n\t\"mono_100_to_2000_40\",\t# 11\n\t\"spray_100_to_2000_15\",\t# 12\n\t\"spray_100_to_2000_30\",\t# 13\n\t\"bi_300\",\t\t\t\t# 14\n\t\"cut_300\",\t\t\t\t# 15\n\t\"mix_1000\",\t\t\t\t# 16\n\t\"mono_300\",\t\t\t\t# 17\n\t\"spay_1000\"\t\t\t\t# 18\n\t]\n\nTEST_MODES = {\n\t'bi': 'БИ-КОАГ',\n\t'cut':'РЕЗАНИЕ',\n\t'mix': 'СМЕСЬ',\n\t'mono': 'МОНО-КОАГ',\n\t'spray': 'СПРЕЙ'\n}\n\nCOAG_MODES = {\n\t'bi': 'bi_coag',\n\t'cut':'mono_coag',\n\t'mix': 'mono_coag',\n\t'mono': 'mono_coag',\n\t'spray': 'mono_coag'\n}\n\nclass HFEGTest(object):\n\tdef __init__(self, arg):\n\t\tsuper(HFEGTest, self).__init__()\n\t\tself.test_name = arg\n\t\tself.test_desc = ''\n\t\tself.abscissa_name = ''\n\t\tself.ordinate_name = ''\n\t\tself.test_mode = ''\n\t\tself.test_type = None\n\t\tself.test_status = TestStatus.NOT_RUN\n\n\t\tself.__parse_test_name()\n\t\n\tdef __parse_test_name(self):\n\t\tsplited_name = self.test_name.split('_')\n\t\tself.test_mode = COAG_MODES.get(splited_name[0])\n\n\t\tif len(splited_name) > 2:\n\t\t\tself.test_type = TestTypes.FIRST_TYPE\n\t\t\tself.test_desc = '''\n\t\t\tЗависимость выходной мощности от сопротивления\n\t\t\tнагрузки для электрохирургического режима {} в\n\t\t\tдиапазоне от {} до {} Ом при установленной мощности\n\t\t\t{} Вт.\n\t\t\t'''.format(\n\t\t\t\tTEST_MODES.get(splited_name[0]),\n\t\t\t\tsplited_name[1],\n\t\t\t\tsplited_name[3],\n\t\t\t\tsplited_name[4]\n\t\t\t\t)\n\t\t\tself.abscissa_name = 'Сопротивление нагрузки, Ом'\n\t\t\tself.ordinate_name = 'Выходная мощность, Вт'\n\t\telse:\n\t\t\tself.test_type = TestTypes.SECOND_TYPE\n\t\t\tself.test_desc = '''\n\t\t\tЗависимость выходной мощности от положения регулятора\n\t\t\tдля электрохирургического режима {} при сопротивлении\n\t\t\tнагрузки {} 
Ом.\n\t\t\t'''.format(\n\t\t\t\tTEST_MODES.get(splited_name[0]),\n\t\t\t\tsplited_name[1]\n\t\t\t\t)\n\t\t\tself.abscissa_name = 'Положение регулятора мощности'\n\t\t\tself.ordinate_name = 'Выходная мощность, Вт'\n\n\tdef run_test(self, device):\n\t\tself.test_status = TestStatus.NOT_RUN\n\n\t\tret = device.send_command(Commands.SWITCH_MODE, self.test_mode)\n\n\t\tif ret == False:\n\t\t\treturn\n\n\t\tresistance_values = []\n\t\toutput_power_values = []\n\t\tself.test_status = TestStatus.NOT_COMPLETE\n\n\t\tif self.test_type == TestTypes.FIRST_TYPE:\n\t\t\tresist = 10\t# power formula P = U^2/R, where R > 0\n\t\t\tmax_resist = 2230\n\t\t\tstep = 10\n\n\t\t\twhile resist < max_resist:\n\t\t\t\tret = device.send_command(Commands.SET_RESISTANCE, resist)\n\n\t\t\t\tif ret == False:\n\t\t\t\t\treturn\n\n\t\t\t\to_voltage = device.send_command(Commands.GET_VOLTAGE_VALUE)\n\n\t\t\t\tif o_voltage == -1:\n\t\t\t\t\treturn\n\n\t\t\t\toutput_power = (o_voltage**2)/resist\n\n\t\t\t\tresistance_values.append(resist)\n\t\t\t\toutput_power_values.append(output_power)\n\n\t\t\t\tresist += step\n\t\telse:\n\t\t\t# fixed load resistance, parsed from the test name (e.g. 'cut_300')\n\t\t\tresist = int(self.test_name.split('_')[1])\n\t\t\treg_pos = 0 # regulator position\n\t\t\tmax_pos = 70\n\t\t\tstep = max_pos//10\n\n\t\t\twhile reg_pos < max_pos:\n\t\t\t\tret = device.send_command(Commands.SET_REGULAR_POS, reg_pos)\n\n\t\t\t\tif ret == False:\n\t\t\t\t\treturn\n\n\t\t\t\to_voltage = device.send_command(Commands.GET_VOLTAGE_VALUE)\n\n\t\t\t\tif o_voltage == -1:\n\t\t\t\t\treturn\n\n\t\t\t\toutput_power = (o_voltage**2)/resist\n\n\t\t\t\tresistance_values.append(reg_pos)\t# x-axis for this test type is the regulator position\n\t\t\t\toutput_power_values.append(output_power)\n\n\t\t\t\treg_pos += step\n\n\t\tself.test_status = TestStatus.COMPLETE\n\n\t\treturn resistance_values, output_power_values\n\nTESTS_FOR_HFEG_VER_1 = [\n\tHFEGTest(TEST_NAMES[14]),\n\tHFEGTest(TEST_NAMES[0]),\n\tHFEGTest(TEST_NAMES[1])\n]\n\nTESTS_FOR_HFEG_VER_2 = [\n\tHFEGTest(TEST_NAMES[2]),\n\tHFEGTest(TEST_NAMES[3]),\n\tHFEGTest(TEST_NAMES[4]),\n\tHFEGTest(TEST_NAMES[5]),\n\tHFEGTest(TEST_NAMES[8]),\n\tHFEGTest(TEST_NAMES[10]),\n\tHFEGTest(TEST_NAMES[12]),\n\tHFEGTest(TEST_NAMES[13]),\n\tHFEGTest(TEST_NAMES[0]),\n\tHFEGTest(TEST_NAMES[1])\n]\n\nTESTS_FOR_HFEG_VER_3 = [\n\tHFEGTest(TEST_NAMES[2]),\n\tHFEGTest(TEST_NAMES[3]),\n\tHFEGTest(TEST_NAMES[4]),\n\tHFEGTest(TEST_NAMES[5]),\n\tHFEGTest(TEST_NAMES[8]),\n\tHFEGTest(TEST_NAMES[10]),\n\tHFEGTest(TEST_NAMES[12]),\n\tHFEGTest(TEST_NAMES[13]),\n\tHFEGTest(TEST_NAMES[0]),\n\tHFEGTest(TEST_NAMES[1])\n]\n","sub_path":"app/hfeg_tests.py","file_name":"hfeg_tests.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"375217775","text":"# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlength = 10\nlarge_angle = 150\nMIN = length\n#MAX = 1000\n#B = MAX-MIN\n#17\ndef histgram(event,MIN):\n    MAX = 1000#int(np.max(event))\n    hist = np.zeros(MAX-MIN+1)\n    for i in range(MIN,MAX+1):\n        hist[i-MIN] = len(np.where(event == i)[0])\n    return hist/np.sum(hist)\n\ndef frequency(event,MIN):\n    MAX = int(np.max(event))\n    hist = np.zeros(MAX-MIN+1)\n    for i in range(MIN,MAX+1):\n        hist[i-MIN] = len(np.where(event == i)[0])\n    return 1/(np.sum(hist/np.sum(hist)*np.arange(MIN,MAX+1))*0.08)\n\n\n\ndir_place='17_center'\nsave_place = '/Users/yamaguchi-s/Desktop/Research/Celegans/real_data/data/'+dir_place+'/result/large_turn/length_'+str(length)+'/event/'\nsave_name = 
'largeturn_17_'+str(length)+'_'+str(large_angle)+'.npy'\nEvent_summary_17 = np.load(save_place+save_name)\n\n\n#20\ndir_place='20_center'\nsave_place = '/Users/yamaguchi-s/Desktop/Research/Celegans/real_data/data/'+dir_place+'/result/large_turn/length_'+str(length)+'/event/'\nsave_name = 'largeturn_20_'+str(length)+'_'+str(large_angle)+'.npy'\nEvent_summary_20 = np.load(save_place+save_name)\n#print Event_summary_20\n\n\n#23\ndir_place='23_center'\nsave_place = '/Users/yamaguchi-s/Desktop/Research/Celegans/real_data/data/'+dir_place+'/result/large_turn/length_'+str(length)+'/event/'\nsave_name = 'largeturn_23_'+str(length)+'_'+str(large_angle)+'.npy'\nEvent_summary_23 = np.load(save_place+save_name)\n\n\ndir_place='no_gradient'\nsave_place = '/Users/yamaguchi-s/Desktop/Research/Celegans/real_data/data/'+dir_place+'/result/large_turn/length_'+str(length)+'/event/'\nsave_name = 'largeturn_no_gradient_'+str(length)+'_'+str(large_angle)+'.npy'\nEvent_summary = np.load(save_place+save_name)\n\nplt.xscale(\"log\")\nhist_no = histgram(Event_summary,MIN)\nhist_17 = histgram(Event_summary_17,MIN)\nhist_20 = histgram(Event_summary_20,MIN)\nhist_23 = histgram(Event_summary_23,MIN)\nplt.plot(np.arange(MIN,len(hist_17)+MIN),hist_17,label=\"17_C\")\nplt.plot(np.arange(MIN,len(hist_20)+MIN),hist_20,label=\"20_C\")\nplt.plot(np.arange(MIN,len(hist_23)+MIN),hist_23,label=\"23_C\")\nplt.plot(np.arange(MIN,len(hist_no)+MIN),hist_no,label=\"no_gradient\")\nplt.legend(loc='upper right')\nplt.savefig('ALL_'+str(length)+'_'+str(large_angle)+'_xlog.png')\nplt.show()\n\"\"\"\n\nfreq_17 = frequency(Event_summary_17,MIN)\nfreq_20 = frequency(Event_summary_20,MIN)\nfreq_23 = frequency(Event_summary_23,MIN)\nfreq_no = frequency(Event_summary,MIN)\nprint '==========================='\nprint 'large_angle:',large_angle\nprint 'length:',length\nprint '---------------------------'\nprint '17_center:',freq_17\nprint '20_center:',freq_20\nprint '23_center:',freq_23\nprint 'no_gradient:',freq_no\nprint '==========================='\n\"\"\"\n","sub_path":"source/real/data_analyze/visualization/visualize_largeturn_log.py","file_name":"visualize_largeturn_log.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273607685","text":"#!/usr/bin/python\n\n'''@svm_csv_converter.py\n\nThis file restructures only the supplied dataset(s), from a csv file to a\npython dictionary format.\n\n'''\n\nimport csv\nfrom itertools import islice\nfrom brain.validator.validate_dataset import Validate_Dataset\nfrom log.logger import Logger\n\n\ndef svm_csv_converter(raw_data):\n '''@svm_csv_converter\n\n This method converts the supplied csv file-object, intended for an svm\n model, to a python dictionary.\n\n @raw_data, generally a file (or json string) containing the raw dataset(s),\n to be used when computing a corresponding model. If this argument is a\n file, it needs to be closed.\n\n @list_observation_label, is a list containing dependent variable labels.\n\n Note: we use the 'Universal Newline Support' with the 'U' parameter when\n opening 'raw_data'. 
This allows newlines to be understood regardless of whether the\n    newline character was created on OS X, Windows, or Linux.\n\n    Note: since 'row' is a list, with one comma-delimited string element, the\n    following line is required in this method:\n\n        row = row[0].split(',')\n\n    '''\n\n    feature_count = None\n    list_dataset = []\n    list_observation_label = []\n    list_feature_label = []\n    logger = Logger(__name__, 'error', 'error')\n\n    # open temporary 'csvfile' reader object\n    dataset_reader = csv.reader(\n        raw_data,\n        delimiter=' ',\n        quotechar='|'\n    )\n\n    # iterate first row of csvfile\n    for row in islice(dataset_reader, 0, 1):\n\n        # iterate each column in a given row\n        row_indep_label = row[0].split(',')\n        for value in islice(row_indep_label, 1, None):\n            list_feature_label.append(str(value))\n\n    # iterate all rows of csvfile\n    for dep_index, row in enumerate(islice(dataset_reader, 0, None)):\n\n        # iterate first column of each row (except first)\n        row_dep_label = row[0].split(',')\n        for value in row_dep_label[:1]:\n            list_observation_label.append(str(value))\n\n        # generalized feature count in an observation\n        row_indep_variable = row[0].split(',')\n        if not feature_count:\n            feature_count = len(row_indep_variable) - 1\n\n        # iterate each column in a given row\n        for indep_index, value in enumerate(\n            islice(row_indep_variable, 1, None)\n        ):\n\n            try:\n                validate = Validate_Dataset(value)\n                validate.validate_value()\n\n                list_error = validate.get_errors()\n                if list_error:\n                    logger.log(list_error)\n                    return None\n                else:\n                    value = float(value)\n            except Exception as error:\n                logger.log(error)\n                return False\n\n            list_dataset.append({\n                'dep_variable_label': list_observation_label[dep_index],\n                'indep_variable_label': list_feature_label[indep_index],\n                'indep_variable_value': value\n            })\n\n    # close file, save observation labels, and return\n    raw_data.close()\n    return {\n        'dataset': list_dataset,\n        'observation_labels': list_observation_label,\n        'feature_count': feature_count\n    }\n","sub_path":"brain/converter/dataset/svm_csv_converter.py","file_name":"svm_csv_converter.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"557960972","text":"from datetime import datetime\nimport calendar\nimport pytz\nimport dateutil.parser\n\n\ndef datetime_with_utc_tz(dt=None):\n    \"\"\"\n    Convert datetime object to a UTC datetime object.\n    :param dt:\n    :return:\n    \"\"\"\n    dt = dt or datetime.utcnow()\n    if not isinstance(dt, datetime):\n        return dt\n    elif dt.tzinfo:\n        dt = dt.astimezone(pytz.utc)\n    else:\n        dt = dt.replace(tzinfo=pytz.utc)\n    return dt\n\n\ndef iso_utcz_strftime(dt):\n    \"\"\"\n    Get iso formatted utc date time string formatted with 'Z' notation.\n    :param dt:\n    :return:\n    \"\"\"\n    if not isinstance(dt, datetime):\n        raise ValueError(\"Datetime object expected.\")\n    dt = datetime_with_utc_tz(dt)\n    return datetime.strftime(dt, \"%Y-%m-%dT%H:%M:%SZ\")\n\n\ndef iso_strptime(dt_string):\n    \"\"\"\n    Parse an iso8601 date string.\n    :param dt_string:\n    :return:\n    \"\"\"\n    dt = dateutil.parser.parse(dt_string)\n    if not dt.tzinfo:\n        dt = dt.replace(tzinfo=pytz.utc)\n    return dt\n\n\ndef to_unix_ts(a_datetime=None):\n    \"\"\" Get a unix timestamp for a datetime instance. Naive input is treated\n    as UTC (matching the utcnow default); calendar.timegm is used because\n    time.mktime would re-interpret the time tuple in the local timezone. \"\"\"\n    if not a_datetime:\n        a_datetime = datetime.utcnow()\n    return calendar.timegm(a_datetime.timetuple())\n\n\ndef datetime_from_epoch(timestamp):\n    \"\"\"\n    Get datetime from epoch.\n    :param int timestamp: seconds or milliseconds since epoch\n    :returns: datetime\n    :raises: ValueError, TypeError\n    \"\"\"\n    if timestamp > 9999999999:\n        timestamp = timestamp / 1000\n    return datetime.utcfromtimestamp(timestamp)\n","sub_path":"external/timeutils.py","file_name":"timeutils.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"223628367","text":"import os\nimport requests\nimport shutil\nimport tarfile\n\nfilepath = '../vendor/'\n\nfilename = 'oidn-1.4.1.x86_64.linux'\nurl = 'https://github.com/OpenImageDenoise/oidn/releases/download/v1.4.1/' + filename + '.tar.gz'\nr = requests.get(url, allow_redirects=True)\n\nopen('oidn-linux.tar.gz', 'wb').write(r.content)\n\ntar = tarfile.open('oidn-linux.tar.gz', \"r:gz\")\ntar.extractall()\ntar.close()\n\nos.rename(filename, \"oidn-linux\")\nos.remove(\"oidn-linux.tar.gz\")\n\nshutil.move(\"oidn-linux\", filepath + \"oidn-linux\")","sub_path":"scripts/DownloadLinuxOIDN.py","file_name":"DownloadLinuxOIDN.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"393017294","text":"import disco\n\ndef estimate_map(e, params):\n\tx=map(float,e[1].split(' '))\n\ty=x[params.y_id]\n\tdel x[params.y_id]\n\n\treturn [(e[0],(x,y))]\n\n\ndef estimate_combiner(k, v, vals, done, params):\n\tif vals=={}: \n\t\tvals['x']=[0.0]*len(v[0])\n\t\tvals['x2']=[0.0]*len(v[0])\n\t\tvals['xy']=[0.0]*len(v[0])\n\t\tvals['y']=0.0\n\t\tvals['c']=0\n\n\tif done:\n\t\treturn [(k, ' '.
join(map(repr,vals['x'] + vals['x2'] + vals['xy'] + [ vals['y'], vals['c'] ])))]\n\n\t# accumulate per-feature sums over the feature vector, not over the (x, y) pair\n\tfor i in range(len(v[0])):\n\t\tvals['x'][i]+=v[0][i]\n\t\tvals['x2'][i]+=v[0][i]*v[0][i]\n\t\tvals['xy'][i]+=v[0][i]*v[1]\n\tvals['y']+=v[1]\n\tvals['c']+=1\n\n\n\ndef predict_map(e, params):\n\tx=map(float,e[1].split(' '))\n\treturn [(e[0],' '.join(map(repr,[params[i][0]+params[i][1]*x[i] for i in range(len(params))])))]\n\n\ndef estimate(input, y_id, host=\"disco://localhost\", map_reader=disco.chain_reader):\n\tresults = disco.job(host, name = 'naive_linear_regression_estimate',\n\t\t\t input_files = input, \n\t\t\t map_reader = map_reader, \n\t\t\t fun_map = estimate_map,\n\t\t\t combiner = estimate_combiner, \n\t\t\t params=disco.Params(y_id=y_id),\n\t\t\t sort = False, clean = False)\n\n\tc=0\n\ty=0.0\n\tl=None\n\tx=None\n\tx2=None\n\txy=None\n\n\tfor key,value in disco.result_iterator(results):\n\t\tv=map(float,value.split(' '))\n\n\t\tif l is None:\n\t\t\tl=(len(v)-2)/3\n\t\t\tx=[0.0]*l\n\t\t\tx2=[0.0]*l\n\t\t\txy=[0.0]*l\n\n\t\tc+=v[-1]\n\t\ty+=v[-2]\n\t\tfor i in range(l):\n\t\t\tx[i]+=v[i]\n\t\t\tx2[i]+=v[l+i]\n\t\t\txy[i]+=v[2*l+i]\n\n\t# per-feature least squares: b = (n*Sxy - Sx*Sy) / (n*Sx2 - Sx^2)\n\tb = [ (c*xy[i] - x[i]*y)/(c*x2[i]-x[i]*x[i]) for i in range(l) ]\n\ta = [ (y-b[i]*x[i])/c for i in range(l) ]\n\n\treturn zip(*(a,b))\n\n\ndef predict(input, model, host=\"disco://localhost\", map_reader=disco.chain_reader):\n\tresults = disco.job(host, name = 'naive_linear_regression_predict',\n\t\t\t input_files = input, \n\t\t\t map_reader = map_reader, \n\t\t\t fun_map = predict_map,\n\t\t\t params=model,\n\t\t\t sort = False, clean = False)\n\n\treturn results\n","sub_path":"examples/datamining/naive_linreg.py","file_name":"naive_linreg.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"142673335","text":"# -*- coding:utf-8 -*-\n\nimport struct,os,fnmatch,re\n\nLOG = open('ErrorLog.bin', 'wb')\n\n# Walk a directory tree and return the list of file paths\ndef walk(adr):\n    mylist=[]\n    for root,dirs,files in os.walk(adr):\n        for name in files:\n            adrlist=os.path.join(root, name)\n            mylist.append(adrlist)\n    return mylist\n\n# Convert a 4-byte little-endian value to an integer\ndef byte2int(byte):\n    long_tuple=struct.unpack('<I',byte)\n    long = long_tuple[0]\n    return long\n\n# Convert an integer to a 4-byte little-endian value\ndef int2byte(num):\n    return struct.pack('<I',num)\n\ndef FormatString(name1, name2, string, count):\n    # Output format:\n    # marker + line index + marker + string\n    '''\n    res = \"★%08d★\\n%s\\n\"%(count, string+'\\n')\n    \n    res = \"☆%08d☆\\n%s★%08d★\\n%s\\n\"%(count, string+'\\n', count, string+'\\n')\n    '''\n    '''\n    res = \"○%08d○%s○\\n%s●%08d●%s●\\n%s\\n\"%(count, name, string+'\\n', count, name, string+'\\n')\n    '''\n    res = \"●%08d●%s○%s○\\n%s\\n\"%(count, name1, name2, string+'\\n')\n    \n    return res\n\n\n# Scan the binary file's index table and collect name/text start offsets\ndef GetOffset(src):\n    size = len(src.read())\n    src.seek(0x10)\n    indexoffset = byte2int(src.read(4))\n    count = (size - indexoffset) // 4\n    src.seek(indexoffset)\n    buff = src.read()\n    offset_list = []\n    off = 0\n    j = 0\n    for i in range(0, count):\n        if buff[j:j+4] == b'\\x69\\x00\\x00\\x00':\n            offset_list.append(indexoffset+j+4)\n        j += 4\n    return offset_list\n\n# Given an offset, return the null-terminated string found there\ndef DumpString(src, offset):\n    if offset == 0: return ''\n    src.seek(offset)\n    # skip entries that do not look like text\n    temp = src.read(1)\n    if temp == b'\\x00':\n        return ''\n    else:\n        temp = src.read(1)\n        if temp == b'\\x00':\n            return ''\n\n    src.seek(offset) \n    string = ''\n    bytestream = b''\n    while True:\n        byte = src.read(1)\n        if byte == b'\\x00':\n            try:\n                string += str(bytestream, encoding='sjis', errors='ignore')\n            except:\n                
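# defensive: if decoding still fails, log the raw bytes and fall back to\n                # a marker string recording the offset.\n                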
global LOG\n LOG.write(bytestream+b'\\n')\n string = \"Error:%08x\\n\"%(offset)\n bytestream = b''\n break \n else:\n bytestream += byte\n return string\n\nnum = 0\n#将地址列表转换为文本列表\ndef Convert(src, offset_list):\n str_list = []\n for off in offset_list:\n src.seek(off)\n sname_off = byte2int(src.read(4))\n rname_off = byte2int(src.read(4))\n src.seek(4,1)\n textoff = byte2int(src.read(4))\n \n show_name = DumpString(src, sname_off)\n real_name = DumpString(src, rname_off)\n text = DumpString(src, textoff)\n global num\n num += len(text)\n str_list.append([show_name, real_name, text])\n return str_list\n\ndef Main():\n fl = walk('bin')\n for fn in fl:\n dstname ='script' + fn[3:-4] + '.txt'\n dst = open(dstname, 'w', encoding='utf16')\n src = open(fn, 'rb')\n\n offset_list = GetOffset(src)\n str_list = Convert(src, offset_list)\n j = 0\n for [sname, rname, text] in str_list:\n res = FormatString(sname, rname, text, j)\n dst.write(res)\n j += 1\n print(os.path.basename(fn) + '-->' + os.path.basename(dstname))\n src.close()\n dst.close()\n \n fl = walk('script')\n for fn in fl:\n if os.path.getsize(fn) < 1:\n os.remove(fn)\n\n'''\nsrc = open('00_common_01_02.bin', 'rb')\noffset_list = GetOffset(src)\nprint(offset_list)\nstr_list = Convert(src, offset_list)\n\n'''\n\nMain()\nLOG.close()\nif os.path.getsize('ErrorLog.bin') < 1:\n os.remove('ErrorLog.bin')\n\nprint(num)\nprint(num*2/(1024*1024))\ninput('\\n导出完成\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Tactics/Tactics_text_out.py","file_name":"Tactics_text_out.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491184807","text":"from math import cos, sin, radians\n\nimport pyglet\n\nfrom pyglet.gl import GL_TRIANGLES\n\n\ndef _calc_angle(rotation):\n r = -radians(rotation)\n return round(cos(r), 3), round(sin(r), 3)\n\n\ndef _make_corner(x, y, start_deg, end_deg, border_width, color1, color2):\n b = border_width\n c1 = color1\n c2 = color2\n points = []\n\n for i in range(start_deg, end_deg+1, 15):\n points.extend(_calc_angle(i))\n\n vertices = []\n colors = []\n last = None\n\n for point in zip(points[0::2], points[1::2]):\n if not last:\n last = point\n continue\n triangle = x, y, x+last[0]*b, y+last[1]*b, x+point[0]*b, y+point[1]*b\n color = c1 + c2 + c2\n vertices.extend(triangle)\n colors.extend(color)\n last = point\n\n return vertices, colors\n\n\ndef _create_left_right(x, y, width, height, color1, color2):\n verts = [x, y, x + width, y, x + width, y + height, x + width, y + height, x, y + height, x, y]\n colors = list(color1 + color2 + color2 + color2 + color1 + color1)\n return verts, colors\n\n\ndef _create_top_bottom(x, y, width, height, color1, color2):\n verts = [x, y, x + width, y, x + width, y + height, x + width, y + height, x, y + height, x, y]\n colors = list(color1 + color1 + color2 + color2 + color2 + color1)\n return verts, colors\n\n\ndef calculate_frame(x, y, width, height, border=2, menusize=10, color1=(25, 25, 25), color2=(50, 50, 50)):\n b = border\n m = menusize\n\n tlcv, tlcc = _make_corner(x+b, y+height-b, 180, 270, border, color1, color2)\n trcv, trcc = _make_corner(x+width-b, y+height-b, 270, 360, border, color1, color2)\n brcv, brcc = _make_corner(x+width-b, y+b, 0, 90, border, color1, color2)\n blcv, blcc = _make_corner(x+b, y+b, 90, 180, border, color1, color2)\n\n tb, tc = _create_top_bottom(x + b, y + height - m, width - b - b, m, color1, color2)\n lb, lc = _create_left_right(x, y + b, b, 
height - b - b, color2, color1)\n rb, rc = _create_left_right(x + width - b, y + b, b, height - b - b, color1, color2)\n bb, bc = _create_top_bottom(x + b, y, width - b - b, b, color2, color1)\n\n vertices = brcv + blcv + tlcv + trcv + lb + bb + rb + tb\n colors = brcc + blcc + tlcc + trcc + lc + bc + rc + tc\n\n return vertices, colors\n\n\nclass Frame:\n def __init__(self, x, y, width, height, window, batch=None, group=None):\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self._window = window\n self._border = 3\n self._menusize = 25\n self._color1 = 25, 25, 25\n self._color2 = 50, 50, 50\n\n self._batch = batch or pyglet.graphics.Batch()\n self._group = group or pyglet.graphics.Group()\n self._title = None # pyglet.text.Label\n\n self._widgets = []\n\n self.in_update = False\n\n verts, colors = calculate_frame(x=x, y=y, width=width, height=height, border=self._border,\n menusize=self._menusize, color1=self._color1, color2=self._color2)\n\n self.vlist = self._batch.add(len(verts)//2, GL_TRIANGLES, None, ('v2f', verts), ('c3b', colors))\n\n self._window.push_handlers(self)\n\n def add_widget(self, widget):\n self._widgets.append(widget)\n\n def check_hit(self, x, y):\n return (self._x < x < self._x + self._width and\n self._y + self._height - self._menusize - self._border < y < self._y + self._height)\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n if self.check_hit(x, y):\n self.in_update = True\n\n def on_mouse_release(self, x, y, buttons, modifiers):\n self.in_update = False\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n if not self.in_update:\n return\n\n vertices = self.vlist.vertices[:]\n vertices[0::2] = [x + dx for x in vertices[0::2]]\n vertices[1::2] = [y + dy for y in vertices[1::2]]\n self.vlist.vertices[:] = vertices\n self._x += dx\n self._y += dy\n","sub_path":"bordermaker.py","file_name":"bordermaker.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649480429","text":"\n''' Function tu control with the keybord,we are not going to use,but \nto prove that all works is great\nYou have to take the init in the functio if you use this function ''' \n# sudo apt-get install python3-tkinter\nimport RPi.GPIO as gpio \nimport time\nimport sys\nimport random \nimport Tkinter as tk\n\n\n# We initialize the pins \n\ndef init():\n\tgpio.setmode(gpio.BOARD)\n\tgpio.setup(7, gpio.OUT)\n\tgpio.setup(11, gpio.OUT)\n\tgpio.setup(13, gpio.OUT)\n\tgpio.setup(15, gpio.OUT)\n\n\n \n# With this function we select de pins to make the robot go forwar\n# tf is the variable for time in secods \ndef forward(tf):\n\tinit()\n\tgpio.output(7 , False)\n\tgpio.output(11 , True)\n\tgpio.output(13 , True)\n\tgpio.output(15, False)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\n# With this function we select de pins to make the robot go reverse\ndef reverse(tf):\n\tinit()\n\tgpio.output(7 , True)\n\tgpio.output(11, False)\n\tgpio.output(13, False)\n\tgpio.output(15, True)\n\ttime.sleep(tf)\n\tgpio.cleanup() # Terminamos la funcion\n\n# With this function we select de pins to make the robot turn left\ndef turn_left(tf):\n\tinit()\n\tgpio.output(7 , True)\n\tgpio.output(11 , True)\n\tgpio.output(13 , True)\n\tgpio.output(15 , False)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\n# With this function we select de pins to make the robot turn right\ndef turn_right(tf):\n\tinit()\n\tgpio.output(7 , False)\n\tgpio.output(11, True)\n\tgpio.output(13, False)\n\tgpio.output(15, 
False)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\ndef pivot_left(tf):\n\tinit()\n\tgpio.output(7 , True)\n\tgpio.output(11, False)\n\tgpio.output(13, True)\n\tgpio.output(15,False)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\ndef pivot_right(tf):\n\tinit()\n\tgpio.output(7 , False)\n\tgpio.output(11, True)\n\tgpio.output(13, False)\n\tgpio.output(15, True)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\n# tf has to be passed in here too; it was previously undefined inside stop()\ndef stop(tf):\n\tinit()\n\tgpio.output(7 , False)\n\tgpio.output(11, False)\n\tgpio.output(13, False)\n\tgpio.output(15, False)\n\ttime.sleep(tf)\n\tgpio.cleanup()\n\t\n\n\n# forward(1) would go forward for 1 second\n# reverse(1) would go in reverse for 1 second\n# turn_left(1) would turn left for 1 second\n# turn_right(1) would turn right for 1 second\n# pivot_left(1) would pivot left for 1 second\n# pivot_right(1) would pivot right for 1 second\n\n\n\n\n# Distance sensor https://www.youtube.com/watch?v=HutxiWnX26w\n\n## This is supposed to be working this afternoon, but I am not sure about it yet\n\ndef distance(measure='cm'):\n    try:\n        gpio.setmode(gpio.BOARD)\n        gpio.setup(12, gpio.OUT)\n        gpio.setup(16, gpio.IN)\n        \n        gpio.output(12, False)\n        while gpio.input(16) == 0:\n            nosig = time.time()\n\n        while gpio.input(16) == 1:\n            sig = time.time()\n\n        tl = sig - nosig\n\n        if measure == 'cm':\n            distance = tl / 0.000058\n        elif measure == 'in':\n            distance = tl / 0.000148\n        else:\n            print('improper choice of measurement: in or cm')\n            distance = None\n\n        gpio.cleanup()\n        return distance\n    except:\n        distance = 100\n        gpio.cleanup()\n        return distance\n\n\t\t\nif __name__ == \"__main__\":\n    print(distance('cm'))\n\n\n\n\n\ndef check_front():\n\tinit()\n\tdist = distance()\n\t# this function tries to avoid crashing into obstacles\n\tif dist < 15:\n\t\tprint('Too close,' , dist)\n\t\tinit()\n\t\treverse(2)\n\t\tturn_left(0.5)\n\t\tforward(2)\n\t\tturn_right(1)\n\n\n\t\tdist = distance()\n\t\tif dist < 15:\n\t\t\tprint('Too close,' ,dist)\n\t\t\tinit()\n\t\t\treverse(2)\n\t\t\tturn_right(0.5)\n\t\t\tforward(2)\n\t\t\tturn_right(1)\n\t\t\tdist= distance()\n\t\t\tif dist < 15 :\n\t\t\t\tprint('Too close,', dist)\n\t\t\t\tinit()\n\t\t\t\treverse(2)\n\t\t\t\tpivot_left(2)\n\t\t\t\tforward(2)\n\t\t\t\t\n\t\t\t\tif dist < 15 :\n\t\t\t\t\t print ('I got stuck,', dist)\n\t\t\t\t\t sys.exit()\n\n\t\t\n\n\ndef key_input(event):\n\t\n\t\n\tprint ('Key: ' , event.char)\n\tkey_press = event.char \n\tsleep_time = 0.030 \n\t# Keyboard commands mapped to each function \n\tif key_press.lower() == 'w':\n\t\tforward(sleep_time)\n\telif key_press.lower() == 's':\n\t\treverse(sleep_time)\n\telif key_press.lower() == 'a':\n\t\tturn_left(sleep_time)\n\telif key_press.lower() == 'd':\n\t\tturn_right(sleep_time)\n\telif key_press.lower() == 'q':\n\t\tpivot_left(sleep_time)\n\telif key_press.lower() == 'e':\n\t\tpivot_right(sleep_time)\n\telse:\n\t\tpass\n\ncommand = tk.Tk()\ncommand.bind(' 
tgt_order - slack\n    assert eoc_rec_grad_x.order_estimate() > tgt_order_grad - grad_slack\n\n\n@pytest.mark.parametrize(\"knl, local_expn_class, mpole_expn_class\", [\n    (LaplaceKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion),\n    (LaplaceKernel(2), LaplaceConformingVolumeTaylorLocalExpansion,\n     LaplaceConformingVolumeTaylorMultipoleExpansion),\n    (HelmholtzKernel(2), VolumeTaylorLocalExpansion, VolumeTaylorMultipoleExpansion),\n    (HelmholtzKernel(2), HelmholtzConformingVolumeTaylorLocalExpansion,\n     HelmholtzConformingVolumeTaylorMultipoleExpansion),\n    (HelmholtzKernel(2), H2DLocalExpansion, H2DMultipoleExpansion)\n    ])\ndef test_translations(ctx_getter, 
knl, local_expn_class, mpole_expn_class):\n logging.basicConfig(level=logging.INFO)\n\n from sympy.core.cache import clear_cache\n clear_cache()\n\n ctx = ctx_getter()\n queue = cl.CommandQueue(ctx)\n\n np.random.seed(17)\n\n res = 20\n nsources = 15\n\n out_kernels = [knl]\n\n extra_kwargs = {}\n if isinstance(knl, HelmholtzKernel):\n extra_kwargs[\"k\"] = 0.05\n\n # Just to make sure things also work away from the origin\n origin = np.array([2, 1], np.float64)\n sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64))\n + origin[:, np.newaxis])\n strengths = np.ones(nsources, dtype=np.float64) * (1/nsources)\n\n pconv_verifier_p2m2p = PConvergenceVerifier()\n pconv_verifier_p2m2m2p = PConvergenceVerifier()\n pconv_verifier_p2m2m2l2p = PConvergenceVerifier()\n pconv_verifier_full = PConvergenceVerifier()\n\n from sumpy.visualization import FieldPlotter\n\n eval_offset = np.array([5.5, 0.0])\n\n centers = (np.array(\n [\n # box 0: particles, first mpole here\n [0, 0],\n\n # box 1: second mpole here\n np.array([-0.2, 0.1], np.float64),\n\n # box 2: first local here\n eval_offset + np.array([0.3, -0.2], np.float64),\n\n # box 3: second local and eval here\n eval_offset\n ],\n dtype=np.float64) + origin).T.copy()\n\n del eval_offset\n\n from sumpy.expansion import VolumeTaylorExpansionBase\n\n if isinstance(knl, HelmholtzKernel) and \\\n issubclass(local_expn_class, VolumeTaylorExpansionBase):\n # FIXME: Embarrassing--but we run out of memory for higher orders.\n orders = [2, 3]\n else:\n orders = [2, 3, 4]\n nboxes = centers.shape[-1]\n\n def eval_at(e2p, source_box_nr):\n e2p_target_boxes = np.array([source_box_nr], dtype=np.int32)\n\n # These are indexed by global box numbers.\n e2p_box_target_starts = np.array([0, 0, 0, 0], dtype=np.int32)\n e2p_box_target_counts_nonchild = np.array([0, 0, 0, 0],\n dtype=np.int32)\n e2p_box_target_counts_nonchild[source_box_nr] = ntargets\n\n evt, (pot,) = e2p(\n queue,\n\n src_expansions=mpoles,\n src_base_ibox=0,\n\n target_boxes=e2p_target_boxes,\n box_target_starts=e2p_box_target_starts,\n box_target_counts_nonchild=e2p_box_target_counts_nonchild,\n centers=centers,\n targets=targets,\n out_host=True, **extra_kwargs\n )\n\n return pot\n\n for order in orders:\n m_expn = mpole_expn_class(knl, order=order)\n l_expn = local_expn_class(knl, order=order)\n\n from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P, E2EFromCSR\n p2m = P2EFromSingleBox(ctx, m_expn)\n m2m = E2EFromCSR(ctx, m_expn, m_expn)\n m2p = E2PFromSingleBox(ctx, m_expn, out_kernels)\n m2l = E2EFromCSR(ctx, m_expn, l_expn)\n l2l = E2EFromCSR(ctx, l_expn, l_expn)\n l2p = E2PFromSingleBox(ctx, l_expn, out_kernels)\n p2p = P2P(ctx, out_kernels, exclude_self=False)\n\n fp = FieldPlotter(centers[:, -1], extent=0.3, npoints=res)\n targets = fp.points\n\n # {{{ compute (direct) reference solution\n\n evt, (pot_direct,) = p2p(\n queue,\n targets, sources, (strengths,),\n out_host=True, **extra_kwargs)\n\n # }}}\n\n # {{{ apply P2M\n\n p2m_source_boxes = np.array([0], dtype=np.int32)\n\n # These are indexed by global box numbers.\n p2m_box_source_starts = np.array([0, 0, 0, 0], dtype=np.int32)\n p2m_box_source_counts_nonchild = np.array([nsources, 0, 0, 0],\n dtype=np.int32)\n\n evt, (mpoles,) = p2m(queue,\n source_boxes=p2m_source_boxes,\n box_source_starts=p2m_box_source_starts,\n box_source_counts_nonchild=p2m_box_source_counts_nonchild,\n centers=centers,\n sources=sources,\n strengths=strengths,\n nboxes=nboxes,\n\n tgt_base_ibox=0,\n\n #flags=\"print_hl_wrapper\",\n 
out_host=True, **extra_kwargs)\n\n # }}}\n\n ntargets = targets.shape[-1]\n\n pot = eval_at(m2p, 0)\n\n err = la.norm((pot - pot_direct)/res**2)\n err = err / (la.norm(pot_direct) / res**2)\n\n pconv_verifier_p2m2p.add_data_point(order, err)\n\n # {{{ apply M2M\n\n m2m_target_boxes = np.array([1], dtype=np.int32)\n m2m_src_box_starts = np.array([0, 1], dtype=np.int32)\n m2m_src_box_lists = np.array([0], dtype=np.int32)\n\n evt, (mpoles,) = m2m(queue,\n src_expansions=mpoles,\n src_base_ibox=0,\n tgt_base_ibox=0,\n ntgt_level_boxes=mpoles.shape[0],\n\n target_boxes=m2m_target_boxes,\n\n src_box_starts=m2m_src_box_starts,\n src_box_lists=m2m_src_box_lists,\n centers=centers,\n #flags=\"print_hl_cl\",\n out_host=True, **extra_kwargs)\n\n # }}}\n\n pot = eval_at(m2p, 1)\n\n err = la.norm((pot - pot_direct)/res**2)\n err = err / (la.norm(pot_direct) / res**2)\n\n pconv_verifier_p2m2m2p.add_data_point(order, err)\n\n # {{{ apply M2L\n\n m2l_target_boxes = np.array([2], dtype=np.int32)\n m2l_src_box_starts = np.array([0, 1], dtype=np.int32)\n m2l_src_box_lists = np.array([1], dtype=np.int32)\n\n evt, (mpoles,) = m2l(queue,\n src_expansions=mpoles,\n src_base_ibox=0,\n tgt_base_ibox=0,\n ntgt_level_boxes=mpoles.shape[0],\n\n target_boxes=m2l_target_boxes,\n src_box_starts=m2l_src_box_starts,\n src_box_lists=m2l_src_box_lists,\n centers=centers,\n #flags=\"print_hl_cl\",\n out_host=True, **extra_kwargs)\n\n # }}}\n\n pot = eval_at(l2p, 2)\n\n err = la.norm((pot - pot_direct)/res**2)\n err = err / (la.norm(pot_direct) / res**2)\n\n pconv_verifier_p2m2m2l2p.add_data_point(order, err)\n\n # {{{ apply L2L\n\n l2l_target_boxes = np.array([3], dtype=np.int32)\n l2l_src_box_starts = np.array([0, 1], dtype=np.int32)\n l2l_src_box_lists = np.array([2], dtype=np.int32)\n\n evt, (mpoles,) = l2l(queue,\n src_expansions=mpoles,\n src_base_ibox=0,\n tgt_base_ibox=0,\n ntgt_level_boxes=mpoles.shape[0],\n\n target_boxes=l2l_target_boxes,\n src_box_starts=l2l_src_box_starts,\n src_box_lists=l2l_src_box_lists,\n centers=centers,\n #flags=\"print_hl_wrapper\",\n out_host=True, **extra_kwargs)\n\n # }}}\n\n pot = eval_at(l2p, 3)\n\n err = la.norm((pot - pot_direct)/res**2)\n err = err / (la.norm(pot_direct) / res**2)\n\n pconv_verifier_full.add_data_point(order, err)\n\n for name, verifier in [\n (\"p2m2p\", pconv_verifier_p2m2p),\n (\"p2m2m2p\", pconv_verifier_p2m2m2p),\n (\"p2m2m2l2p\", pconv_verifier_p2m2m2l2p),\n (\"full\", pconv_verifier_full),\n ]:\n print(30*\"-\")\n print(name)\n print(30*\"-\")\n print(verifier)\n print(30*\"-\")\n verifier()\n\n\n# You can test individual routines by typing\n# $ python test_kernels.py 'test_p2p(cl.create_some_context)'\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from py.test.cmdline import main\n main([__file__])\n\n# vim: fdm=marker\n","sub_path":"test/test_kernels.py","file_name":"test_kernels.py","file_ext":"py","file_size_in_byte":18956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"76743054","text":"import glob\nimport importlib.util as iutil\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport webbrowser\nfrom functools import partial\nfrom typing import Dict\nfrom warnings import warn\n\nfrom aequilibrae.project import Project\nfrom aequilibrae.project.database_connection import ENVIRON_VAR\n\nimport qgis\nfrom qgis.PyQt import QtCore\nfrom qgis.PyQt.QtCore import Qt\nfrom qgis.PyQt.QtWidgets import QVBoxLayout, QApplication\nfrom 
qgis.PyQt.QtWidgets import QWidget, QDockWidget, QAction, QMenu, QTabWidget, QCheckBox, QToolBar, QToolButton\nfrom qgis.core import QgsDataSourceUri, QgsVectorLayer\nfrom qgis.core import QgsProject\nfrom .binary_downloader_class import BinaryDownloaderDialog\nfrom .common_tools import AboutDialog\nfrom .download_extra_packages_class import DownloadExtraPackages\nfrom .matrix_procedures import LoadDatasetDialog\nfrom .menu_actions import run_add_zones, display_aequilibrae_formats, run_show_project_data, load_matrices, show_log\nfrom .menu_actions import run_desire_lines, run_scenario_comparison, run_lcd, run_tag\nfrom .menu_actions import run_distribution_models, run_tsp, run_change_parameters, run_stacked_bandwidths\nfrom .menu_actions import run_load_project, project_from_osm, run_create_transponet, prepare_network, run_add_connectors\nfrom .paths_procedures import run_shortest_path, run_dist_matrix, run_traffic_assig\nfrom .public_transport_procedures import GtfsImportDialog\n\nno_binary = False\ntry:\n    from aequilibrae.paths.AoN import one_to_all\nexcept ImportError as e:\n    no_binary = True\n    warn(f'AequilibraE binaries are not available {e.args}')\n\nif not no_binary:\n    pass\n\nextra_packages = True\n# Checks if we can display OMX\nspec = iutil.find_spec(\"openmatrix\")\nhas_omx = spec is not None\nif not has_omx:\n    extra_packages = False\n\n# Checks if ortools is available (used by the TSP menu entry)\nspec = iutil.find_spec(\"ortools\")\nhas_ortools = spec is not None\n\nif hasattr(Qt, 'AA_EnableHighDpiScaling'):\n    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)\n\nif hasattr(Qt, 'AA_UseHighDpiPixmaps'):\n    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)\n\n\nclass AequilibraEMenu:\n\n    def __init__(self, iface):\n        # Closes AequilibraE projects eventually opened in memory\n        self.logger = logging.getLogger('AequilibraEGUI')\n        if ENVIRON_VAR in os.environ:\n            del os.environ[ENVIRON_VAR]\n        self.geo_layers_list = ['links', 'nodes', 'zones']\n        self.translator = None\n        self.iface = iface\n        self.project = None  # type: Project\n        self.matrices = {}\n        self.layers = {}  # type: Dict[str, list]\n        self.dock = QDockWidget(self.trlt('AequilibraE'))\n        self.manager = QWidget()\n        self.no_binary = no_binary\n\n        # The self.toolbar will hold everything\n        self.toolbar = QToolBar()\n        self.set_font(self.toolbar)\n        self.toolbar.setOrientation(2)\n\n        self.menuActions = {'Project': [],\n                            'Network Manipulation': [],\n                            'Data': [],\n                            'Trip Distribution': [],\n                            'Paths and assignment': [],\n                            'Routing': [],\n                            # 'Public Transport': [],\n                            'GIS': [],\n                            'Utils': [],\n                            'AequilibraE': []}\n\n        # # ####################### PROJECT SUB-MENU ############################\n        self.add_menu_action('Project', 'Open Project', partial(run_load_project, self))\n        self.add_menu_action('Project', 'Create project from OSM', partial(project_from_osm, self))\n        self.add_menu_action('Project', 'Create Project from layers', partial(run_create_transponet, self))\n        self.add_menu_action('Project', 'Add zoning data', partial(run_add_zones, self))\n        self.add_menu_action('Project', 'Parameters', partial(run_change_parameters, self))\n        self.add_menu_action('Project', 'logfile', partial(show_log, self))\n        self.add_menu_action('Project', 'Close project', self.run_close_project)\n\n        # # # ########################################################################\n        # # # ################# NETWORK MANIPULATION SUB-MENU #######################\n\n        self.add_menu_action('Network Manipulation', 'Network Preparation', partial(prepare_network, self))\n        self.add_menu_action('Network Manipulation', 
'Add centroid connectors', partial(run_add_connectors, self))\n\n # # # ########################################################################\n # # # #################### DATA UTILITIES SUB-MENU #########################\n self.add_menu_action('Data', 'Display project data', partial(run_show_project_data, self))\n\n # # # # ########################################################################\n # # # # ################## TRIP DISTRIBUTION SUB-MENU ########################\n\n self.add_menu_action('Trip Distribution', 'Trip Distribution', partial(run_distribution_models, self))\n\n # # # ########################################################################\n # # # ################### PATH COMPUTATION SUB-MENU #######################\n #\n self.add_menu_action('Paths and assignment', 'Shortest path', partial(run_shortest_path, self))\n self.add_menu_action('Paths and assignment', 'Impedance matrix', partial(run_dist_matrix, self))\n self.add_menu_action('Paths and assignment', 'Traffic Assignment', partial(run_traffic_assig, self))\n\n # # # ########################################################################\n # # # ####################### ROUTING SUB-MENU ###########################\n if has_ortools:\n self.add_menu_action('Routing', 'Travelling Salesman Problem', partial(run_tsp, self))\n else:\n _ = self.menuActions.pop('Routing')\n\n # # # ########################################################################\n # # # ####################### TRANSIT SUB-MENU ###########################\n # transitMenu = QMenu()\n # self.gtfs_import_action = QAction(self.trlt('Convert GTFS to SpatiaLite'), self.manager)\n # self.gtfs_import_action.triggered.connect(self.run_import_gtfs)\n # transitMenu.addAction(self.gtfs_import_action)\n #\n # transitButton = QToolButton()\n # transitButton.setText(self.trlt('Public Transport'))\n # transitButton.setPopupMode(2)\n # transitButton.setMenu(transitMenu)\n #\n # self.toolbar.addWidget(transitButton)\n #\n # # ########################################################################\n # # ################# GIS TOOLS SUB-MENU #########################\n self.add_menu_action('GIS', 'Desire Lines', partial(run_desire_lines, self))\n self.add_menu_action('GIS', 'Stacked Bandwidth', partial(run_stacked_bandwidths, self))\n self.add_menu_action('GIS', 'Scenario Comparison', partial(run_scenario_comparison, self))\n self.add_menu_action('GIS', 'Lowest common denominator', partial(run_lcd, self))\n self.add_menu_action('GIS', 'Simple tag', partial(run_tag, self))\n\n # # ########################################################################\n # # ################# Utils submenu #########################\n self.add_menu_action('Data', 'Import matrices', partial(load_matrices, self))\n self.add_menu_action('Utils', 'Display Matrices and datasets', partial(display_aequilibrae_formats, self))\n\n # # ########################################################################\n # # ################# LOOSE STUFF #########################\n\n self.add_menu_action('AequilibraE', 'About', self.run_about)\n self.add_menu_action('AequilibraE', 'Help', self.run_help)\n\n if no_binary:\n self.add_menu_action('AequilibraE', 'Download binaries', self.run_binary_download)\n\n if not extra_packages:\n self.add_menu_action('AequilibraE', 'Install extra packages', self.install_extra_packages)\n\n self.build_menu()\n # ########################################################################\n # ################# PROJECT MANAGER #########################\n\n self.showing = 
QCheckBox()\n self.showing.setText('Show project info')\n self.showing.setChecked(True)\n self.toolbar.addWidget(self.showing)\n\n self.projectManager = QTabWidget()\n self.toolbar.addWidget(self.projectManager)\n\n # # # ########################################################################\n self.tabContents = []\n self.toolbar.setIconSize(QtCore.QSize(16, 16))\n\n p1_vertical = QVBoxLayout()\n p1_vertical.setContentsMargins(0, 0, 0, 0)\n p1_vertical.addWidget(self.toolbar)\n self.manager.setLayout(p1_vertical)\n\n self.dock.setWidget(self.manager)\n self.dock.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)\n self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dock)\n QgsProject.instance().layerRemoved.connect(self.layerRemoved)\n\n def add_menu_action(self, main_menu: str, text: str, function, submenu=None):\n if main_menu == 'AequilibraE':\n action = QToolButton()\n action.setText(text)\n action.clicked.connect(function)\n else:\n action = QAction(text, self.manager)\n action.triggered.connect(function)\n if submenu is None:\n self.menuActions[main_menu].append(action)\n else:\n self.menuActions[main_menu][submenu].append(action)\n\n def build_menu(self):\n for menu, actions in self.menuActions.items():\n if menu == 'AequilibraE':\n for action in actions:\n self.toolbar.addWidget(action)\n continue\n itemMenu = QMenu()\n self.set_font(itemMenu)\n if isinstance(actions, dict):\n for submenu, mini_actions in actions.items():\n new_sub_menu = itemMenu.addMenu(submenu)\n self.set_font(new_sub_menu)\n for mini_action in mini_actions:\n new_sub_menu.addAction(mini_action)\n else:\n for action in actions:\n itemMenu.addAction(action)\n itemButton = QToolButton()\n itemButton.setText(menu)\n itemButton.setPopupMode(2)\n itemButton.setMenu(itemMenu)\n\n self.toolbar.addWidget(itemButton)\n\n def run_help(self):\n url = 'http://aequilibrae.com/qgis'\n if sys.platform == 'darwin': # in case of OS X\n subprocess.Popen(['open', url])\n else:\n webbrowser.open_new_tab(url)\n\n def unload(self):\n del self.dock\n\n def trlt(self, message):\n # In the near future, we will use this function to automatically translate the AequilibraE menu\n # To any language we can get people to translate it to\n # return QCoreApplication.translate('AequilibraE', message)\n return message\n\n def initGui(self):\n pass\n\n def removes_temporary_files(self):\n # pass\n # Removes all the temporary files from previous uses\n p = tempfile.gettempdir() + \"/aequilibrae_*\"\n for f in glob.glob(p):\n try:\n os.unlink(f)\n except Exception as e:\n self.logger.error(e.args)\n pass\n\n def run_close_project(self):\n if self.project is None:\n return\n self.project.close()\n self.projectManager.clear()\n self.project = None\n self.matrices.clear()\n self.layers.clear()\n\n def layerRemoved(self, layer):\n layers_to_re_create = [key for key, val in self.layers.items() if val[1] == layer]\n\n # Clears the pool of layers\n self.layers = {key: val for key, val in self.layers.items() if val[1] != layer}\n\n # Re-creates in memory only the layer that was destroyed\n for layer_name in layers_to_re_create:\n self.create_layer_by_name(layer_name)\n\n def load_geo_layer(self):\n sel = self.geo_layers_table.selectedItems()\n lyr = [s.text() for s in sel][0]\n self.load_layer_by_name(lyr)\n\n def load_layer_by_name(self, layer_name: str):\n if self.project is None:\n return\n if layer_name.lower() not in self.layers:\n print('Layer was not found, which is weird')\n self.create_layer_by_name(layer_name)\n layer = 
self.layers[layer_name.lower()][0]\n        QgsProject.instance().addMapLayer(layer)\n        qgis.utils.iface.mapCanvas().refresh()\n\n    def create_layer_by_name(self, layer_name: str):\n        layer = self.create_loose_layer(layer_name)\n        self.layers[layer_name.lower()] = [layer, layer.id()]\n\n    def create_loose_layer(self, layer_name: str) -> QgsVectorLayer:\n        if self.project is None:\n            return\n        uri = QgsDataSourceUri()\n        uri.setDatabase(self.project.path_to_file)\n        uri.setDataSource('', layer_name, 'geometry')\n        layer = QgsVectorLayer(uri.uri(), layer_name, 'spatialite')\n        return layer\n\n    def run_about(self):\n        dlg2 = AboutDialog(self.iface)\n        dlg2.show()\n        dlg2.exec_()\n\n    def run_load_database(self):\n        dlg2 = LoadDatasetDialog(self.iface, single_use=False)\n        dlg2.show()\n        dlg2.exec_()\n\n    def install_extra_packages(self):\n        dlg2 = DownloadExtraPackages(self.iface)\n        dlg2.show()\n        dlg2.exec_()\n\n    def run_binary_download(self):\n        dlg2 = BinaryDownloaderDialog(self.iface)\n        dlg2.show()\n        dlg2.exec_()\n\n    def run_import_gtfs(self):\n        dlg2 = GtfsImportDialog(self.iface)\n        dlg2.show()\n        dlg2.exec_()\n\n    def message_binary(self):\n        qgis.utils.iface.messageBar().pushMessage(\n            \"Binary Error: \", \"Please download it from the repository using the downloader from the menu\", level=3\n        )\n\n    def show_message_no_project(self):\n        self.iface.messageBar().pushMessage(\"Error\", \"You need to load a project first\", level=3, duration=10)\n\n    def message_project_already_open(self):\n        self.iface.messageBar().pushMessage(\"You need to close the project currently open first\", level=2, duration=10)\n\n    def set_font(self, obj):\n        f = obj.font()\n        f.setPointSize(11)\n        obj.setFont(f)\n","sub_path":"AequilibraEMenu.py","file_name":"AequilibraEMenu.py","file_ext":"py","file_size_in_byte":14573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"438298398","text":"# coding=utf-8\nfrom multiprocessing import Process, Queue, Pool\nfrom multiprocessing.managers import BaseManager\nimport os, sys, time, random\nimport pymongo\nfrom pymongo import MongoClient\nimport argparse\nimport numpy as np\nfrom munch import Munch\nimport pandas as pd\nfrom tqdm import tqdm\n\"\"\"\nPurpose\n1. create the Queue\n2. connect to MongoDB\n3. fetch the feature groups that have high priority and are still undone\n4. write() them to the Queue\n5. expose the Queue for inter-process communication\n6. update the queue periodically\n\n\"\"\"\ndef delay(x, period=1):\n    \"\"\"\n    [Definition] the price of series x from period days earlier\n    [Category] technical indicator\n    delay() value of x d days ago\n    \"\"\"\n    res = np.zeros(x.shape) * np.nan\n    res[period:] = x[:-period]\n    return res\n\n\nclass QueueManager(BaseManager):\n    pass\n\n\nclass TaskQueue():\n    def __init__(self, queue_size, db_host, db_port, db_name, db_collection):\n        self.config = Munch()\n        self.config.db_host = db_host\n        self.config.db_port = db_port\n        self.config.db_name = db_name\n        self.config.db_collection = db_collection\n\n        self.config.queue_host = db_host\n        self.config.queue_port = 5000\n        self.config.queue_size = queue_size\n        #self.config.queue_authkey = 'abc'\n        self.config.launch_time = time.strftime('%Y-%m-%d %H:%M:%S')\n        self.config.pid = os.getpid()\n        print('[parallel][TaskQueue] %s' %self.config)\n        #print pd.DataFrame(self.config, index=['Queue info']).T\n        self.init_queue()\n\n\n    def init_queue(self):\n        self.queue = Queue(maxsize=self.config.queue_size)\n        QueueManager.register('get_task_queue', callable=lambda: self.queue)\n        # keep a reference so the manager can be shut down later via Q.manager.shutdown()\n        self.manager = QueueManager(address=('', 5000), authkey=b'abc')\n        self.manager.start()\n        self.queue_task = self.manager.get_task_queue()\n\n\n    def connect_mongodb(self):\n        try:\n            client = 
MongoClient(self.config.db_host, self.config.db_port)\n            db = client[self.config.db_name]\n        except Exception as e:\n            print('[parallel][TaskQueue] error: %s' % e)\n        return db\n\n\n\n    def write_queue(self, q, value):\n        q.put(value)\n\n\n    def update_queue(self, data_set):\n        for d in data_set:\n            self.write_queue(self.queue_task, d)\n\n\n    def fetch_data(self, col, data_num):\n        # fetch the data, sorted by priority in descending order\n        #return col.find({\"IS_backtest\": \"Undo\"}).sort(\"priority\", pymongo.DESCENDING)[0:data_num]\n        print('[parallel][TaskQueue] fetch_data ...')\n        return col.find({\"backtest\": \"Undo\"})[0:data_num]\n\n\n    def timing_run(self, queue_threshold):\n        from collections import deque\n        q_num = deque([], maxlen=10)\n        q_time = deque([], maxlen=10)\n        while True:\n            if self.queue.qsize() <= queue_threshold:\n                q_num = deque([], maxlen=10)\n                q_time = deque([], maxlen=10)\n                db = self.connect_mongodb()\n                for i in self.config.db_collection:\n                    dataset = self.fetch_data(db[i], data_num=self.config.queue_size)\n                    if dataset.count() == 0:\n                        print('[parallel][TaskQueue] collection:%s no data fetched' %i)\n                        time.sleep(1)\n                    else:\n                        print('[parallel][TaskQueue] collection:%s update_queue...' %i)\n                        self.update_queue(dataset)\n                        time.sleep(1)\n                        break\n            \n            else:\n                q_num.append(self.queue.qsize())\n                q_time.append(time.time())\n                bar_length = 30\n                percent = 1. * self.queue.qsize() / self.config.queue_size\n                hashes = '#' * int(percent * bar_length)\n                spaces = ' ' * (bar_length - len(hashes))\n                sys.stdout.write(\"\\r%s queue volume: %s/%s [%s] %d%% (%s/s)\" % (time.strftime('%H:%M'),\n                                self.queue.qsize(), self.config.queue_size,\n                                hashes + spaces, percent * 100,\n                                np.round((q_num[0]-q_num[-1])/(q_time[-1]-q_time[0]+1e-10), 2)))\n                sys.stdout.flush()\n                time.sleep(0.5)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    Q = TaskQueue(queue_size=5000, db_host='127.0.0.1', db_port=27017, db_name='AutoResearch', \n                  db_collection=['layer1', 'layer2', 'layer3', 'layer4', 'layer5', 'layer6', 'layer7'])\n    Q.timing_run(queue_threshold=0)\n    Q.manager.shutdown()\n    print('Queue exit.')\n\n\n    ","sub_path":"auto/.ipynb_checkpoints/task_queue-checkpoint.py","file_name":"task_queue-checkpoint.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"599420437","text":"import pandas as pd\nimport os\nimport webbrowser\n\n\ndef main():\n    # Read the dataset.\n    data_table = pd.read_csv(\"movies.csv\", index_col=\"movie_id\")\n\n    # Create webpage to view the data.\n    html = data_table[0:100].to_html()\n\n    # Save html to temporary file.\n    with open(\"data.html\", \"w\") as f:\n        f.write(html)\n\n    # Open web page in web browser.\n    full_filename = os.path.abspath(\"data.html\")\n    webbrowser.open(f\"file://{full_filename}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"chapter_4/view_movie_list.py","file_name":"view_movie_list.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283504711","text":"from collections import namedtuple\nimport gym\nimport numpy as np\nimport random\nfrom sklearn.preprocessing import LabelBinarizer\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nenv = gym.make(\"FrozenLake-v0\")\n\nSTATE_SIZE = 16\nHIDDEN_SIZE = 128\nBATCH_SIZE = 16\n\n\nclass Net(nn.Module):\n    def __init__(self, state_size, hidden_size, n_actions):\n        super(Net, self).__init__()\n        self.net = nn.Sequential(\n            
nn.Linear(state_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, n_actions),\n nn.Softmax()\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Agent(object):\n def __init__(self, state_size, hidden_size, n_actions):\n\n self.state_size = state_size\n self.n_actions = n_actions\n self.net = Net(state_size, hidden_size, n_actions)\n self.encoder = LabelBinarizer()\n self.encoder.fit(range(state_size))\n\n self.epsilon = 1.\n self.epsilon_decay = 0.9\n self.epsilon_min = 0.005\n\n self.objective = nn.CrossEntropyLoss()\n self.optimizer = optim.Adam(self.net.parameters(), lr=0.01)\n\n def act(self, state):\n if random.random() <= self.epsilon:\n return random.randint(0, self.n_actions - 1)\n else:\n state_ = torch.FloatTensor(self.encoder.transform([state])[0, :])\n action_prob = self.net(state_)\n return np.argmax(action_prob.data.numpy())\n\n def train_agent(self, batch):\n average_duration = np.mean(list(map(lambda episode: episode.duration, batch)))\n\n train_states, train_actions = [], []\n for iter_no, episode in enumerate(batch):\n train_states.extend(list(map(lambda step: self.encoder.transform([step.state])[0, :], episode.steps)))\n train_actions.extend(list(map(lambda step: step.action, episode.steps)))\n\n train_states = torch.FloatTensor(train_states)\n train_actions = torch.LongTensor(train_actions)\n action_scores = self.net(train_states)\n\n loss = self.objective(action_scores, train_actions)\n loss.backward()\n self.optimizer.step()\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n return (average_duration, loss.item())\n\n\n# define function which creates batch of episodes, all finished with success\ndef create_batch(env, agent, batch_size):\n batch = []\n while True:\n steps = []\n state = env.reset()\n while True:\n action = agent.act(state)\n next_state, reward, done, info = env.step(action)\n steps.append(EpisodeStep(state=state, action=action))\n if done:\n if reward > 0:\n batch.append(Episode(steps=steps, duration=len(steps)))\n break\n state = next_state\n if len(batch) == batch_size:\n return batch\n\n\nEpisodeStep = namedtuple('EpisodeStep', field_names=['state', 'action'])\nEpisode = namedtuple('Episode', field_names=['steps', 'duration'])\n\nSTATE_SIZE = 16\nHIDDEN_SIZE = 128\nBATCH_SIZE = 16\n\nif __name__ == '__main__':\n env = gym.make(\"FrozenLake-v0\")\n\n n_actions = env.action_space.n\n agent = Agent(STATE_SIZE, HIDDEN_SIZE, n_actions)\n writer = SummaryWriter(comment=\"-frozen-lake\")\n\n for epoch in range(50):\n batch = create_batch(env, agent, BATCH_SIZE)\n average_duration, loss = agent.train_agent(batch)\n\n writer.add_scalar('loss', loss, epoch)\n writer.add_scalar('average-duration', average_duration, epoch)\n writer.add_scalar('epsilon', agent.epsilon, epoch)\n\n print(\"Epoch: %d, loss: %.3f, average duration: %.3f, epsilon: %.3f\" % (\n epoch, loss, average_duration, agent.epsilon))\n\n writer.close()\n\n # play one episode after model training\n reward = 0.0\n while reward < 1:\n state = env.reset()\n while True:\n action = agent.act(state)\n next_state, reward, done, info = env.step(action)\n env.render()\n if done:\n break\n state = next_state\n env.close()\n","sub_path":"CrossEntropy/cross-entropy-frozen-lake.py","file_name":"cross-entropy-frozen-lake.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"68854188","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# @package \n# @brief \n#\n# 
@version $Revision: $\n# @author Sergey Green\n# @note Barracuda Networks\n# @note $Date: $\n# @note $URL: $\n#\n# @purpose\n# @usage\n#\nimport sys\nimport os\nimport datetime\nimport glob\nimport shutil\nimport re\nfrom optparse import OptionParser\n\nimport logging\n\nformatter = logging.Formatter(\"%(name)-11s : %(message)s\")\nconsole = logging.StreamHandler()\nconsole.setFormatter(formatter)\nlogger = logging.getLogger(\"\")\nlogger.name = \"moveDirs\"\nlogger.addHandler(console)\n\nlogger.setLevel(logging.INFO)\n\nclass MoveDirs:\n def __init__(self):\n self.now = datetime.datetime.now()\n self.cwd = os.getcwd()\n \n def collectArgs(self):\n parser = OptionParser(usage=\"\"\" %prog [options] --src --dst --age \n \n Move directories | files older than a given age [days]\n from src to dst directory\n \n Note: srs and dst paths must be absolute\"\"\")\n \n parser.add_option(\"-s\", \"--src\",\n dest=\"src\",\n default=None,\n help=\"Source, default=None\",)\n parser.add_option(\"-d\", \"--dst\",\n dest=\"dst\",\n default=None,\n help=\"Destination, default=None\",)\n parser.add_option(\"-a\", \"--age\",\n dest=\"age\",\n default=None,\n help=\"Age [days], default=None\",)\n parser.add_option(\"-i\", \"--interactive\",\n action=\"store_true\",\n dest=\"interactive\",\n default=False,\n help=\"Interactive flag, default=False \")\n (self.opts, args) = parser.parse_args()\n \n def verifyArgs(self): \n for opt, value in self.opts.__dict__.items():\n if opt == \"interactive\":\n pass\n elif value:\n pass\n #logger.info(\"%s=%s \" % (opt,value))\n else:\n logger.info(\"%s %s\" % (opt,\"missing ..\"))\n sys.exit()\n \n def move2Link(self):\n filelist = self.listContent()\n if filelist:\n for name in filelist:\n if os.path.islink(name):\n continue\n if self.ifOlder(name):\n self.moveFile(name)\n \n def listContent(self):\n if os.path.isdir(self.opts.src):\n return glob.glob(\"%s%s*\" % (self.opts.src, os.path.sep))\n elif os.path.isfile(self.opts.src):\n return self.opts.src.split()\n return None\n \n def moveFile(self,name):\n newfile = \"%s%s%s\" % (self.opts.dst,os.path.sep, os.path.basename(name))\n newfile = os.path.abspath(newfile) \n linkname = os.path.abspath(name)\n \n if self.opts.interactive:\n reply = raw_input(\"%s -> %s ? 
Y|n :\" % (linkname,newfile))\n if reply.lower() != \"y\":\n return\n \n shutil.move(name, self.opts.dst)\n os.symlink(newfile,linkname)\n logger.info(\"%s -> %s\" % (newfile,linkname))\n \n def ifOlder(self,p):\n mod = datetime.datetime.fromtimestamp(os.path.getmtime(p))\n if mod < self.getTimeFloor(self.opts.age): \n return True\n return False\n \n def getTimeFloor(self,days):\n return (self.now - (datetime.timedelta(days=int(days))))\n \ndef main():\n md = MoveDirs()\n md.collectArgs()\n md.verifyArgs()\n md.move2Link()\n \nif __name__ == '__main__':\n main()\n ","sub_path":"repo8/move_iso_files/move2Link.py","file_name":"move2Link.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"376389826","text":"# coding:utf-8\nimport requests\nimport unittest\nimport json\nclass test_wuye1(unittest.TestCase):\n def setUp(self):\n print(u\"开始\")\n def test_interface1(self):\n print(u\"测试接口\")\n lp1={\"username\":\"13000000000\"}\n rd1=requests.get(\"http://172.16.4.120/wuye/public/index.php/index/index//oauthAccount?\",params=lp1)\n data=json.loads(rd1.text)\n self.assertEqual(\"success\",data[\"message\"])\n def tearDown(self):\n print(u\"结束\")\nif __name__ == '__main__':\n unittest.main","sub_path":"python_work_mei/interface/script/interface1.py","file_name":"interface1.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335368695","text":"from .IngestorInterface import IngestorInterface\nfrom typing import List\nfrom .QuoteModel import QuoteModel\n\n\nclass TextIngestor(IngestorInterface):\n allowed_extensions = [\"txt\"]\n\n @classmethod\n def parse(cls, path: str) -> List[QuoteModel]:\n \"\"\"Return quotes from a TXT file\"\"\"\n if not cls.can_ingest(path):\n raise Exception(\"Cannot Ingest Exception\")\n quotes = []\n with open(path, \"r\") as infile:\n for line in infile.readlines():\n parsed = line.split(\" - \")\n try:\n new_quote = QuoteModel(parsed[0], parsed[1])\n quotes.append(new_quote)\n except IndexError:\n break\n return quotes\n","sub_path":"src/QuoteEngine/TextIngestor.py","file_name":"TextIngestor.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504274294","text":"# _*_ coding:utf-8 _*_\nfrom apps.roster.models import Roster\nfrom apps.roster.serializers import RosterShortSerializer, RosterBaseSerializer, RosterSerializer, \\\n RosterDetailSerializer\nfrom rest_framework import generics\n\n\nclass RosterBaseListApi(generics.ListCreateAPIView):\n \"\"\"根据level获取\"\"\"\n\n model = Roster\n\n def get_serializer_class(self):\n level = self.kwargs.get('level')\n print(level)\n if int(level) == 0:\n return RosterShortSerializer\n elif int(level) == 1:\n return RosterBaseSerializer\n else:\n return RosterDetailSerializer\n\n def get_queryset(self):\n # 从url中获取参数\n level = self.kwargs.get('level')\n return Roster.objects.filter(level=level).order_by('id')\n\n\nclass RosterGroupListApi(generics.ListAPIView):\n \"\"\"根据parent获取组内元素\"\"\"\n\n model = Roster\n serializer_class = RosterSerializer\n\n def get_queryset(self):\n parent = self.kwargs.get('parent')\n return Roster.objects.filter(parent=parent).order_by('id')\n\n\n\nclass RosterRetrieveUpdateDestoryApi(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"获取,修改,删除\"\"\"\n\n model = Roster\n serializer_class = RosterShortSerializer\n\n def 
get_queryset(self):\n        return Roster.objects.all()","sub_path":"apps/roster/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"402627329","text":"\n\"\"\"\n    UFRPE - BSI2019.1 - Discrete Mathematics - Assignment 2 - 2nd VA\n    Pair:\n        Edson Kropniczki + Cristina Oliveira\n    Description:\n        classes implementing Dijkstra's algorithm,\n        for computing the shortest distance between two vertices of a simple undirected graph\n\n    Algorithm:\n        source: Wikipedia\n        URL : https://en.wikipedia.org/wiki/Dijkstra's_algorithm\n\n    Pseudo-code to determine the shortest distance between source and target:\n\n    function Dijkstra(Graph, source, target):\n\n        create vertex set Q\n\n        for each vertex v in Graph:\n            dist[v] ← INFINITY\n            prev[v] ← UNDEFINED\n            add v to Q\n        dist[source] ← 0\n\n        while Q is not empty:\n\n            u ← vertex in Q with min dist[u]\n\n            remove u from Q\n            if u = target:\n                break\n\n            for each neighbor v of u: // only v that are still in Q\n                alt ← dist[u] + length(u, v)\n                if alt < dist[v]:\n                    dist[v] ← alt\n                    prev[v] ← u\n\n        return dist[], prev[]\n\n    // pseudo-code to rebuild the path backwards\n\n    function reverse_path(prev, source, target):\n        S ← empty sequence\n        u ← target\n        if prev[u] is defined or u = source: // Do something only if the vertex is reachable\n            while u is defined: // Construct the shortest path with a stack S\n                insert u at the beginning of S // Push the vertex onto the stack\n                u ← prev[u] // Traverse from target to source\n\"\"\"\n\nimport sys\n\n\nclass Graph(list):\n\n    def __init__(self):\n        super(Graph, self).__init__()\n\n    ##################################################################\n    #\n    # Python implementation of Dijkstra's algorithm,\n    # based on the Wikipedia pseudo-code above\n    #\n    ##################################################################\n\n    def dijkstra(self, source, target):\n\n        # create vertex set Q\n        q = []\n        dist = {}\n        prev = {}\n        infinity = sys.maxsize  # we take the largest available Python integer as infinity\n\n        for vertex in self:\n            if vertex == source:\n                dist[vertex] = 0\n            else:\n                dist[vertex] = infinity\n            prev[vertex] = None\n            q.append(vertex)\n\n        while len(q) > 0:\n\n            # u ← vertex in Q with min dist[u]\n            u = q[0]\n            ix_u = 0\n            for i, v in enumerate(q):\n                if dist[v] < dist[u]:\n                    u = v\n                    ix_u = i\n\n            # remove u from Q\n            del q[ix_u]\n\n            if u == target:\n                break\n\n            # for each neighbor v of u:\n            for edge in u.edges:\n                v = edge.n2\n                alt = dist[u] + edge.dist\n                if alt < dist[v]:\n                    dist[v] = alt\n                    prev[v] = u\n\n        return dist, prev\n\n    def reverse_path(self, prev, source, target):\n        s = \"\"\n        distance = 0\n        u = target\n        if (prev[u] is not None) or (u == source):\n            while u is not None:\n                s = str(u.node_id) + \"->\" + s\n                if prev[u] is not None:\n                    distance += u.get_edge(prev[u]).dist\n                u = prev[u]\n            s = s[:-2]\n        return s, distance\n\n    #################################################################################\n\n\n# Node/Edge classes for building graphs in the GUI and for use in the Dijkstra algorithm\nclass Node:\n\n    def __init__(self, node_id):\n        self.node_id = node_id  # save node id (label)\n        self.edges = []  # create blank list of edges\n\n    # check if edge exists in this node\n    def _has_edge(self, edge):\n        return edge in self.edges\n\n    # add edge to node\n    def add_edge(self, other, dist):\n        edge = self.Edge(self, other, dist)\n        if not self._has_edge(edge):\n            self.edges.append(edge)\n            other.add_edge(self, dist)\n\n    # return 
Edge obj between this Node and @other, if any\n    def get_edge(self, other):\n        for edge in self.edges:\n            if edge.n2 == other:\n                return edge\n        return None\n\n    # minimum overload to make Node objects hashable, so that we can use them as dictionary keys\n    # in the Wikipedia Dijkstra algorithm\n    def __hash__(self):\n        return hash(self.node_id)\n\n    def __eq__(self, other):\n        return (self.node_id == other.node_id) and (self.edges == other.edges)\n\n    def __ne__(self, other):\n        return not(self == other)\n\n    # Nested class Edge to construct tagged edge objects between Node instances\n    class Edge:\n\n        def __init__(self, n1, n2, dist):\n            self.n1 = n1\n            self.n2 = n2\n            self.dist = dist\n\n        def __eq__(self, other):\n            return (self.n1 == other.n1) and (self.n2 == other.n2) and (self.dist == other.dist)\n\n        def __lt__(self, other):\n            return self.dist < other.dist\n","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162520372","text":"# An algorithm is said to have a quadratic time complexity when it needs\n# to perform a linear time operation for each value in the input data\n\n# Bubble sort\n# Best O(n^2); Average O(n^2); Worst O(n^2)\n\ndef bubbleSort(List):\n    for i in range(len(List)):\n        for j in range(len(List) - 1, i, -1):\n            if List[j] < List[j - 1]:\n                List[j], List[j - 1] = List[j - 1], List[j]\n    return List\n\nif __name__ == '__main__':\n    List = [3, 4, 2, 6, 5, 7, 1, 9]\n    print('Sorted List:',bubbleSort(List))","sub_path":"quadraticTime.py","file_name":"quadraticTime.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"295703063","text":"# -*- coding: utf-8 -*-\nimport string\n\n\n# the_string = \"The word the should be seen two times. It should be.\"\n\ndef get_word_list(the_string):\n    # why upper? this gets rid of the fact that capitals and lowercases are different words. we don't want that\n    # get rid of it, though, when we switch to Hebrew. there are no capitals in Hebrew\n    the_string = string.join(string.split(the_string, \".\"), \"\")  # getting rid of the periods\n    the_string = string.split(the_string.upper(), \" \")\n    the_word_list = {}\n    for i in range(len(the_string)):\n        cur_word = the_string[i]\n\n        # solving the issue of getting the last word without any out-of-bounds issues\n        if i == len(the_string) - 1:  # if we're on the last one, i.e. 
there is no next_word\n            next_word = None  # next_word is blank\n        else:  # or else\n            next_word = the_string[i + 1]  # next_word = the next word\n        # print(\"cur_word = %s : next_word = %s\") %(cur_word, next_word)\n        if cur_word in the_word_list:\n            the_word_list[cur_word][0] += 1  # adding one to the amount of times it shows up\n            if next_word in the_word_list[cur_word][1]:  # if the next word is already in the \"next word dictionary\"\n                the_word_list[cur_word][1][next_word] += 1  # increment the amount of times it shows up\n            else:  # or else\n                the_word_list[cur_word][1][next_word] = 1  # we create that new dictionary entry, and add 1 to that\n        else:\n            the_word_list[cur_word] = [1, {next_word : 1}]  # creating a brand new entry\n\n    return(the_word_list)\n","sub_path":"wordList.py","file_name":"wordList.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239822988","text":"import sys;\nimport glob;\nimport os;\nimport re;\n\noutputfile = sys.argv[2];\nfile2 = open(outputfile, \"w\");\nfileNames = glob.glob(sys.argv[1]+'/*.txt');\n#fileNames.sort();\n\n#documents = 0;\n#term_freq = { };\n#weight_freq = { };\n#contain_dict = { };\n#vocabulary = set();\n#document_freq = { };\n\nfeature_dict = { };\nfeature_counter = 1 ;\n#print (\"filenames are\", fileNames,\"\\n\");\nfor name in fileNames:\n#\tdocuments = documents + 1;\n\n\tpath = re.search('/(.+?).[0-9]+.txt', name).group(1);\n\tindex = path.rfind('/');\n\tindex = index+1;\n\n\tfile1 = open(name, 'r', errors='ignore')\n\tlines = file1.readlines()\n\tstr_temp = ' '.join([line.strip() for line in lines]);\n\tstr_temp = re.sub(\"[^A-Za-z0-9_\\\\s\\\\'\\\\@\\\\$\\\\-]+\",'',str_temp);\t\n#\tstr_temp = str_temp.lower();\n\tfile1.close();\n#\tlist_array = str_temp.split(\" \");\n\tlist_array = str_temp.split();\n\n\tterm_freq = { };\n#\tfeature_dict= { };\n\tfeature_order = { };\n#\tprint(\"name is\", name);\n#\tprint (\"document is:\", list_array);\n\tfor item in list_array:\t\t\n\t\tif(item not in feature_dict):\t\t\n\t\t\tfeature_dict[item] = feature_counter;\n#\t\t\tprint(\"feature counter is :\", feature_counter);\n\t\t\tfeature_counter = feature_counter + 1;\t\t\t\n\n\t\tif(item not in feature_order):\n\t\t\tfeature_order[item] = feature_dict[item];\n\n\t\tif (item not in term_freq):\n\t\t\tterm_freq[item] = 1;\n\t\telse:\n\t\t\tterm_freq[item] = 1;\n#\t\t\tterm_freq[item] = term_freq[item] + 1;\n\n#\tprint(\"term frequency is:\", term_freq);\n\n\tstr_str = \"\";\n\tgenexp = ((k, feature_order[k]) for k in sorted(feature_order, key=feature_order.get, reverse=False))\n\tfor k,v in genexp:\n#\t\tprint (\"value is: \", v, \"term frequency s:\", term_freq[k]);\n#\t\tprint (\"str_str is:\", str_str);\n#\t\tstr_dummy = str(v);\n\t\tstr_str =str_str + str(v) + \":\" + str(term_freq[k]) + \" \";\n\t\n\tterm_freq = { };\n\tfeature_order = { };\n#\tprint (str_str);\n \n#\tfor item in list_array:\t\t\n\tif (path[index:] == \"SPAM\" or path[index:] == \"POS\"):\n\t\tfile2.write(\"1\" + \" \" + str_str+\"\\n\");\n\telif(path[index:]==\"HAM\" or path[index:] == \"NEG\"):\n\t\tfile2.write(\"-1\" + \" \" + str_str + \"\\n\");\n\telse:\n\t\tfile2.write(\"0\" + \" \" + str_str + \"\\n\");\n\tstr_str = \" \";\nfile2.close();\n","sub_path":"svmpreprocess.py","file_name":"svmpreprocess.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"264600641","text":"from datetime import 
datetime\nfrom marketCenter.models.base import Base, uuid\nfrom marketCenter.models.nba_stock import NBAStock as Stock\nfrom marketCenter.errors import IllegalOperation\nfrom marketCenter.db import connection\n\nclass TradeFactory(Base):\n def defaults(self):\n return {\n \"uid\": uuid(),\n \"created_at\": datetime.now()\n }\n\nclass Trade(TradeFactory):\n _collection = \"trades\"\n _keys = (\"uid\",)\n _attributes = (\"stock_id\", \"buyer_id\", \"seller_id\", \"price\", \"shares\", \"created_at\")\n\n def __init__(self, **kwargs):\n attributes = super().defaults()\n attributes.update(kwargs)\n super().__init__(**attributes)\n\n @classmethod\n async def create(cls, **attributes):\n stock = await Stock.find_one({\"uid\": attributes.get(\"stock_id\")})\n try:\n await stock.transfer(\n sender=attributes.get(\"seller_id\"), \n receiver=attributes.get(\"buyer_id\"), \n shares=int(attributes.get(\"shares\"))\n )\n except AttributeError:\n raise IllegalOperation(\"stock {} does not exist\".format(attributes.get(\"stock_id\")))\n return await super().create(**attributes)","sub_path":"marketCenter/models/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475216240","text":"import tkinter as tk\n\nclass APP:\n def __init__(self,master):\n frame = tk.Frame(master)\n frame.pack(side=tk.LEFT,padx=20,pady=20)\n\n self.hi_there = tk.Button(frame,command=self.sayhi,bg='black',text='打招呼',fg='blue')\n self.hi_there.pack()\n def sayhi(self):\n print('Hello World')\n \nroot=tk.Tk()\napp=APP(root)\n\nroot.mainloop()\n","sub_path":"python_learn/第一个tkinter.py","file_name":"第一个tkinter.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445928199","text":"import spacy\n\nnlp = spacy.load('en_core_web_md') \n\nprint(\"Enter 2 Words (separated by space): \")\nwords = input()\n\ntokens = nlp(words)\n\nprint(\"Token1\\t\\tToken2\\t\\tSimilarity\")\nprint(\"-\"*60)\nfor token1 in tokens:\n\tfor token2 in tokens:\n\t\tprint(str(token1.text) + \"\\t\\t\" + str(token2.text) + \"\\t\\t\" + str(token1.similarity(token2)))","sub_path":"Assignment6/q3_c.py","file_name":"q3_c.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"525388295","text":"__author__ = 'anderson'\n\nfrom random import random\nexperimentos = 100000\ncont = 0\npPecaA = 1/1000\npPecaB = 1/500\npPecaC = 1/200\n\nfor x in range(experimentos):#Lotes\n for i in range(10):#embalagens\n if random() <= pPecaA or random() <= pPecaB or random() <= pPecaC:\n cont += 1\n break\n\nprint(cont/experimentos)\n","sub_path":"ProjetosimulacaoDiscreta/lista2/questao06b.py","file_name":"questao06b.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225643622","text":"class Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n if len(nums2) > len(nums1):\n self.intersect(nums2, nums1)\n d = {}\n res = []\n for no in nums1:\n d[no] = d.get(no, 0) + 1\n for no in nums2:\n if no in d and d[no]:\n res.append(no)\n d[no] -= 1\n return 
res\n","sub_path":"350/350.intersection-of-two-arrays-ii.303730139.Accepted.leetcode.python3.py","file_name":"350.intersection-of-two-arrays-ii.303730139.Accepted.leetcode.python3.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65750452","text":"from .common import echo\nfrom blessed import Terminal\n\n\n# @singleton\nclass Screen:\n screen = {(0, 0): \"\",}\n region = []\n disabled_region = []\n change_region_cnt: int = 0\n regions_order = []\n base_corner = {}\n\n def __init__(self):\n self.term = Terminal(force_styling=True)\n # self.log = open('log3.log', 'a')\n\n def reset(self):\n self.screen = {(0, 0): \"\",}\n\n def __render(self):\n screen2render = {}\n for ii in self.regions_order:\n region = self.region[ii]\n if not self.disabled_region[ii]:\n for c, s in region.items():\n # self.log.write(f'reg_id = {ii}, c = {c}, s = {s}\\n')\n if c in self.screen.keys():\n if c in screen2render.keys() and screen2render[c] != s:\n screen2render[c] = s\n else:\n if s != self.screen[c]:\n screen2render[c] = s\n else:\n screen2render[c] = s\n self.screen[c] = s\n\n screen2render = dict(sorted(screen2render.items(), key=lambda x:x[0][1]))\n for c,s in screen2render.items():\n echo(self.term.move(c[1], c[0]) + s)\n\n def __shift2base(self, c=None, region_id=-1):\n if c is not None and len(c) == 2:\n if region_id != -1 and region_id in self.base_corner.keys():\n base = self.base_corner[region_id]\n return c[0] + base[0], c[1] + base[1]\n return c\n\n def set_base_corner(self, region_id: int = -1, x=-1, y=-1):\n if region_id > 0 and region_id < len(self.region):\n self.base_corner[region_id] = (x, y)\n\n def bind(self) -> int:\n self.region.append({})\n self.disabled_region.append(False)\n return len(self.region) - 1\n\n def begin(self):\n self.change_region_cnt += 1\n\n def end(self):\n self.change_region_cnt -= 1\n if self.change_region_cnt == 0:\n self.__render()\n self.regions_order = []\n echo(self.term.normal)\n if self.change_region_cnt < 0:\n print(\"хуйня\")\n raise ValueError()\n\n def disable_region(self, region_id=-1):\n self.disabled_region[region_id] = True\n\n def echo(self, region_id:int=-1, x=0, y=0, string=\"\"):\n if region_id < 0 or region_id >= len(self.region):\n raise ValueError()\n\n if region_id not in self.regions_order:\n self.regions_order.append(region_id)\n\n c = self.__shift2base((x, y), region_id)\n if len(string) > 1:\n ii = 0\n style = u''\n applied_style: bool = False\n\n while True:\n if ii == len(string): break\n if string[ii] == '\\x1b' or string[ii] == '\\x0f':\n if applied_style:\n style = u''\n applied_style = False\n\n e = string[ii:].find('m')\n if e != -1:\n style += string[ii:ii + e + 1]\n ii += e + 1\n else:\n style += string[ii]\n ii += 1\n continue\n\n applied_style = True\n s = style + string[ii]\n c = self.__shift2base((x, y), region_id)\n self.region[region_id][c] = s\n x += 1\n ii += 1\n if applied_style == False and style != u'':\n try:\n self.region[region_id][c] += style\n except KeyError:\n self.region[region_id][c] = style\n else:\n self.region[region_id][c] = string\n\n self.disabled_region[region_id] = False","sub_path":"simpleinterface/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249566214","text":"\n# coding: utf-8\n\n# # Forecasting Model of Monthly Settled Cases\n\n# ## Overview\n\n# In this tutorial, we 
will work through a time series forecasting project from end-to-end, from reviewing the dataset and defining the problem to training a final model and making predictions. This project is not exhaustive, but shows how you can get good results quickly by working through a time series forecasting problem systematically.\n# The steps of this project that we will work through are as follows. \n# 1. Problem Description\n# 2. Experimental Setup\n# 3. Persistence\n# 4. Data Analysis \n# 5. ARIMA Models\n# 6. Time series evaluation\n\n# ## 1. Problem Description\n\n# The problem is to predict the monthly number of settled SCA cases over the years.\n\n# In[1]:\n\n# separate out a validation dataset (imports consolidated and deduplicated for the whole notebook)\nfrom math import sqrt\nimport warnings\nimport numpy\nimport numpy as np\nimport pandas as pd\nimport pylab\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nfrom pandas import Series, DataFrame, TimeGrouper, concat\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport statsmodels.tsa.api as smt\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.tsa.ar_model import AR\nfrom statsmodels.tsa.arima_model import ARIMA, ARIMAResults\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom scipy.stats import boxcox\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.ensemble import RandomForestRegressor\nfrom arch import arch_model\nget_ipython().magic(u'matplotlib inline')\n\nseries = Series.from_csv('Monthly Count of Settlement.csv', header=0)\n\n\n# ## 2. Experimental Setup\n\n# We will take the data set and split it into training and test sets for the purposes of the experiment. \n\n# ## 3. Persistence\n\n# The first step before getting bogged down in data analysis and modeling is to establish a baseline of performance. This will provide both a template for evaluating models and a performance measure by which all more elaborate predictive models can be compared. The baseline prediction for time series forecasting is called the naive forecast (sometimes referred to as persistence). Here we will use walk-forward validation discussed in class.\n# \n# Running the naive forecast prints the prediction and observation for each iteration of the test dataset. The example ends by printing the RMSE for the model. In this case, we can see that the forecast achieved an RMSE of 0.897. \n\n# ## 4. Data Analysis\n\n# Now that we have a baseline prediction method and its performance, we can start digging into our data.\n# \n# We can use summary statistics and plots of the data to quickly learn more about the structure of the prediction problem. 
In this section, we will look at the data from four perspectives:\n# \n# 1. Summary Statistics. \n# 2. Line Plot.\n# 3. Density Plots.\n# 4. Box and Whisker Plot.\n\n# ### 4.1 Summary Statistics\n# \n# Summary statistics provide a quick look at the limits of the observed values, and can help to give a quick idea of what we are working with. The example below calculates and prints summary statistics for the time series.\n\n# In[2]:\n\n# summary statistics of time series\nprint(series.describe())\n\n\n# ### 4.2 Line Plot\n\n# The first step before getting bogged down in data analysis and modeling is to establish a baseline of performance. This will provide both a template for evaluating models and a performance measure by which all more elaborate predictive models can be compared. The baseline prediction for time series forecasting is called the naive forecast (sometimes referred to as persistence); it is implemented in Section 3 below.\n# \n# Here we will use the walk-forward validation discussed in class. \n\n# A line plot of a time series can provide a lot of insight into the problem.\n\n# Running the naive forecast prints the prediction and observation for each iteration of the test dataset. The example ends by printing the RMSE for the model. In this case, we can see that the forecast achieved an RMSE of 0.897. \n\n# In[3]:\n\n# plot time series data for overview\npyplot.figure(1)\nseries.plot()\npylab.ylabel('Monthly Settled Cases')\npylab.xlabel('Date')\npyplot.show()\n\n\n# Some observations from the plot include:\n# - There may be an increasing trend in the settlement counts over time.\n# - There might be systematic seasonality in the settlement counts for each year.\n# - The seasonal signal appears to be growing over time, suggesting a multiplicative relationship (increasing change).\n# - There do not appear to be any obvious outliers.\n# - The seasonality suggests that the series is almost certainly non-stationary.\n# \n# There may be benefit in explicitly modeling the seasonal component and removing it. You may also explore using differencing with one or two levels in order to make the series stationary. The increasing trend or growth in the seasonal component may suggest the use of a log or other power transform.\n# \n\n# ### 4.3 Density Plot and Transformation \n\n# Reviewing plots of the density of observations can provide further insight into the structure of the data. The example below creates a histogram and density plot of the observations without any temporal structure.\n\n# In[4]:\n\npyplot.figure(3)\npyplot.subplot(211)\nseries.hist()\npyplot.subplot(212) \nseries.plot(kind='kde')\npyplot.show()\n\n\n# In[5]:\n\n# apply a square root transform to reduce the positive skew\nseries=np.sqrt(series)\npyplot.figure(3)\npyplot.subplot(211)\nseries.hist()\npyplot.subplot(212) \nseries.plot(kind='kde')\npyplot.show()\n\n\n# The distribution is clearly unimodal, with one clear peak. From the density plot and the histogram, we can say that it is almost symmetric, with a small long tail on the right (a few occurrences far from the central part).\n# For a positive skew, where the tail is on the positive end, we can apply a square root, log, or inverse/reciprocal transformation. Therefore, if the square root transformation applied above is not sufficient, we can also use the next level of transformation. 
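\n# For instance, a minimal sketch of that next step, a log transform, re-loading the raw counts so the square root above is not applied twice (the `raw` and `log_series` names are illustrative only):\n\n# In[ ]:\n\n# log transform of the raw, positive-valued counts\nraw = Series.from_csv('Monthly Count of Settlement.csv', header=0)\nlog_series = np.log(raw.astype('float32'))\npyplot.figure()\npyplot.subplot(211)\nlog_series.hist()\npyplot.subplot(212)\nlog_series.plot(kind='kde')\npyplot.show()\n\n\n# 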
On the other hand, Box Cox runs all transformations automatically so that you can choose the best one.\n\n# ### 4.4 Box and Whisker Plot \n# \n# We can group the monthly data by year and get an idea of the spread of observations for each year, and of how this may be changing. We do expect to see some trend (increasing mean or median), but it may be interesting to see how the rest of the distribution may be changing. \n# \n# The example code below groups the observations by year and creates one box and whisker plot for each year of observations. The last year (2017) only contains 3 months and may not be a useful comparison with the 12 months of observations for other years, so only the data between 1989 and 2005, as selected in the code, is plotted.\n\n# In[6]:\n\ngroups = series['1989':'2005'].groupby(TimeGrouper('A'))\nyears = DataFrame()\nfor name, group in groups:\n    years[name.year] = group.values\nyears.boxplot(return_type ='dict',figsize=(12,6))\npyplot.show()\n\n\n# In[7]:\n\n# hold back the last 20% of the data as an unseen validation set\nsplit_point = int(round(len(series)*0.8))\ntrain, holdontest = series[0:split_point], series[split_point:]\nprint('Train %d, holdonTest %d' % (len(train), len(holdontest)))\ntrain.to_csv('trainSet.csv')\nholdontest.to_csv('holdontestSet.csv')\n\n\n# In[8]:\n\nseries=Series.from_csv('trainSet.csv',header=0)\n\n\n# The plot above has 17 box and whisker plots side-by-side, one for each of the 17 years of selected data. \n# \n# Some observations from reviewing the plots include:\n# \n# - The median values for each year (red line) may show an increasing trend.\n# - There are outliers in about half of the years.\n\n# ## 5. Time Series Models\n\n# ### 5.1 Checking stationarity \n\n# Analysis of the time series data assumes that we are working with a stationary time series. As we have seen in the previous sections, the series is almost certainly non-stationary. \n# \n# We can make it stationary by first differencing the series and using a statistical test to confirm that the result is stationary.\n# \n# We can also create a deseasonalized version of the series and save it to the file `stationary.csv`.\n\n# The plot does not show any obvious seasonality or trend, suggesting the seasonally differenced dataset is a good starting point for modeling. \n# \n# We can use the augmented Dickey-Fuller statistical significance test to check that the output series is stationary. \n# \n\n# In[9]:\n\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n# check if stationary\nresult = adfuller(train)\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % result[1])\nprint('Critical Values:')\nfor key, value in result[4].items():\n    print('\\t%s: %.3f' % (key, value))\n\n\n# The results show that the test statistic value -4.099 is smaller than the critical value at 1% of -3.47. This suggests that we can reject the null hypothesis with a significance level of less than 1% (i.e. a low probability that the result is a statistical fluke). \n# \n# Rejecting the null hypothesis means that the time series is stationary, i.e. it does not have a time-dependent structure.\n# \n\n# In[10]:\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n    # note: returned as a fraction, not multiplied by 100\n    y_true, y_pred = np.array(y_true), np.array(y_pred)\n    return np.mean(np.abs((y_true - y_pred) / y_true))\n\n\n# ## 3. 
Naive forecast\n\n# In[11]:\n\nimport matplotlib.axes as ax\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\nfor i in range(len(test)):\n\t# predict\n\tyhat = history[-1]\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n#\tprint('>Predicted=%.3f, Expected=%3.f' % (yhat, obs))\n \n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(test,label='test')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# In[12]:\n\nX = series.values\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n# train autoregression\nmodel = AR(train)\nmodel_fit = model.fit()\nwindow = model_fit.k_ar\ncoef = model_fit.params\n# walk forward over time steps in test\nhistory = train[len(train)-window:]\nhistory = [history[i] for i in range(len(history))]\npredictions = list()\nfor t in range(len(test)):\n\tlength = len(history)\n\tlag = [history[i] for i in range(length-window,length)]\n\tyhat = coef[0]\n\tfor d in range(window):\n\t\tyhat += coef[d+1] * lag[window-d-1]\n\tobs = test[t]\n\tpredictions.append(yhat)\n\thistory.append(obs)\n#\tprint('predicted=%f, expected=%f' % (yhat, obs))\nprint('Lag: %s' % model_fit.k_ar)\n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(test,label='test')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# In[13]:\n\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.stattools import adfuller, arma_order_select_ic\nX = series.values\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\nres = sm.tsa.arma_order_select_ic(train, max_ar=5, max_ma=5, ic=['aic', 'bic'], trend='nc')\nres.aic_min_order\nres.bic_min_order\n\n\n# In[14]:\n\nmodel = ARMA(train, order=(1, 1)) #5,7 is decided using arma_order_select_ic method\nresults = model.fit(trend='nc', method='css-mle')\nprint(results.summary2())\n\n\n# ### 5.2 Manually Configure the ARIMA\n# \n# We will use the difference adjusted dataset as an input to the ARIMA model. \n# \n# The `ARIMA(p,d,q)` model requires three parameters and is traditionally configured manually.\n# \n# The first step is to select the lag values for the Autoregression (`AR`) and Moving Average (`MA`) parameters, `p` and `q` respectively. \n# \n# We can do this by reviewing Autocorrelation Function (`ACF`) and Partial Autocorrelation Function (`PACF`) plots. Note, we are now using the seasonally differenced `stationary.csv` as our dataset. This is because the manual seasonal differencing performed is different from the lag=1 differencing performed by the ARIMA model with the `d` parameter. (It also suggests that no further differencing may be required, and that the `d` parameter may be set to 0.) The example below creates ACF and PACF plots for the series.\n\n# In this section, we will develop an Autoregressive Integrated Moving Average, or ARIMA, model for the problem. 
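\n# \n# A minimal sketch of those ACF and PACF plots on the (square-root transformed) training series, using the `plot_acf`/`plot_pacf` imports above; the 24-month lag window is an arbitrary choice:\n\n# In[ ]:\n\npyplot.figure(figsize=(11,7))\npyplot.subplot(211)\nplot_acf(series, ax=pyplot.gca(), lags=24)   # autocorrelation\npyplot.subplot(212)\nplot_pacf(series, ax=pyplot.gca(), lags=24)  # partial autocorrelation\npyplot.show()\n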
\n# \n# An ARIMA model can be considered as a special type of regression model, in which the dependent variable has been stationarized and the independent variables are all lags of the dependent variable and/or lags of the errors. Alternatively, you can think of a hybrid ARIMA/regression model as a regression model which includes a correction for autocorrelated errors. \n# \n# We will approach modeling by both manual and automatic configuration of the ARIMA model. This will be followed by investigating the residual errors of the chosen model. As such, this section is broken down into the following steps:\n# 1. Checking stationarity\n# 2. Manually Configure the ARIMA\n# 3. Running the ARIMA model\n# 4. Review Residual Errors\n# 5. Finalize model \n# 6. Making predictions\n\n# We can make the following observations from the above plots.\n# \n# - The ACF shows a significant lag of 1 month, with perhaps some significant lag at 13 months.\n# - The PACF shows a significant lag of 1 month, with perhaps some significant lag at 13 months.\n# \n# Both the ACF and PACF show a drop-off at the same point, perhaps suggesting a mix of AR and MA.\n# \n# This quick analysis suggests an `ARIMA(1,0,1)` on the stationary data may be a good starting point. The historic observations will be seasonally differenced prior to the fitting of each ARIMA model. The differencing will be inverted for all predictions made to make them directly comparable to the expected observation in the original case count units. Experimentation shows that this configuration of ARIMA does not always converge and can result in errors in the underlying library.\n# \n# \n\n# ### 5.3 Running the ARIMA model\n\n# The example below demonstrates the performance of the selected ARIMA model using the experimental setup.\n\n# In[19]:\n\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\nfor i in range(len(test)):\n\tmodel = ARIMA(history, order=(1,0,1)) # using the manually selected parameters\n\tmodel_fit = model.fit()\n\tyhat = model_fit.forecast()[0]\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n#\tprint('>Predicted=%.3f, Expected=%3.f' % (yhat, obs)) \n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(test,label='test')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# In[20]:\n\nprint(model_fit.summary().as_latex())\n\n\n# Running the ARIMA(1,0,1) model results in an RMSE of 0.844, which is better than the persistence RMSE of 0.897.\n# \n# This is a great start, but we may be able to get improved results with a better configured ARIMA model.\n\n# In[25]:\n\n# evaluate an ARIMA model for a given order (p,d,q) and return RMSE\ndef evaluate_arima_model(X, arima_order):\n\t# prepare training dataset\n\tX = X.astype('float32')\n\ttrain_size = int(len(X) * 0.80)\n\ttrain, test = X[0:train_size], X[train_size:]\n\thistory = [x for x in train]\n\t# make predictions\n\tpredictions = list()\n\tfor t in range(len(test)):\n\t\tmodel = ARIMA(history, order=arima_order)\n\t\tmodel_fit = model.fit(trend='nc', disp=0)\n\t\tyhat = model_fit.forecast()[0]\n\t\tpredictions.append(yhat)\n\t\thistory.append(test[t])\n\t# calculate out 
of sample error\n\tmse = mean_squared_error(test, predictions)\n\trmse = sqrt(mse)\n\treturn rmse\n\n# evaluate combinations of p, d and q values for an ARIMA model\ndef evaluate_models(dataset, p_values, d_values, q_values):\n\tdataset = dataset.astype('float32')\n\tbest_score, best_cfg = float(\"inf\"), None\n\tfor p in p_values:\n\t\tfor d in d_values:\n\t\t\tfor q in q_values:\n\t\t\t\torder = (p,d,q)\n\t\t\t\ttry:\n\t\t\t\t\trmse = evaluate_arima_model(dataset, order)\n\t\t\t\t\tif rmse < best_score:\n\t\t\t\t\t\tbest_score, best_cfg = rmse, order\n\t\t\t\t\tprint('ARIMA%s RMSE=%.3f' % (order,rmse))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\tprint('Best ARIMA%s RMSE=%.3f' % (best_cfg, best_score))\n\n# load dataset\nseries = Series.from_csv('trainSet.csv')\n# evaluate parameters\np_values = range(1, 5)\nd_values = range(0, 2)\nq_values = range(1, 5)\nwarnings.filterwarnings(\"ignore\")\nevaluate_models(series.values, p_values, d_values, q_values)\n\n\n# In[26]:\n\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\nfor i in range(len(test)):\n\tmodel = ARIMA(history, order=(4,1,1)) #using manually selected paratments\n\tmodel_fit = model.fit()\n\tyhat = model_fit.forecast()[0]\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n#\tprint('>Predicted=%.3f, Expected=%3.f' % (yhat, obs))\nprint(model_fit.summary()) \n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(test,label='test')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# In[33]:\n\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\nfor i in range(len(test)):\n\tmodel = ARIMA(history, order=(4,1,1)) #using manually selected paratments\n\tmodel_fit = model.fit(disp=False, trend='c',transparams=False)\n\tyhat = model_fit.forecast()[0]\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n#\tprint('>Predicted=%.3f, Expected=%3.f' % (yhat, obs))\nprint(model_fit.summary()) \n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(test,label='test')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# In[34]:\n\nprint(model_fit.summary().as_latex())\n\n\n# ### 5.4 Review residual errors\n\n# A good final check of a model is to review residual forecast errors. Ideally, the distribution of residual errors should be a Gaussian with a zero mean. We can check this by using summary statistics and plots to investigate the residual errors from the ARIMA(0,1,1) model. 
The example below calculates and summarizes the residual forecast errors.\n\n# In[36]:\n\n# summarize ARIMA forecast residuals\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\n\nfor i in range(len(test)):\n\t# difference data\n\t# predict\n\tmodel = ARIMA(history, order=(4,1,1))\n\tmodel_fit = model.fit(disp=False, trend='c',transparams=False)\n\tyhat = model_fit.forecast()[0]\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n\n# errors\nresiduals = [test[i]-predictions[i] for i in range(len(test))]\nresiduals = DataFrame(residuals)\nprint(residuals.describe())\n\n\n# We can see that the distribution has a right shift and that the mean is non-zero. This is perhaps a sign that the predictions are biased.\n# \n# We can examine this further by plotting the distribution of redidual errors. \n\n# In[67]:\n\nprint(residuals.describe())\nplt.figure(figsize=(8,5))\nsns.distplot(residuals, kde=False, fit=stats.gamma,color='green')\nsns.plt.title('Residual Error Distribution',fontsize=14)\n\n\n# In[69]:\n\n# prepare data\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n# walk-forward validation\nhistory = [x for x in train]\npredictions = list()\nbias = -0.079681\nfor i in range(len(test)):\n\t# predict\n\tmodel = ARIMA(history, order=(4,1,1))\n\tmodel_fit = model.fit(disp=False, trend='c',transparams=False)\n\tyhat = model_fit.forecast()[0]\n\tyhat = yhat+bias\n\tpredictions.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n# report performance\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(test, predictions)\nprint('MAPE: %.3f' % mape)\n# errors\nresiduals = [test[i]-predictions[i] for i in range(len(test))]\nresiduals = DataFrame(residuals)\nprint(residuals.describe())\n# plot\npyplot.figure()\npyplot.subplot(211)\nresiduals.hist(ax=pyplot.gca())\npyplot.subplot(212)\nresiduals.plot(kind='kde', ax=pyplot.gca())\npyplot.show()\n\n\n# In[74]:\n\npyplot.figure(figsize=(11,7))\npyplot.subplot(211)\nplot_acf(residuals, ax=pyplot.gca())\nplt.title('Autocorrelation',fontsize=14)\npyplot.subplot(212)\nplot_pacf(residuals, ax=pyplot.gca())\nplt.title('Partial Autocorrelation',fontsize=14)\npyplot.show()\n\n\n# ## Model Validation\n\n# ### 5.5 Finalize model \n\n# Having manually selected the ARIMA(2,0,2) model we are in a position to finalizing the model by fitting our selected ARIMA model on the dataset, in this case on a transformed version of the entire dataset. Once fit, the model can be saved to file for later use. \n\n# In[89]:\n\nX = series.values\nX = X.astype('float32')\n# difference data\ndef __getnewargs__(self):\n\treturn ((self.endog),(self.k_lags, self.k_diff, self.k_ma))\n \nARIMA.__getnewargs__ = __getnewargs__\n\nmodel = ARIMA(X, order=(4,1,1))\nmodel_fit = model.fit(trend='nc', disp=0)\n# bias constant, could be calculated from in-sample mean residual\nbias = -0.079681\n# save model\nmodel_fit.save('model.pkl')\nnumpy.save('model_bias.npy', [bias])\n\n\n# Running the code above creates two local files:\n# \n# - `model.pkl` This is the `ARIMAResult` object from the call to `ARIMA.fit()`. 
This includes the coefficients and all other internal data returned when fitting the model.\n# - `model_bias.npy` This is the bias value stored as a one-row, one-column NumPy array.\n\n# ### 5.6 Making predictions\n\n# A natural first use case is to load the model and make a single forecast. This is relatively straightforward and involves restoring the saved model and the bias and calling the `forecast()` function. To invert the seasonal differencing, the historical data must also be loaded. The example below loads the model, makes a prediction for the next time step, and prints the prediction.\n\n# In[90]:\n\nfrom statsmodels.tsa.arima_model import ARIMAResults\n\nseries = Series.from_csv('trainSet.csv')\nmodel_fit = ARIMAResults.load('model.pkl')\nbias = numpy.load('model_bias.npy')\nyhat = float(model_fit.forecast()[0])\nyhat = bias + yhat\nprint('Predicted: %.3f' % yhat)\n\n\n# This prediction gives the sort of result we would expect in the first instance. \n# \n# Now let us explore the model properly and use it in a simulated operational manner. In the Experimental Setup section, we split the original dataset into training and hold-out sets. We can load the hold-out file (`holdontestSet.csv`) now and use it to see how well our model really does on unseen data. There are two ways we might proceed:\n# \n# - Load the model and use it to forecast forward in time over many months. The forecast beyond the first one or two months will quickly start to degrade in performance as we get further away from the known data. \n# \n# - Load the model and use it in a rolling-forecast manner, updating the transform and model for each time step. This is the preferred method, as it is how one would use this model in practice and it would achieve the best performance.\n# \n# As with model evaluation in previous sections, we will make predictions in a rolling-forecast manner. This means that we will step over lead times in the validation dataset and take the observations as an update to the history.\n\n# In[94]:\n\n# load and prepare datasets\ndataset = Series.from_csv('trainSet.csv')\nX = dataset.values.astype('float32')\nhistory = [x for x in X]\nvalidation = Series.from_csv('holdontestSet.csv')\ny = validation.values.astype('float32')\n# load model\nmodel_fit = ARIMAResults.load('model.pkl')\nbias = numpy.load('model_bias.npy')\n# make first prediction\npredictions = list()\nyhat = float(model_fit.forecast()[0])\nyhat = bias + yhat\npredictions.append(yhat)\nhistory.append(y[0])\nprint('>Predicted=%.3f, Expected=%.3f' % (yhat, y[0]))\n# rolling forecasts\nfor i in range(1, len(y)):\n\t# predict\n\tmodel = ARIMA(history, order=(4,1,1))\n\tmodel_fit = model.fit(trend='nc', disp=0, start_ar_lags=6)\n\tyhat = model_fit.forecast()[0]\n\tyhat = bias + yhat\n\tpredictions.append(yhat)\n\t# observation\n\tobs = y[i]\n\thistory.append(obs)\n\tprint('>Predicted=%.3f, Expected=%.3f' % (yhat, obs))\n\nrmse = sqrt(mean_squared_error(y, predictions))\nprint('RMSE: %.3f' % rmse)\nmape= mean_absolute_percentage_error(y, predictions)\nprint('MAPE: %.3f' % mape)\n# plot\nplt.figure(figsize=(10,6))\nplt.plot(y,label='holdontest')\nplt.plot(predictions, color='green',linestyle='--',label='predictions')\nplt.legend(fontsize=14)\nplt.show()\n\n\n# From the plot above we can see that the `ARIMA` model is working in the manner expected, but that our predictions are not always in line with the test data. \n# Before we look at the errors or failure modes in more detail, let us now approach this learning problem using a different set of techniques. \n# \n\n# ## 6. 
Supervised Learning setup\n\n# In previous sections we have trained an ARIMA model on time series data.\n# \n# Here we examine how time series forecasting can be framed as a supervised learning problem. This re-framing of the time series data gives you access to the suite of standard linear and nonlinear machine learning algorithms (for example, boosted trees). \n\n# ### 6.1 Sliding Window (univariate) \n# \n# Time series data can be reformulated as supervised learning. Given a sequence of numbers for a time series dataset, we can restructure the data to look like a supervised learning problem. We can do this by using previous time steps as input variables and using the next time step as the output variable. Let's make this concrete with an example. Imagine we have the time series below and are trying to learn the mapping function from input to output\n# \n# $Y = f(X)$\n# \n# for input variables ($X$) and output variables ($Y$). Starting with the following data:\n# 
\n# | time | measure |\n# |------|---------|\n# | 1    | 100     |\n# | 2    | 110     |\n# | 3    | 108     |\n# | 4    | 115     |\n# | 5    | 120     |\n# \n# 
We can restructure this time series dataset as a supervised learning problem by using the value at the previous time step to predict the value at the next time-step. Re-organizing the time series dataset this way, the data would look as follows:

\n# | $X$ | $y$ |\n# |-----|-----|\n# | ?   | 100 |\n# | 100 | 110 |\n# | 110 | 108 |\n# | 108 | 115 |\n# | 115 | 120 |\n# | 120 | ?   |
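\n# \n# In pandas, this one-step restructuring can be reproduced with `shift`; a minimal sketch on the toy series above (the `toy` and `supervised` names are illustrative only):\n\n# In[ ]:\n\n# shift the series down one step to pair each value with its predecessor;\n# the NaN in the first row corresponds to the '?' in the table above\ntoy = pd.Series([100, 110, 108, 115, 120])\nsupervised = pd.concat([toy.shift(1), toy], axis=1)\nsupervised.columns = ['X', 'y']\nprint(supervised)\n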
\n# \n\n# Take a look at the above transformed dataset and compare it to the original time series. Here are some observations:\n# \n# - We can see that the previous time step is the input ($X$) and the next time step is the output ($y$) in our supervised learning problem.\n# - We can see that the order between the observations is preserved, and must continue to be preserved when using this dataset to train a supervised model.\n# - We can see that we have no previous value that we can use to predict the first value in the sequence. We will delete this row as we cannot use it.\n# - We can also see that we do not have a known next value to predict for the last value in the sequence. We may want to delete this value while training our supervised model also.\n# \n# The use of prior time steps to predict the next time step is called the sliding window method. For short, it may be called the window method in some literature. In statistics and time series analysis, this is called a lag or lag method. The number of previous time steps is called the window width or size of the lag. This sliding window is the basis for how we can turn any time series dataset into a supervised learning problem. From this simple example, we can notice a few things:\n# \n# - We can see how this can work to turn a time series into either a regression or a classification supervised learning problem for real-valued or labeled time series values.\n# - We can see how once a time series dataset is prepared this way that any of the standard linear and nonlinear machine learning algorithms may be applied, as long as the order of the rows is preserved.\n# - We can see how the width sliding window can be increased to include more previous time steps.\n# - We can see how the sliding window approach can be used on a time series that has more than one value, or so-called multivariate time series.\n# \n\n# Let us frame the Champagne sales forecast problem as a supervised learning one (where the outputs are the differenced series values) so we can apply well-known ML regressors from `scikit`. We start with extracting features.\n\n# In[104]:\n\nseries = Series.from_csv('trainSet.csv', header=0)\nX = series.values\nX = X.astype('float32')\ntrain_size = int(len(X) * 0.80)\ntrain, test = X[0:train_size], X[train_size:]\n\n# creating lag features with pandas\ndiff = DataFrame(difference(series, months_in_year))\ndataframe = concat([diff.shift(3), diff.shift(2), diff.shift(1), diff], axis=1)\ndataframe.columns = ['t-2', 't-1', 't', 't+1']\nprint(len(dataframe))\ndataframe.head()\n\n\n# Two remarks:\n# \n# - There are 12 less rows in diff (hence in dataframe) than in series, since we are dealing with differenced series\n# - The first rows contain nans because of the lag features. Let's discard them and get the values in a form we can use within scikit.\n\n# In[133]:\n\nXX = dataframe.values[3:,0:-1]\nyy = dataframe.values[3:,-1]\n\n\n# We define the training and test sets in a way such that the 1st element in the supervised learning test set corresponds to the 1st element in the previous time series test set.\n\n# In[134]:\n\ntrain_size = int(len(series) * 0.80) - 3 - 12 # because of the lag and of the difference\nXX_train = XX[0:train_size]\nXX_test = XX[train_size:]\nyy_train = yy[0:train_size]\nyy_test = yy[train_size:]\n\n\n# We can compare the sizes of train and test sets with what we had before, and check that the size of the test set is the same.\n\n# In[135]:\n\nprint(train_size)\nprint(len(XX)-train_size)\n\n\n# ## 7. 
Random Forest Models\n\n# Let's train the regressor we want to work with:\n\n# In[136]:\n\nmodel = RandomForestRegressor()\nmodel.fit(XX_train, yy_train)\n\n\n# We reuse the previous evaluation code but this time we make predictions with this regressor\n\n# In[137]:\n\n# walk-forward validation\nhistory = [x for x in train]\nprediction_sl = list()\nfor i in range(len(test)):\n\tyhat = model.predict(XX_test[i,:])[0]\n\tyhat = inverse_difference(history, yhat, months_in_year)\n\tprediction_sl.append(yhat)\n\t# observation\n\tobs = test[i]\n\thistory.append(obs)\n\tprediction_sl[i]=yhat\n#\tprint('>Predicted=%.3f, Expected=%3.f' % (yhat, obs))\n\n\n# In[138]:\n\nrmse = sqrt(mean_squared_error(test, prediction_sl))\nprint('RMSE: %.3f' % rmse)\n\n\n# In[139]:\n\npyplot.plot(test, color='green')\npyplot.plot(prediction_sl, color='orange')\npyplot.show()\n\n\n# In[ ]:\n\n\n\n","sub_path":"ForecastModelforNumberofCasesSettledMonthly.py","file_name":"ForecastModelforNumberofCasesSettledMonthly.py","file_ext":"py","file_size_in_byte":33082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181571827","text":"'''\nCreated on 2012-5-2\n\n@author: liwenjian\n'''\nfrom django.conf.urls import patterns, url\nfrom django.contrib.auth.views import logout_then_login\n\nurlpatterns = patterns('acsbbs.accounts.views',\n url(r'^register/$', 'acsbbs_register', name='acsbbs_register'),\n url(r'^login/$', 'acsbbs_login', name='acsbbs_login'),\n url(r'^logout/$', logout_then_login, name='acsbbs_logout'),\n)\n","sub_path":"acsbbs/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169391215","text":"from flask import Flask, jsonify, request, render_template, redirect, url_for\nfrom app import app\nimport pusher\n\n\n@app.route('/')\ndef test():\n return render_template('ajax_test.html')\n\n\n@app.route('/ajax/test', methods=['GET', 'POST'])\ndef ajax_test():\n resp = {}\n if request.method == 'POST':\n req = request.form\n\n resp['result'] = int(req['first']) + int(req['second'])\n resp['success'] = True\n return jsonify(resp)\n else:\n return redirect(url_for('test'))\n\n\n@app.route('/chat', methods=['GET'])\ndef chat():\n p = pusher.Pusher(\n app_id='86070',\n key='62270f36d7ecf7bf7ef0',\n secret='46b772c92bcc9d25fae9'\n )\n p['test_channel'].trigger('my_event', {\n 'name': request.args.get('name_data'),\n 'msg': request.args.get('msg_data')\n })\n return \"\"\n\n\n@app.route('/mini_chat')\ndef mini_chat():\n return render_template(\"mini_chat.html\")\n","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"64950356","text":"#\r\n#Question 3:\r\n#Take a list as shown below\r\n#[(1,2,3), [1,2], ['a','hit','less']]\r\n#The List contains tuple and lists. 
Make the elements of inner lists and tuples to outer list\r\n#\r\n\r\nlist1=[(1,2,3),[1,2],['a','hit','less']]\r\n\r\nlist2=[i for each in list1 for i in each]\r\nprint (list2)","sub_path":"Day7/D7Q3.py","file_name":"D7Q3.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"628904183","text":"import json\ndef main():\n with open(\"leagues_ids.txt\") as f:\n leagues = json.loads(f.read())\n years = []\n start = 1995\n while start <= 2019:\n\t years.append(str(start) + '-' + str(start + 1 ))\n\t start += 1\n\n for k,v in leagues.items():\n for i in range(len(years)):\n print(leagues[k], years[i], k)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Cloud_Hockey_Analytics/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"600047144","text":"from __future__ import absolute_import, unicode_literals\n\nfrom celery import shared_task\nfrom reddit.models import RedditPost, Pending\nfrom .redditBot import RedditBot\n\n@shared_task\ndef getHot(subreddit = 'learnpython'):\n\tRedditBot().hot(subreddit)\n\tPending.objects.filter(subreddit=subreddit).delete()\n\n@shared_task\ndef checkExistance(subreddit):\n\texists = RedditBot().checkSubreddit(subreddit)\n\tif exists:\n\t\tgetHot.delay(subreddit)\n\telse :\n\t\tPending.objects.filter(subreddit=subreddit).delete()","sub_path":"src/Alerts/reddit/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"148697754","text":"\nfrom ctypes import *\nfrom pythc import thc\nlib = cdll.LoadLibrary(\"./libTHCUNN.so\")\n\nclass _nn(object):\n def __init__(self):\n\n THNN_CudaAbs_updateOutput=lib.THNN_CudaAbs_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaAbs_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaAbs_updateOutput.restype = None\n self.THNN_CudaAbs_updateOutput = THNN_CudaAbs_updateOutput\n\n THNN_CudaAbs_updateGradInput=lib.THNN_CudaAbs_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaAbs_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaAbs_updateGradInput.restype = None\n self.THNN_CudaAbs_updateGradInput = THNN_CudaAbs_updateGradInput\n\n THNN_CudaAbsCriterion_updateOutput=lib.THNN_CudaAbsCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaAbsCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaAbsCriterion_updateOutput.restype = None\n self.THNN_CudaAbsCriterion_updateOutput = THNN_CudaAbsCriterion_updateOutput\n\n THNN_CudaAbsCriterion_updateGradInput=lib.THNN_CudaAbsCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaAbsCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaAbsCriterion_updateGradInput.restype = None\n self.THNN_CudaAbsCriterion_updateGradInput = THNN_CudaAbsCriterion_updateGradInput\n\n THNN_CudaClassNLLCriterion_updateOutput=lib.THNN_CudaClassNLLCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,THCudaTensor*,THCudaTensor*\n THNN_CudaClassNLLCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_void_p,c_void_p]\n THNN_CudaClassNLLCriterion_updateOutput.restype = None\n 
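# Binding pattern used for every THCUNN entry point in this constructor: fetch\n        # the symbol from the shared library, declare its C argument/return types for\n        # ctypes (the C signature is kept in the comment above each stanza), then\n        # expose the callable as an attribute on self.\n        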
self.THNN_CudaClassNLLCriterion_updateOutput = THNN_CudaClassNLLCriterion_updateOutput\n\n THNN_CudaClassNLLCriterion_updateGradInput=lib.THNN_CudaClassNLLCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,THCudaTensor*,THCudaTensor*\n THNN_CudaClassNLLCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_void_p,c_void_p]\n THNN_CudaClassNLLCriterion_updateGradInput.restype = None\n self.THNN_CudaClassNLLCriterion_updateGradInput = THNN_CudaClassNLLCriterion_updateGradInput\n\n THNN_CudaDistKLDivCriterion_updateOutput=lib.THNN_CudaDistKLDivCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaDistKLDivCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaDistKLDivCriterion_updateOutput.restype = None\n self.THNN_CudaDistKLDivCriterion_updateOutput = THNN_CudaDistKLDivCriterion_updateOutput\n\n THNN_CudaDistKLDivCriterion_updateGradInput=lib.THNN_CudaDistKLDivCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaDistKLDivCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaDistKLDivCriterion_updateGradInput.restype = None\n self.THNN_CudaDistKLDivCriterion_updateGradInput = THNN_CudaDistKLDivCriterion_updateGradInput\n\n THNN_CudaELU_updateOutput=lib.THNN_CudaELU_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,float\n THNN_CudaELU_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_float]\n THNN_CudaELU_updateOutput.restype = None\n self.THNN_CudaELU_updateOutput = THNN_CudaELU_updateOutput\n\n THNN_CudaELU_updateGradInput=lib.THNN_CudaELU_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,float\n THNN_CudaELU_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_float]\n THNN_CudaELU_updateGradInput.restype = None\n self.THNN_CudaELU_updateGradInput = THNN_CudaELU_updateGradInput\n\n THNN_CudaHardTanh_updateOutput=lib.THNN_CudaHardTanh_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,float,float\n THNN_CudaHardTanh_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_float,c_float]\n THNN_CudaHardTanh_updateOutput.restype = None\n self.THNN_CudaHardTanh_updateOutput = THNN_CudaHardTanh_updateOutput\n\n THNN_CudaHardTanh_updateGradInput=lib.THNN_CudaHardTanh_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,float,float\n THNN_CudaHardTanh_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_float,c_float]\n THNN_CudaHardTanh_updateGradInput.restype = None\n self.THNN_CudaHardTanh_updateGradInput = THNN_CudaHardTanh_updateGradInput\n\n THNN_CudaL1Cost_updateOutput=lib.THNN_CudaL1Cost_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaL1Cost_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaL1Cost_updateOutput.restype = None\n self.THNN_CudaL1Cost_updateOutput = THNN_CudaL1Cost_updateOutput\n\n THNN_CudaL1Cost_updateGradInput=lib.THNN_CudaL1Cost_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaL1Cost_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaL1Cost_updateGradInput.restype = None\n self.THNN_CudaL1Cost_updateGradInput = THNN_CudaL1Cost_updateGradInput\n\n THNN_CudaLeakyReLU_updateOutput=lib.THNN_CudaLeakyReLU_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,double,bool\n THNN_CudaLeakyReLU_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_double,c_bool]\n 
THNN_CudaLeakyReLU_updateOutput.restype = None\n self.THNN_CudaLeakyReLU_updateOutput = THNN_CudaLeakyReLU_updateOutput\n\n THNN_CudaLeakyReLU_updateGradInput=lib.THNN_CudaLeakyReLU_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,double,bool\n THNN_CudaLeakyReLU_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_double,c_bool]\n THNN_CudaLeakyReLU_updateGradInput.restype = None\n self.THNN_CudaLeakyReLU_updateGradInput = THNN_CudaLeakyReLU_updateGradInput\n\n THNN_CudaLogSigmoid_updateOutput=lib.THNN_CudaLogSigmoid_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaLogSigmoid_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaLogSigmoid_updateOutput.restype = None\n self.THNN_CudaLogSigmoid_updateOutput = THNN_CudaLogSigmoid_updateOutput\n\n THNN_CudaLogSigmoid_updateGradInput=lib.THNN_CudaLogSigmoid_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaLogSigmoid_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaLogSigmoid_updateGradInput.restype = None\n self.THNN_CudaLogSigmoid_updateGradInput = THNN_CudaLogSigmoid_updateGradInput\n\n THNN_CudaLogSoftMax_updateOutput=lib.THNN_CudaLogSoftMax_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaLogSoftMax_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaLogSoftMax_updateOutput.restype = None\n self.THNN_CudaLogSoftMax_updateOutput = THNN_CudaLogSoftMax_updateOutput\n\n THNN_CudaLogSoftMax_updateGradInput=lib.THNN_CudaLogSoftMax_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaLogSoftMax_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaLogSoftMax_updateGradInput.restype = None\n self.THNN_CudaLogSoftMax_updateGradInput = THNN_CudaLogSoftMax_updateGradInput\n\n THNN_CudaLookupTable_accGradParameters=lib.THNN_CudaLookupTable_accGradParameters\n # THCState*,THIndexTensor*,THCudaTensor*,THCudaTensor*,THIntegerTensor*,THCudaTensor*,THCudaTensor*,bool,int,float\n THNN_CudaLookupTable_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_int,c_float]\n THNN_CudaLookupTable_accGradParameters.restype = None\n self.THNN_CudaLookupTable_accGradParameters = THNN_CudaLookupTable_accGradParameters\n\n THNN_CudaMarginCriterion_updateOutput=lib.THNN_CudaMarginCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,float\n THNN_CudaMarginCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_float]\n THNN_CudaMarginCriterion_updateOutput.restype = None\n self.THNN_CudaMarginCriterion_updateOutput = THNN_CudaMarginCriterion_updateOutput\n\n THNN_CudaMarginCriterion_updateGradInput=lib.THNN_CudaMarginCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,float\n THNN_CudaMarginCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_float]\n THNN_CudaMarginCriterion_updateGradInput.restype = None\n self.THNN_CudaMarginCriterion_updateGradInput = THNN_CudaMarginCriterion_updateGradInput\n\n THNN_CudaSoftMarginCriterion_updateOutput=lib.THNN_CudaSoftMarginCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int\n THNN_CudaSoftMarginCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int]\n THNN_CudaSoftMarginCriterion_updateOutput.restype = None\n self.THNN_CudaSoftMarginCriterion_updateOutput = 
THNN_CudaSoftMarginCriterion_updateOutput\n\n THNN_CudaSoftMarginCriterion_updateGradInput=lib.THNN_CudaSoftMarginCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int\n THNN_CudaSoftMarginCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int]\n THNN_CudaSoftMarginCriterion_updateGradInput.restype = None\n self.THNN_CudaSoftMarginCriterion_updateGradInput = THNN_CudaSoftMarginCriterion_updateGradInput\n\n THNN_CudaMSECriterion_updateOutput=lib.THNN_CudaMSECriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaMSECriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaMSECriterion_updateOutput.restype = None\n self.THNN_CudaMSECriterion_updateOutput = THNN_CudaMSECriterion_updateOutput\n\n THNN_CudaMSECriterion_updateGradInput=lib.THNN_CudaMSECriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaMSECriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaMSECriterion_updateGradInput.restype = None\n self.THNN_CudaMSECriterion_updateGradInput = THNN_CudaMSECriterion_updateGradInput\n\n THNN_CudaMultiMarginCriterion_updateOutput=lib.THNN_CudaMultiMarginCriterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,int,THCudaTensor*\n THNN_CudaMultiMarginCriterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_int,c_void_p]\n THNN_CudaMultiMarginCriterion_updateOutput.restype = None\n self.THNN_CudaMultiMarginCriterion_updateOutput = THNN_CudaMultiMarginCriterion_updateOutput\n\n THNN_CudaMultiMarginCriterion_updateGradInput=lib.THNN_CudaMultiMarginCriterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,int,THCudaTensor*\n THNN_CudaMultiMarginCriterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_int,c_void_p]\n THNN_CudaMultiMarginCriterion_updateGradInput.restype = None\n self.THNN_CudaMultiMarginCriterion_updateGradInput = THNN_CudaMultiMarginCriterion_updateGradInput\n\n THNN_CudaPReLU_updateOutput=lib.THNN_CudaPReLU_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,long\n THNN_CudaPReLU_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_long]\n THNN_CudaPReLU_updateOutput.restype = None\n self.THNN_CudaPReLU_updateOutput = THNN_CudaPReLU_updateOutput\n\n THNN_CudaPReLU_updateGradInput=lib.THNN_CudaPReLU_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,long\n THNN_CudaPReLU_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_long]\n THNN_CudaPReLU_updateGradInput.restype = None\n self.THNN_CudaPReLU_updateGradInput = THNN_CudaPReLU_updateGradInput\n\n THNN_CudaPReLU_accGradParameters=lib.THNN_CudaPReLU_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,long,float\n THNN_CudaPReLU_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_long,c_float]\n THNN_CudaPReLU_accGradParameters.restype = None\n self.THNN_CudaPReLU_accGradParameters = THNN_CudaPReLU_accGradParameters\n\n THNN_CudaRReLU_updateOutput=lib.THNN_CudaRReLU_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,double,double,bool,bool,void*\n THNN_CudaRReLU_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_double,c_double,c_bool,c_bool,c_void_p]\n THNN_CudaRReLU_updateOutput.restype = None\n 
self.THNN_CudaRReLU_updateOutput = THNN_CudaRReLU_updateOutput\n\n THNN_CudaRReLU_updateGradInput=lib.THNN_CudaRReLU_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,double,double,bool,bool\n THNN_CudaRReLU_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_double,c_double,c_bool,c_bool]\n THNN_CudaRReLU_updateGradInput.restype = None\n self.THNN_CudaRReLU_updateGradInput = THNN_CudaRReLU_updateGradInput\n\n THNN_CudaSigmoid_updateOutput=lib.THNN_CudaSigmoid_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaSigmoid_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaSigmoid_updateOutput.restype = None\n self.THNN_CudaSigmoid_updateOutput = THNN_CudaSigmoid_updateOutput\n\n THNN_CudaSigmoid_updateGradInput=lib.THNN_CudaSigmoid_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaSigmoid_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaSigmoid_updateGradInput.restype = None\n self.THNN_CudaSigmoid_updateGradInput = THNN_CudaSigmoid_updateGradInput\n\n THNN_CudaSmoothL1Criterion_updateOutput=lib.THNN_CudaSmoothL1Criterion_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaSmoothL1Criterion_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaSmoothL1Criterion_updateOutput.restype = None\n self.THNN_CudaSmoothL1Criterion_updateOutput = THNN_CudaSmoothL1Criterion_updateOutput\n\n THNN_CudaSmoothL1Criterion_updateGradInput=lib.THNN_CudaSmoothL1Criterion_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool\n THNN_CudaSmoothL1Criterion_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_bool]\n THNN_CudaSmoothL1Criterion_updateGradInput.restype = None\n self.THNN_CudaSmoothL1Criterion_updateGradInput = THNN_CudaSmoothL1Criterion_updateGradInput\n\n THNN_CudaSoftMax_updateOutput=lib.THNN_CudaSoftMax_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaSoftMax_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaSoftMax_updateOutput.restype = None\n self.THNN_CudaSoftMax_updateOutput = THNN_CudaSoftMax_updateOutput\n\n THNN_CudaSoftMax_updateGradInput=lib.THNN_CudaSoftMax_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaSoftMax_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaSoftMax_updateGradInput.restype = None\n self.THNN_CudaSoftMax_updateGradInput = THNN_CudaSoftMax_updateGradInput\n\n THNN_CudaSoftPlus_updateOutput=lib.THNN_CudaSoftPlus_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,float,float\n THNN_CudaSoftPlus_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_float,c_float]\n THNN_CudaSoftPlus_updateOutput.restype = None\n self.THNN_CudaSoftPlus_updateOutput = THNN_CudaSoftPlus_updateOutput\n\n THNN_CudaSoftPlus_updateGradInput=lib.THNN_CudaSoftPlus_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,float,float\n THNN_CudaSoftPlus_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_float,c_float]\n THNN_CudaSoftPlus_updateGradInput.restype = None\n self.THNN_CudaSoftPlus_updateGradInput = THNN_CudaSoftPlus_updateGradInput\n\n THNN_CudaSoftShrink_updateOutput=lib.THNN_CudaSoftShrink_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,double\n THNN_CudaSoftShrink_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_double]\n THNN_CudaSoftShrink_updateOutput.restype = 
None\n self.THNN_CudaSoftShrink_updateOutput = THNN_CudaSoftShrink_updateOutput\n\n THNN_CudaSoftShrink_updateGradInput=lib.THNN_CudaSoftShrink_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,double\n THNN_CudaSoftShrink_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_double]\n THNN_CudaSoftShrink_updateGradInput.restype = None\n self.THNN_CudaSoftShrink_updateGradInput = THNN_CudaSoftShrink_updateGradInput\n\n THNN_CudaSqrt_updateOutput=lib.THNN_CudaSqrt_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,float\n THNN_CudaSqrt_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_float]\n THNN_CudaSqrt_updateOutput.restype = None\n self.THNN_CudaSqrt_updateOutput = THNN_CudaSqrt_updateOutput\n\n THNN_CudaSqrt_updateGradInput=lib.THNN_CudaSqrt_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaSqrt_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaSqrt_updateGradInput.restype = None\n self.THNN_CudaSqrt_updateGradInput = THNN_CudaSqrt_updateGradInput\n\n THNN_CudaSquare_updateOutput=lib.THNN_CudaSquare_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaSquare_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaSquare_updateOutput.restype = None\n self.THNN_CudaSquare_updateOutput = THNN_CudaSquare_updateOutput\n\n THNN_CudaSquare_updateGradInput=lib.THNN_CudaSquare_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaSquare_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaSquare_updateGradInput.restype = None\n self.THNN_CudaSquare_updateGradInput = THNN_CudaSquare_updateGradInput\n\n THNN_CudaTanh_updateOutput=lib.THNN_CudaTanh_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*\n THNN_CudaTanh_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p]\n THNN_CudaTanh_updateOutput.restype = None\n self.THNN_CudaTanh_updateOutput = THNN_CudaTanh_updateOutput\n\n THNN_CudaTanh_updateGradInput=lib.THNN_CudaTanh_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaTanh_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaTanh_updateGradInput.restype = None\n self.THNN_CudaTanh_updateGradInput = THNN_CudaTanh_updateGradInput\n\n THNN_CudaThreshold_updateOutput=lib.THNN_CudaThreshold_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,double,double,bool\n THNN_CudaThreshold_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_double,c_double,c_bool]\n THNN_CudaThreshold_updateOutput.restype = None\n self.THNN_CudaThreshold_updateOutput = THNN_CudaThreshold_updateOutput\n\n THNN_CudaThreshold_updateGradInput=lib.THNN_CudaThreshold_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,double,bool\n THNN_CudaThreshold_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_double,c_bool]\n THNN_CudaThreshold_updateGradInput.restype = None\n self.THNN_CudaThreshold_updateGradInput = THNN_CudaThreshold_updateGradInput\n\n THNN_CudaTemporalConvolution_updateOutput=lib.THNN_CudaTemporalConvolution_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int\n THNN_CudaTemporalConvolution_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int]\n THNN_CudaTemporalConvolution_updateOutput.restype = None\n self.THNN_CudaTemporalConvolution_updateOutput = THNN_CudaTemporalConvolution_updateOutput\n\n 
THNN_CudaTemporalConvolution_updateGradInput=lib.THNN_CudaTemporalConvolution_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaTemporalConvolution_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaTemporalConvolution_updateGradInput.restype = None\n self.THNN_CudaTemporalConvolution_updateGradInput = THNN_CudaTemporalConvolution_updateGradInput\n\n THNN_CudaTemporalConvolution_accGradParameters=lib.THNN_CudaTemporalConvolution_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,float\n THNN_CudaTemporalConvolution_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_float]\n THNN_CudaTemporalConvolution_accGradParameters.restype = None\n self.THNN_CudaTemporalConvolution_accGradParameters = THNN_CudaTemporalConvolution_accGradParameters\n\n THNN_CudaTemporalMaxPooling_updateOutput=lib.THNN_CudaTemporalMaxPooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaTemporalMaxPooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaTemporalMaxPooling_updateOutput.restype = None\n self.THNN_CudaTemporalMaxPooling_updateOutput = THNN_CudaTemporalMaxPooling_updateOutput\n\n THNN_CudaTemporalMaxPooling_updateGradInput=lib.THNN_CudaTemporalMaxPooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaTemporalMaxPooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaTemporalMaxPooling_updateGradInput.restype = None\n self.THNN_CudaTemporalMaxPooling_updateGradInput = THNN_CudaTemporalMaxPooling_updateGradInput\n\n THNN_CudaBatchNormalization_updateOutput=lib.THNN_CudaBatchNormalization_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,bool,double,double\n THNN_CudaBatchNormalization_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_bool,c_double,c_double]\n THNN_CudaBatchNormalization_updateOutput.restype = None\n self.THNN_CudaBatchNormalization_updateOutput = THNN_CudaBatchNormalization_updateOutput\n\n THNN_CudaBatchNormalization_backward=lib.THNN_CudaBatchNormalization_backward\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,float\n THNN_CudaBatchNormalization_backward.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_float]\n THNN_CudaBatchNormalization_backward.restype = None\n self.THNN_CudaBatchNormalization_backward = THNN_CudaBatchNormalization_backward\n\n THNN_CudaSpatialConvolutionMM_updateOutput=lib.THNN_CudaSpatialConvolutionMM_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaSpatialConvolutionMM_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialConvolutionMM_updateOutput.restype = None\n self.THNN_CudaSpatialConvolutionMM_updateOutput = THNN_CudaSpatialConvolutionMM_updateOutput\n\n THNN_CudaSpatialConvolutionMM_updateGradInput=lib.THNN_CudaSpatialConvolutionMM_updateGradInput\n # 
THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaSpatialConvolutionMM_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialConvolutionMM_updateGradInput.restype = None\n self.THNN_CudaSpatialConvolutionMM_updateGradInput = THNN_CudaSpatialConvolutionMM_updateGradInput\n\n THNN_CudaSpatialConvolutionMM_accGradParameters=lib.THNN_CudaSpatialConvolutionMM_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,float\n THNN_CudaSpatialConvolutionMM_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_float]\n THNN_CudaSpatialConvolutionMM_accGradParameters.restype = None\n self.THNN_CudaSpatialConvolutionMM_accGradParameters = THNN_CudaSpatialConvolutionMM_accGradParameters\n\n THNN_CudaSpatialConvolutionLocal_updateOutput=lib.THNN_CudaSpatialConvolutionLocal_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,long,long,long,long\n THNN_CudaSpatialConvolutionLocal_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_long,c_long,c_long,c_long]\n THNN_CudaSpatialConvolutionLocal_updateOutput.restype = None\n self.THNN_CudaSpatialConvolutionLocal_updateOutput = THNN_CudaSpatialConvolutionLocal_updateOutput\n\n THNN_CudaSpatialConvolutionLocal_updateGradInput=lib.THNN_CudaSpatialConvolutionLocal_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,long,long,long,long\n THNN_CudaSpatialConvolutionLocal_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_long,c_long,c_long,c_long]\n THNN_CudaSpatialConvolutionLocal_updateGradInput.restype = None\n self.THNN_CudaSpatialConvolutionLocal_updateGradInput = THNN_CudaSpatialConvolutionLocal_updateGradInput\n\n THNN_CudaSpatialConvolutionLocal_accGradParameters=lib.THNN_CudaSpatialConvolutionLocal_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,long,long,long,long,float\n THNN_CudaSpatialConvolutionLocal_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_long,c_long,c_long,c_long,c_float]\n THNN_CudaSpatialConvolutionLocal_accGradParameters.restype = None\n self.THNN_CudaSpatialConvolutionLocal_accGradParameters = THNN_CudaSpatialConvolutionLocal_accGradParameters\n\n THNN_CudaSpatialFullConvolution_updateOutput=lib.THNN_CudaSpatialFullConvolution_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int\n THNN_CudaSpatialFullConvolution_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialFullConvolution_updateOutput.restype = None\n self.THNN_CudaSpatialFullConvolution_updateOutput = THNN_CudaSpatialFullConvolution_updateOutput\n\n THNN_CudaSpatialFullConvolution_updateGradInput=lib.THNN_CudaSpatialFullConvolution_updateGradInput\n # 
THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int\n THNN_CudaSpatialFullConvolution_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialFullConvolution_updateGradInput.restype = None\n self.THNN_CudaSpatialFullConvolution_updateGradInput = THNN_CudaSpatialFullConvolution_updateGradInput\n\n THNN_CudaSpatialFullConvolution_accGradParameters=lib.THNN_CudaSpatialFullConvolution_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int,float\n THNN_CudaSpatialFullConvolution_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_float]\n THNN_CudaSpatialFullConvolution_accGradParameters.restype = None\n self.THNN_CudaSpatialFullConvolution_accGradParameters = THNN_CudaSpatialFullConvolution_accGradParameters\n\n THNN_CudaSpatialCrossMapLRN_updateOutput=lib.THNN_CudaSpatialCrossMapLRN_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,float,float,float\n THNN_CudaSpatialCrossMapLRN_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_float,c_float,c_float]\n THNN_CudaSpatialCrossMapLRN_updateOutput.restype = None\n self.THNN_CudaSpatialCrossMapLRN_updateOutput = THNN_CudaSpatialCrossMapLRN_updateOutput\n\n THNN_CudaSpatialCrossMapLRN_updateGradInput=lib.THNN_CudaSpatialCrossMapLRN_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,float,float,float\n THNN_CudaSpatialCrossMapLRN_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_float,c_float,c_float]\n THNN_CudaSpatialCrossMapLRN_updateGradInput.restype = None\n self.THNN_CudaSpatialCrossMapLRN_updateGradInput = THNN_CudaSpatialCrossMapLRN_updateGradInput\n\n THNN_CudaSpatialAdaptiveMaxPooling_updateOutput=lib.THNN_CudaSpatialAdaptiveMaxPooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaSpatialAdaptiveMaxPooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaSpatialAdaptiveMaxPooling_updateOutput.restype = None\n self.THNN_CudaSpatialAdaptiveMaxPooling_updateOutput = THNN_CudaSpatialAdaptiveMaxPooling_updateOutput\n\n THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput=lib.THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*\n THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]\n THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput.restype = None\n self.THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput = THNN_CudaSpatialAdaptiveMaxPooling_updateGradInput\n\n THNN_CudaSpatialAveragePooling_updateOutput=lib.THNN_CudaSpatialAveragePooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,bool,bool\n THNN_CudaSpatialAveragePooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_bool,c_bool]\n THNN_CudaSpatialAveragePooling_updateOutput.restype = None\n self.THNN_CudaSpatialAveragePooling_updateOutput = THNN_CudaSpatialAveragePooling_updateOutput\n\n THNN_CudaSpatialAveragePooling_updateGradInput=lib.THNN_CudaSpatialAveragePooling_updateGradInput\n # 
THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,bool,bool\n THNN_CudaSpatialAveragePooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_bool,c_bool]\n THNN_CudaSpatialAveragePooling_updateGradInput.restype = None\n self.THNN_CudaSpatialAveragePooling_updateGradInput = THNN_CudaSpatialAveragePooling_updateGradInput\n\n THNN_CudaSpatialMaxPooling_updateOutput=lib.THNN_CudaSpatialMaxPooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,bool\n THNN_CudaSpatialMaxPooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_bool]\n THNN_CudaSpatialMaxPooling_updateOutput.restype = None\n self.THNN_CudaSpatialMaxPooling_updateOutput = THNN_CudaSpatialMaxPooling_updateOutput\n\n THNN_CudaSpatialMaxPooling_updateGradInput=lib.THNN_CudaSpatialMaxPooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,bool\n THNN_CudaSpatialMaxPooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_bool]\n THNN_CudaSpatialMaxPooling_updateGradInput.restype = None\n self.THNN_CudaSpatialMaxPooling_updateGradInput = THNN_CudaSpatialMaxPooling_updateGradInput\n\n THNN_CudaSpatialMaxUnpooling_updateOutput=lib.THNN_CudaSpatialMaxUnpooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaSpatialMaxUnpooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaSpatialMaxUnpooling_updateOutput.restype = None\n self.THNN_CudaSpatialMaxUnpooling_updateOutput = THNN_CudaSpatialMaxUnpooling_updateOutput\n\n THNN_CudaSpatialMaxUnpooling_updateGradInput=lib.THNN_CudaSpatialMaxUnpooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int\n THNN_CudaSpatialMaxUnpooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int]\n THNN_CudaSpatialMaxUnpooling_updateGradInput.restype = None\n self.THNN_CudaSpatialMaxUnpooling_updateGradInput = THNN_CudaSpatialMaxUnpooling_updateGradInput\n\n THNN_CudaSpatialFractionalMaxPooling_updateOutput=lib.THNN_CudaSpatialFractionalMaxPooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int,int,int,int,THCudaTensor*,THCudaTensor*\n THNN_CudaSpatialFractionalMaxPooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_void_p,c_void_p]\n THNN_CudaSpatialFractionalMaxPooling_updateOutput.restype = None\n self.THNN_CudaSpatialFractionalMaxPooling_updateOutput = THNN_CudaSpatialFractionalMaxPooling_updateOutput\n\n THNN_CudaSpatialFractionalMaxPooling_updateGradInput=lib.THNN_CudaSpatialFractionalMaxPooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,THCudaTensor*\n THNN_CudaSpatialFractionalMaxPooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_void_p]\n THNN_CudaSpatialFractionalMaxPooling_updateGradInput.restype = None\n self.THNN_CudaSpatialFractionalMaxPooling_updateGradInput = THNN_CudaSpatialFractionalMaxPooling_updateGradInput\n\n THNN_CudaSpatialSubSampling_updateOutput=lib.THNN_CudaSpatialSubSampling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int\n THNN_CudaSpatialSubSampling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int]\n 
THNN_CudaSpatialSubSampling_updateOutput.restype = None\n self.THNN_CudaSpatialSubSampling_updateOutput = THNN_CudaSpatialSubSampling_updateOutput\n\n THNN_CudaSpatialSubSampling_updateGradInput=lib.THNN_CudaSpatialSubSampling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int\n THNN_CudaSpatialSubSampling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialSubSampling_updateGradInput.restype = None\n self.THNN_CudaSpatialSubSampling_updateGradInput = THNN_CudaSpatialSubSampling_updateGradInput\n\n THNN_CudaSpatialSubSampling_accGradParameters=lib.THNN_CudaSpatialSubSampling_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,float\n THNN_CudaSpatialSubSampling_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_float]\n THNN_CudaSpatialSubSampling_accGradParameters.restype = None\n self.THNN_CudaSpatialSubSampling_accGradParameters = THNN_CudaSpatialSubSampling_accGradParameters\n\n THNN_CudaSpatialUpSamplingNearest_updateOutput=lib.THNN_CudaSpatialUpSamplingNearest_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int\n THNN_CudaSpatialUpSamplingNearest_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int]\n THNN_CudaSpatialUpSamplingNearest_updateOutput.restype = None\n self.THNN_CudaSpatialUpSamplingNearest_updateOutput = THNN_CudaSpatialUpSamplingNearest_updateOutput\n\n THNN_CudaSpatialUpSamplingNearest_updateGradInput=lib.THNN_CudaSpatialUpSamplingNearest_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int\n THNN_CudaSpatialUpSamplingNearest_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int]\n THNN_CudaSpatialUpSamplingNearest_updateGradInput.restype = None\n self.THNN_CudaSpatialUpSamplingNearest_updateGradInput = THNN_CudaSpatialUpSamplingNearest_updateGradInput\n\n THNN_CudaVolumetricAveragePooling_updateOutput=lib.THNN_CudaVolumetricAveragePooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaVolumetricAveragePooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricAveragePooling_updateOutput.restype = None\n self.THNN_CudaVolumetricAveragePooling_updateOutput = THNN_CudaVolumetricAveragePooling_updateOutput\n\n THNN_CudaVolumetricAveragePooling_updateGradInput=lib.THNN_CudaVolumetricAveragePooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaVolumetricAveragePooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricAveragePooling_updateGradInput.restype = None\n self.THNN_CudaVolumetricAveragePooling_updateGradInput = THNN_CudaVolumetricAveragePooling_updateGradInput\n\n THNN_CudaVolumetricConvolution_updateOutput=lib.THNN_CudaVolumetricConvolution_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaVolumetricConvolution_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricConvolution_updateOutput.restype = None\n self.THNN_CudaVolumetricConvolution_updateOutput = THNN_CudaVolumetricConvolution_updateOutput\n\n THNN_CudaVolumetricConvolution_updateGradInput=lib.THNN_CudaVolumetricConvolution_updateGradInput\n 
# THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n THNN_CudaVolumetricConvolution_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricConvolution_updateGradInput.restype = None\n self.THNN_CudaVolumetricConvolution_updateGradInput = THNN_CudaVolumetricConvolution_updateGradInput\n\n THNN_CudaVolumetricConvolution_accGradParameters=lib.THNN_CudaVolumetricConvolution_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,float\n THNN_CudaVolumetricConvolution_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_float]\n THNN_CudaVolumetricConvolution_accGradParameters.restype = None\n self.THNN_CudaVolumetricConvolution_accGradParameters = THNN_CudaVolumetricConvolution_accGradParameters\n\n THNN_CudaVolumetricFullConvolution_updateOutput=lib.THNN_CudaVolumetricFullConvolution_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int,int\n THNN_CudaVolumetricFullConvolution_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricFullConvolution_updateOutput.restype = None\n self.THNN_CudaVolumetricFullConvolution_updateOutput = THNN_CudaVolumetricFullConvolution_updateOutput\n\n THNN_CudaVolumetricFullConvolution_updateGradInput=lib.THNN_CudaVolumetricFullConvolution_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int,int\n THNN_CudaVolumetricFullConvolution_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricFullConvolution_updateGradInput.restype = None\n self.THNN_CudaVolumetricFullConvolution_updateGradInput = THNN_CudaVolumetricFullConvolution_updateGradInput\n\n THNN_CudaVolumetricFullConvolution_accGradParameters=lib.THNN_CudaVolumetricFullConvolution_accGradParameters\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int,int,float\n THNN_CudaVolumetricFullConvolution_accGradParameters.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_float]\n THNN_CudaVolumetricFullConvolution_accGradParameters.restype = None\n self.THNN_CudaVolumetricFullConvolution_accGradParameters = THNN_CudaVolumetricFullConvolution_accGradParameters\n\n THNN_CudaVolumetricMaxPooling_updateOutput=lib.THNN_CudaVolumetricMaxPooling_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int,int,int,int,bool\n THNN_CudaVolumetricMaxPooling_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_int,c_bool]\n THNN_CudaVolumetricMaxPooling_updateOutput.restype = None\n self.THNN_CudaVolumetricMaxPooling_updateOutput = THNN_CudaVolumetricMaxPooling_updateOutput\n\n THNN_CudaVolumetricMaxPooling_updateGradInput=lib.THNN_CudaVolumetricMaxPooling_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int,int,int\n 
THNN_CudaVolumetricMaxPooling_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int,c_int,c_int]\n THNN_CudaVolumetricMaxPooling_updateGradInput.restype = None\n self.THNN_CudaVolumetricMaxPooling_updateGradInput = THNN_CudaVolumetricMaxPooling_updateGradInput\n\n THNN_CudaSpatialReflectionPadding_updateOutput=lib.THNN_CudaSpatialReflectionPadding_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int,int,int\n THNN_CudaSpatialReflectionPadding_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int,c_int,c_int]\n THNN_CudaSpatialReflectionPadding_updateOutput.restype = None\n self.THNN_CudaSpatialReflectionPadding_updateOutput = THNN_CudaSpatialReflectionPadding_updateOutput\n\n THNN_CudaSpatialReflectionPadding_updateGradInput=lib.THNN_CudaSpatialReflectionPadding_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int\n THNN_CudaSpatialReflectionPadding_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialReflectionPadding_updateGradInput.restype = None\n self.THNN_CudaSpatialReflectionPadding_updateGradInput = THNN_CudaSpatialReflectionPadding_updateGradInput\n\n THNN_CudaSpatialReplicationPadding_updateOutput=lib.THNN_CudaSpatialReplicationPadding_updateOutput\n # THCState*,THCudaTensor*,THCudaTensor*,int,int,int\n THNN_CudaSpatialReplicationPadding_updateOutput.argtypes=[c_void_p,c_void_p,c_void_p,c_int,c_int,c_int]\n THNN_CudaSpatialReplicationPadding_updateOutput.restype = None\n self.THNN_CudaSpatialReplicationPadding_updateOutput = THNN_CudaSpatialReplicationPadding_updateOutput\n\n THNN_CudaSpatialReplicationPadding_updateGradInput=lib.THNN_CudaSpatialReplicationPadding_updateGradInput\n # THCState*,THCudaTensor*,THCudaTensor*,THCudaTensor*,int,int,int,int\n THNN_CudaSpatialReplicationPadding_updateGradInput.argtypes=[c_void_p,c_void_p,c_void_p,c_void_p,c_int,c_int,c_int,c_int]\n THNN_CudaSpatialReplicationPadding_updateGradInput.restype = None\n self.THNN_CudaSpatialReplicationPadding_updateGradInput = THNN_CudaSpatialReplicationPadding_updateGradInput\n\n\n","sub_path":"thnn.py","file_name":"thnn.py","file_ext":"py","file_size_in_byte":45545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"501231661","text":"import cv2\nfrom collections import defaultdict\nimport pickle\nimport os\nimport numpy as np\nimport itertools\nimport DeleteBackGround\nfrom sklearn.datasets import base\n\n\n\n\n\n# def actualize_dict(dict, matches, desc):\n# [dict[match]]\n\ndef getDescriptions1(path_folder1, name1, blur_mat, flag_resize = False):\n img = cv2.imread(path_folder1 + \"/\" + name1, 0)\n img = cv2.blur(img, (blur_mat,blur_mat))\n if flag_resize is not False:\n img = cv2.resize(img, (1653, 840))\n keys, des = descriptor.detectAndCompute(img, None)\n return img, keys, des\ndef getDescriptions2(path_folder1, name1, path_folder2, name2, blur):\n img1, keys1, des1 = getDescriptions1(path_folder1, name1, blur, True)\n img2, keys2, des2 = getDescriptions1(path_folder2, name2, blur, True)\n return img1, keys1, des1, img2, keys2, des2\ndef getDescriptions3(path_folder, blur):\n imageList = DeleteBackGround.loadFiles(path_folder)\n keys_list =[]\n des_list =[]\n img_list =[]\n for img_pat in imageList:\n img = cv2.imread(path_folder+\"/\"+img_pat,0)\n img=cv2.blur(img,(blur, blur))\n img = cv2.resize(img, (1653, 840))\n keys, des = descriptor.detectAndCompute(img, None)\n keys_list.append(keys)\n 
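Editor's note on the `thnn.py` record above: the binding code repeats the same four-line ctypes pattern (look up the symbol, set `argtypes`, clear `restype`, attach to `self`) for every THNN entry point. A table-driven helper removes most of that repetition. This is a minimal sketch assuming an already-loaded `ctypes.CDLL` handle; `bind_all` and `SIGNATURES` are illustrative names, not part of the original module, and the two sample signatures are transcribed from the type comments above.

```python
from ctypes import c_void_p, c_int, c_bool, c_double

# Hypothetical signature table: function name -> argtypes list.
# Pointer-typed THC arguments (THCState*, THCudaTensor*) all map to c_void_p.
SIGNATURES = {
    'THNN_CudaTemporalMaxPooling_updateOutput':
        [c_void_p] * 4 + [c_int, c_int],
    'THNN_CudaBatchNormalization_updateOutput':
        [c_void_p] * 9 + [c_bool, c_double, c_double],
}

def bind_all(target, lib, signatures=SIGNATURES):
    """Look up each symbol in `lib`, set argtypes/restype, attach to `target`."""
    for name, argtypes in signatures.items():
        fn = getattr(lib, name)   # raises AttributeError if the symbol is missing
        fn.argtypes = argtypes
        fn.restype = None         # every THNN entry point returns void
        setattr(target, name, fn)
```

Extending the table is then a one-line change per new entry point instead of a five-line block.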
des_list.append(des)\n img_list.append(img)\n return img_list, keys_list, des_list\ndef bestMatches(keys_pattern, des_pattern, keys_trans, des_trans, amount=1):\n matcher = cv2.BFMatcher_create(cv2.cv2.NORM_HAMMING, crossCheck=False)\n matches_hier = matcher.knnMatch(des_pattern, des_trans, 2)\n matches = [i for (i, j) in matches_hier if i.distance < 0.8 * j.distance]\n matches = sorted(matches, key=lambda x: x.distance)\n points_patter = np.array([keys_pattern[num.queryIdx].pt for num in matches], dtype='float32').reshape(-1,1,2)\n points_trans = np.array([keys_trans[num.trainIdx].pt for num in matches], dtype='float32').reshape(-1,1,2)\n M, mask = cv2.findHomography(points_patter, points_trans, cv2.RANSAC)\n mask = mask.ravel().tolist()\n return M, mask, matches\ndef actualizeGraph(main_graph, temp_pairs):\n for pair in temp_pairs:\n key_first_node = None\n key_second_node = None\n for key in main_graph:\n if (np.isin(main_graph[key], pair[0])).all():\n key_first_node = key\n if (np.isin(main_graph[key], pair[1])).all():\n key_second_node = key\n if key_first_node is not None and key_second_node is not None:\n pass\n elif key_first_node is not None:\n main_graph[key_first_node].append(pair[1])\n elif key_second_node is not None:\n main_graph[key_second_node].append(pair[0])\n elif key_first_node is None and key_second_node is None:\n pass\n return main_graph\ndef make_part_of_dataset(des, dict, keys):\n data= []\n target = []\n target_names =[]\n #for key in keys:\n\n\n\n\ncv2.namedWindow(\"draw keypoints\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"draw keypoints2\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\ndescriptor = cv2.BRISK_create(thresh=30,octaves=3,patternScale=1.0)#,radiusList = np.ones((100)), numberList = np.ones((100))\npath = \"/home/krzysztof/Pulpit/banknoty/100zl_nobackground\"\npath_pattern_folder = \"/home/krzysztof/Pulpit/banknoty\"\nname_pattern_img = \"100zl_wzor.jpeg\"\n\n# images, keys, des = getDescriptions3(path, 7)\nimg_pattern, keys_pattern, des_pattern = getDescriptions1(path_pattern_folder, name_pattern_img, 7)\n\n# M, mask, matches = bestMatches(keys_pattern, des_pattern, keys, des)\n\nimageList = DeleteBackGround.loadFiles(path)\nimg_matches =None\ndict_matches = defaultdict(list)\n\n\n\nfor counter, imgPath in enumerate(imageList, start=0):\n img_trans, keys_trans, des_trans = getDescriptions1(path, imgPath, 9, True)\n M, mask, matches = bestMatches(keys_pattern, des_pattern, keys_trans, des_trans)\n matches_good = [matches[i] for i, elem in enumerate(mask) if elem is 1]\n for match in matches_good:\n dict_matches[keys_pattern[match.queryIdx].pt].append(des_trans[match.trainIdx])#\n\n # drawing keypoints\n img_matches = cv2.drawMatches(img_pattern, keys_pattern, img_trans,\n keys_trans, matches, img_matches, matchesMask=mask)#, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS\n\n cv2.imshow(\"draw keypoints\", img_matches)\n cv2.waitKey(10)\n print(len(matches_good) , len(dict_matches))\n len_dict = [len(dict_matches[key]) for key in dict_matches]\n hist = np.histogram(len_dict, bins=range(1, 24))\n print(hist[0][:])\n\ndata = np.empty((0,64),np.uint8)\ntarget = np.array([],np.uint8)\ntarget_names = np.array([])\n\nkeys_good = [key for key in dict_matches if len(dict_matches[key])>8]\nfor counter, key in enumerate(keys_good, start=0):\n target_names = np.append(target_names, str(key))\n for row in dict_matches[key]:\n data = np.append(data, np.resize(row,(1, 64)), axis=0)\n target = np.append(target, counter)\n\n\ndata_base = 
base.Bunch(data= data, target=target, target_names=target_names)\n\n# save database\n\nf = open(\"/home/krzysztof/Pulpit/banknoty/100zl_des/database.base\", 'wb')\npickle.dump(data_base, f)\nf.close()\n\n\n\n\n\n# combinations = itertools.combinations(imageList, 2)\n# node_pairs = list()\n# for i, comb in enumerate(combinations,start=0):\n# img1, keys1, des1, img2, keys2, des2 = getDescriptions2(path, comb[0], path, comb[1], 5)\n# M, mask, matches = bestMatches(keys1, des1, keys2, des2)\n# matches_good = [matches[i] for i, elem in enumerate(mask) if elem is 1]\n# [node_pairs.append((des1[match.queryIdx], des2[match.trainIdx])) for match in matches_good]\n# dict_matches=actualizeGraph(dict_matches, node_pairs)\n# len_dict = [len(dict_matches[key]) for key in dict_matches]\n# print(np.histogram(len_dict, bins=range(24))[0][12:])\n# img_matches = cv2.drawMatches(img1, keys1, img2,\n# keys2, matches, img_matches,\n# matchesMask=mask) # , flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS\n# cv2.imshow(\"draw keypoints\", img_matches)\n# cv2.waitKey(0)","sub_path":"OpenCVFirstProject/Create_Learning_Set.py","file_name":"Create_Learning_Set.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"395422870","text":"import os\r\nimport sys\r\n\r\nfile_1=[]\r\nfile_2=[]\r\n\r\nf=open(\"semantic.txt\",\"r\",encoding=\"utf-8\")\r\n\r\ncount_of_sentence_1=1\r\nfor i,line in enumerate(f):\r\n line=line.rstrip()\r\n \r\n if line==\"\":\r\n count_of_sentence_1=count_of_sentence_1+1\r\n continue\r\n\r\n row=line.split('\t')\r\n file_1.append(row)\r\n\r\nf.close()\r\n\r\n\r\nf=open(\"Tests.txt\",\"r\",encoding=\"utf-8\")\r\n\r\ncount_of_sentence_2=1\r\n\r\nfor i,line in enumerate(f):\r\n line=line.rstrip()\r\n\r\n if line==\"\":\r\n count_of_sentence_2=count_of_sentence_2+1\r\n continue\r\n\r\n if count_of_sentence_2==count_of_sentence_1: \r\n break\r\n \r\n row=line.split('\t') \r\n file_2.append(row)\r\n\r\nf.close()\r\n\r\n\r\n \r\nparent_link_type_link=0\r\nparent_link = 0\r\ntype_link=0\r\nmorph_error=0\r\nline=0\r\nno_strings=0\r\n\r\n\r\nif len(file_1)!=len(file_2):\r\n no_strings=abs(len(file_2)-len(file_1))\r\n length=min(len(file_1),len(file_2))\r\nelse:\r\n length=len(file_1)\r\n\r\n\r\nf=open(\"test_result.txt\",\"w\",encoding=\"utf-8\")\r\nindex=0\r\n\r\nind=[]\r\n\r\nfor i in range(len(file_1)):\r\n index=index+1\r\n if \"SENT\" in file_1[i][3]:\r\n ind.append(index)\r\n index=0\r\n\r\n \r\nu=0\r\nall_write=0\r\nfor i in range(length):\r\n if file_1[i][3]==\"SENT\":\r\n if (all_write==0):\r\n f.write(\"Correct !\\n\")\r\n f.write(\"------------------------------\\n\")\r\n \r\n all_write=0\r\n u=u+1\r\n line=0\r\n morph_error=0\r\n parent_link_type_link=0\r\n parent_link=0\r\n type_link=0\r\n procent_1=0\r\n \r\n \r\n if (file_1[i][4]==file_2[i][4] and file_1[i][5]==file_2[i][5] and file_1[i][6]==file_2[i][6]):\r\n continue\r\n else:\r\n all_write=all_write+1\r\n if (file_1[i][5]!=file_2[i][5] or file_1[i][6]!=file_2[i][6]):\r\n parent_link_type_link = parent_link_type_link+1\r\n f.write(file_1[i][1]+\" \"+file_1[i][4]+\" \"+file_1[i][5]+\" \"+file_1[i][6]+\"\\n\")\r\n f.write(file_2[i][1]+\" \"+file_2[i][4]+\" \"+file_2[i][5]+\" \"+file_2[i][6]+\"\\n\")\r\n f.write(\"------------------------------\\n\")\r\n \r\n if (file_1[i][5]!=file_2[i][5]):\r\n parent_link = parent_link+1\r\n\r\n if (file_1[i][6]!=file_2[i][6]):\r\n type_link = type_link+1\r\n\r\n if (file_1[i][4]!=file_2[i][4]):\r\n morph_error = 
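A note on the `Create_Learning_Set.py` record above: its matching pipeline is BRISK keypoints, `knnMatch` with Lowe's ratio test, then a RANSAC homography whose inlier mask filters the matches. The original filters inliers with `elem is 1`; identity comparison against small integers happens to work in CPython but `elem == 1` is the correct spelling. Below is a condensed sketch of the same pipeline under that fix; image loading is left to the caller.

```python
import cv2
import numpy as np

def match_with_homography(img1, img2, ratio=0.8):
    """BRISK keypoints + Lowe's ratio test + RANSAC homography (sketch)."""
    brisk = cv2.BRISK_create()
    k1, d1 = brisk.detectAndCompute(img1, None)
    k2, d2 = brisk.detectAndCompute(img2, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    pairs = matcher.knnMatch(d1, d2, k=2)
    good = [m for m, n in pairs if m.distance < ratio * n.distance]
    if len(good) < 4:
        return None, []           # a homography needs >= 4 correspondences
    src = np.float32([k1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([k2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src, dst, cv2.RANSAC)
    # Compare with == 1, not `is 1`: identity checks on ints are unreliable.
    inliers = [m for m, keep in zip(good, mask.ravel()) if keep == 1]
    return M, inliers
```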
morph_error+1\r\n\r\n line=line+1\r\n\r\n #f.write(\"Amount_of_sent1 \"+str(count_of_sentence_1)+\"\\n\")\r\n #f.write(\"Amount_of_sent2 \"+str(count_of_sentence_2)+\"\\n\")\r\n #f.write(\"No string \"+str(no_strings)+\"\\n\")\r\n f.write(\"Line \"+str(line)+\"\\n\")\r\n f.write('Morph '+str(morph_error)+\"\\n\")\r\n procent_1=100/(int(ind[u]))\r\n f.write(\"Type+parent \"+str(100-parent_link_type_link*procent_1)+\"\\n\")\r\n f.write(\"Type \"+str(100-type_link*procent_1)+\"\\n\")\r\n f.write(\"Parent \"+str(100-parent_link*procent_1)+\"\\n\")\r\n f.write(\"------------------------------\\n\")\r\n\r\n \r\n \r\n \r\n\r\n\r\nf.close()\r\n","sub_path":"Syntax/Ver 2.0/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"473407633","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n#from .views import *\n\nurlpatterns = [\n path('', views.index, name = 'shopme'),\n path('about', views.about, name='about'),\n path('contact', views.contact, name='contact'),\n path('search', views.search, name='search'),\n path('productview', views.productview, name='productview'),\n path('tracker', views.tracker, name='myorder'),\n path('checkout', views.checkout, name='checkout'),\n path('accounts', views.accounts, name='accounts'),\n path('help', views.help, name='help'),\n path('purchaseperuser', views.purchaseperuser, name='purchaseperuser')\n]","sub_path":"shopme/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"233934195","text":"\nimport os\nimport random\nfrom sklearn import metrics\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom options import Options\nfrom datafolder import SlideLevelDataset\nfrom models import AttnClassifier\n\n\ndef main():\n opt = Options(isTrain=False)\n opt.parse()\n\n os.makedirs(opt.test['save_dir'], exist_ok=True)\n\n opt.save_options()\n opt.print_options()\n\n mode = 'test'\n get_probs(opt, mode)\n\n # compute accuracy\n save_dir = opt.test['save_dir']\n test_prob = np.load('{:s}/{:s}_prob_results.npy'.format(save_dir, mode), allow_pickle=True).item()\n\n txt_file = open('{:s}/{:s}_results.txt'.format(save_dir, mode), 'w')\n acc, auc, auc_CIs = compute_accuracy(test_prob)\n message = '{:11s}: Acc: {:5.2f}\\tAUC: {:5.2f} ({:5.2f}, {:5.2f})'.format('All', acc * 100, auc * 100, auc_CIs[0] * 100, auc_CIs[1] * 100)\n print(message)\n txt_file.write('{:s}\\n'.format(message))\n txt_file.close()\n\n\ndef get_probs(opt, mode='test'):\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(x) for x in opt.test['gpus'])\n\n model_path = opt.test['model_path']\n save_dir = opt.test['save_dir']\n\n # create model\n if opt.model['name'].lower() == 'attnclassifier':\n model = AttnClassifier(2048, 2)\n else:\n raise NotImplemented\n model = model.cuda()\n\n # print(\"=> loading trained model\")\n best_checkpoint = torch.load(model_path)\n model.load_state_dict(best_checkpoint['state_dict'])\n print('Model obtained in epoch: {:d}'.format(best_checkpoint['epoch']))\n\n class_info_test = np.load('{:s}/{:s}/{:s}_class_info_{:s}.npy'.format(opt.train['data_dir'], mode, opt.data_type, opt.gene_or_pathway),\n allow_pickle=True).item()\n test_set = 
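The `tester.py` record above walks two tab-separated annotation files in parallel and tallies, per sentence, the morphology, parent-link, and link-type disagreements. The core bookkeeping can be restated compactly with `zip` and a `Counter`; the column indices below follow the original script (4 = morphology, 5 = parent link, 6 = link type) and the row lists are assumed to come from `line.split('\t')`.

```python
from collections import Counter

def tally_errors(gold_rows, pred_rows):
    """Count field-level disagreements between parallel token rows (sketch)."""
    counts = Counter()
    for g, p in zip(gold_rows, pred_rows):  # zip stops at the shorter file
        counts['lines'] += 1
        if g[4] != p[4]:
            counts['morph'] += 1
        if g[5] != p[5]:
            counts['parent'] += 1
        if g[6] != p[6]:
            counts['type'] += 1
    return counts
```

Unlike the original's manual index juggling, `zip` also makes the "stop at the shorter file" behavior explicit.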
SlideLevelDataset('{:s}/20x_features_resnet101_hdf5/{:s}_tumor.h5'.format(opt.root_data_dir, mode), class_info_test)\n\n print(\"=> Test begins:\")\n # switch to evaluate mode\n model.eval()\n prob_results = {}\n attn_maps = {}\n slide_names = list(test_set.keys)\n for i in range(len(test_set)):\n input, target, true_indices = test_set[i]\n\n flag = '' if i < len(test_set)-1 else '\\n'\n print('\\r\\t{:d}/{:d}'.format(i+1, len(test_set)), end=flag)\n\n with torch.no_grad():\n output, attn = model(input.unsqueeze(1).cuda())\n probs = nn.functional.softmax(output, dim=1)\n probs = probs.cpu().numpy()\n\n attn_map = attn[0].detach().cpu().numpy()\n\n slide_name = slide_names[i]\n prob_results[slide_name] = {'prob': probs, 'label': target.item()}\n attn_maps[slide_name] = {'attn': attn_map, 'true_indices': np.array(true_indices)}\n\n if i % 10 == 0:\n np.save('{:s}/{:s}_attn_maps.npy'.format(save_dir, mode), attn_maps)\n np.save('{:s}/{:s}_prob_results.npy'.format(save_dir, mode), prob_results)\n\n np.save('{:s}/{:s}_prob_results.npy'.format(save_dir, mode), prob_results)\n np.save('{:s}/{:s}_attn_maps.npy'.format(save_dir, mode), attn_maps)\n\n\ndef compute_accuracy(slides_probs):\n all_probs = []\n all_labels = []\n for slide_name in slides_probs.keys():\n probs = np.array(slides_probs[slide_name]['prob'])\n label = slides_probs[slide_name]['label']\n\n all_probs.extend(probs[:, 1])\n all_labels.append(label)\n\n all_probs = np.array(all_probs)\n all_pred = (all_probs > 0.5).astype(np.float)\n\n acc = metrics.accuracy_score(all_labels, all_pred)\n if np.unique(np.array(all_labels)).size == 1:\n auc = -0.01\n auc_CIs = [-0.01, -0.01]\n else:\n auc, auc_CIs = bootstrap_AUC_CIs(all_probs, all_labels)\n return acc, auc, auc_CIs\n\n\ndef plot_roc_curve(slides_probs, save_filename):\n all_probs = []\n all_labels = []\n for slide_name in slides_probs.keys():\n probs = np.array(slides_probs[slide_name]['prob'])\n label = slides_probs[slide_name]['label']\n\n all_probs.extend(probs[:, 1])\n all_labels.append(label)\n\n all_probs = np.array(all_probs)\n\n fpr, tpr, thresholds = metrics.roc_curve(all_labels, all_probs)\n auc = metrics.auc(fpr, tpr)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(fpr, tpr)\n ax.set_xlabel('FPR')\n ax.set_ylabel('TPR')\n ax.set_aspect('equal')\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n ax.set_title('ROC curve, AUC: {:.2f})'.format(auc * 100))\n fig.savefig(save_filename)\n plt.close()\n\n\ndef bootstrap_AUC_CIs(probs, labels):\n probs = np.array(probs)\n labels = np.array(labels)\n N_slide = len(probs)\n index_list = np.arange(0, N_slide)\n AUC_list = []\n i = 0\n while i < 1000:\n sampled_indices = random.choices(index_list, k=N_slide)\n sampled_probs = probs[sampled_indices]\n sampled_labels = labels[sampled_indices]\n\n if np.unique(sampled_labels).size == 1: # reject the sample if there is only one class\n continue\n\n auc_bs = metrics.roc_auc_score(sampled_labels, sampled_probs)\n AUC_list.append(auc_bs)\n i += 1\n\n assert len(AUC_list) == 1000\n AUC_list = np.array(AUC_list)\n auc_avg = np.mean(AUC_list)\n auc_CIs = [np.percentile(AUC_list, 2.5), np.percentile(AUC_list, 97.5)]\n return auc_avg, auc_CIs\n\n\nif __name__ == '__main__':\n main()","sub_path":"code_prediction_on_breast_cancer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"487508417","text":"from Tile import Tile\n\nclass Tilemap:\n '''\n Defining the pattern of 
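The `test.py` record above computes percentile-bootstrap confidence intervals for the slide-level AUC: resample slides with replacement, reject resamples that contain only one class, and report the 2.5th/97.5th percentiles over 1000 accepted draws. A condensed sketch of that routine, using the same sklearn metric:

```python
import numpy as np
from sklearn import metrics

def bootstrap_auc(probs, labels, n_boot=1000, seed=0):
    """Percentile bootstrap CI for ROC AUC; resamples slides with replacement."""
    rng = np.random.default_rng(seed)
    probs, labels = np.asarray(probs), np.asarray(labels)
    aucs = []
    while len(aucs) < n_boot:
        idx = rng.integers(0, len(probs), size=len(probs))
        if np.unique(labels[idx]).size < 2:  # AUC needs both classes present
            continue
        aucs.append(metrics.roc_auc_score(labels[idx], probs[idx]))
    aucs = np.array(aucs)
    return aucs.mean(), np.percentile(aucs, [2.5, 97.5])
```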
the tile map (A list of char string) 28 X 36 tiles\n '0': The tile is a blank dead space\n 'H': The tile is a dead space and has a horizontal blue line (image of map)\n 'V': The tile is a dead space and has a vertical blue line (image of map)\n '2': The tile is a dead space and has a blue bottom right corner (image of map)\n '3': The tile is a dead space and has a blue bottom left corner (image of map)\n '4': The tile is a dead space and has a blue top right corner (image of map)\n '5': The tile is a dead space and has a blue top left corner (image of map)\n 'P': The tile is a dead space and has a pink line (door of the ghosts house)\n 'G': The tile is a dead space in the ghost house, ghost can only exit the house\n 'D': The tile is a legal space, containing a dot\n 'E': The tile is a legal space, containing a energizer\n '1': The tile is a blank legal space\n\n Input Argument: gameScreen: the gameScreen window from GameStart\n '''\n\n def __init__(self, gameScreen):\n # The tile map\n tilemap = [\"2HHHHHHHHHHHH32HHHHHHHHHHHH3\",\n \"VDDDDDDDDDDDDVVDDDDDDDDDDDDV\",\n \"VD2HH3D2HHH3DVVD2HHH3D2HH3DV\",\n \"VEV00VDV000VDVVDV000VDV00VEV\",\n \"VD4HH5D4HHH5D45D4HHH5D4HH5DV\",\n \"VDDDDDDDDDDDDDDDDDDDDDDDDDDV\",\n \"VD2HH3D23D2HHHHHH3D23D2HH3DV\",\n \"VD4HH5DVVD4HH32HH5DVVD4HH5DV\",\n \"VDDDDDDVVDDDDVVDDDDVVDDDDDDV\",\n \"4HHHH3DV4HH31VV12HH5VD2HHHH5\",\n \"00000VDV2HH514514HH3VDV00000\",\n \"00000VDVV1111111111VVDV00000\",\n \"00000VDVV12HHPPHH31VVDV00000\",\n \"HHHHH5D451VGGGGGGV145D4HHHHH\",\n \"111111D111VGGGGGGV111D111111\",\n \"HHHHH3D231VGGGGGGV123D2HHHHH\",\n \"00000VDVV14HHHHHH51VVDV00000\",\n \"00000VDVV1111111111VVDV00000\",\n \"00000VDVV12HHHHHH31VVDV00000\",\n \"2HHHH5D4514HH32HH5145D4HHHH3\",\n \"VDDDDDDDDDDDDVVDDDDDDDDDDDDV\",\n \"VD2HH3D2HHH3DVVD2HHH3D2HH3DV\",\n \"VD4H3VD4HHH5D45D4HHH5DV2H5DV\",\n \"VEDDVVDDDDDDD11DDDDDDDVVDDEV\",\n \"4H3DVVD23D2HHHHHH3D23DVVD2H5\",\n \"2H5D45DVVD4HH32HH5DVVD45D4H3\",\n \"VDDDDDDVVDDDDVVDDDDVVDDDDDDV\",\n \"VD2HHHH54HH3DVVD2HH54HHHH3DV\",\n \"VD4HHHHHHHH5D45D4HHHHHHHH5DV\",\n \"VDDDDDDDDDDDDDDDDDDDDDDDDDDV\",\n \"4HHHHHHHHHHHHHHHHHHHHHHHHHH5\"]\n\n # the legal space tile dict: key: tile location, value: Tile object\n self.legaltile = dict()\n # the dead space tile dict: key: tile location, value: Tile object\n self.deadtile = dict()\n # The space tile in the ghost house dict: key: tile location, value: Tile object\n self.ghosthousetile = dict()\n\n # Initialize the whole tile map\n for i in range(len(tilemap)):\n for j in range(len(tilemap[i])):\n # When the char in map is '1', 'D', 'E', the tile is a legal space tile\n if tilemap[i][j] == '1' or tilemap[i][j] == 'D' or tilemap[i][j] == 'E':\n self.legaltile[(i,j)] = Tile(tilemap[i][j], (i,j), gameScreen)\n elif tilemap[i][j] == 'G':\n # The tile is in the ghost house, the ghost can only exit the tile\n self.ghosthousetile[(i,j)] = Tile(tilemap[i][j], (i,j), gameScreen)\n else:\n # The tile is a dead space tile\n self.deadtile[(i,j)] = Tile(tilemap[i][j], (i,j), gameScreen)\n","sub_path":"PacMan/TileMap.py","file_name":"TileMap.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"351673893","text":"# ABC084B\nA, B = map(int, input().split())\nS = input()\nans = 'No'\nflag = True\nnum = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nif S[A] == '-':\n aa = S[0:A]\n bb = S[A+1:A+B+1]\n for i in range(len(aa)):\n if aa[i] not in num:\n flag = False\n for k in range(len(bb)):\n if bb[k] not in num:\n 
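The `TileMap.py` record above routes every character of the ASCII map into one of three tile dictionaries keyed by `(row, col)`: legal spaces (`1`, `D`, `E`), the ghost house (`G`), and everything else as dead space. A minimal sketch of that routing, with plain characters standing in for the `Tile` objects the real class constructs:

```python
LEGAL, GHOST = set('1DE'), set('G')

def classify_tiles(tilemap):
    """Split an ASCII tile map into legal / ghost-house / dead tile coords."""
    legal, ghost, dead = {}, {}, {}
    for i, row in enumerate(tilemap):
        for j, ch in enumerate(row):
            bucket = legal if ch in LEGAL else ghost if ch in GHOST else dead
            bucket[(i, j)] = ch  # the real code stores a Tile object here
    return legal, ghost, dead
```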
flag = False\n if flag:\n ans = 'Yes'\nprint(ans)\n","sub_path":"AtCoder_Python/ABC/ABC084/ABC_084_B.py","file_name":"ABC_084_B.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"191869547","text":"##string = str(\"Hello World\")\r\n##i=0\r\n##for s in string:\r\n## if(s == ' '):\r\n## break\r\n## else:\r\n## print(string[i], end = '')\r\n## i+=1\r\n\r\n## Palindrome\r\n\r\nflag = 0\r\nx = 0\r\ny = -1\r\nfor x in string:\r\n for y in string:\r\n if (x == y):\r\n flag+=1\r\n print(y)\r\n\r\n\r\nif flag > 0:\r\n print(\"it is palindrome\")\r\n print(flag)\r\nelse:\r\n print(\"Not palindrome\")\r\n","sub_path":"string print.py","file_name":"string print.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379942361","text":"def premik(ukaz, x, y, smer):\r\n smeri = \"NESW\"\r\n premiki = [(0, -1), (1, 0), (0, 1), (-1, 0)]\r\n ismer = smeri.index(smer)\r\n if ukaz == \"R\":\r\n smer = smeri[(ismer + 1) % 4]\r\n elif ukaz == \"L\":\r\n smer = smeri[(ismer - 1) % 4]\r\n else:\r\n dx, dy = premiki[ismer]\r\n x += dx * ukaz\r\n y += dy * ukaz\r\n return x, y, smer\r\n\r\ndef izvedi(ime_datoteke):\r\n smer = \"N\"\r\n seznam_premikov = [(0, 0, \"N\")]\r\n x = 0\r\n y = 0\r\n ukazi = open(ime_datoteke, encoding=\"utf-8\")\r\n for vrstica in ukazi:\r\n ukaz = vrstica.strip().split(\" \")\r\n if ukaz[0] == \"DESNO\":\r\n jojo = \"R\"\r\n ukaz.append(0)\r\n x += ukaz[1]\r\n if ukaz[0] == \"LEVO\":\r\n jojo = \"L\"\r\n ukaz.append(0)\r\n y += ukaz[1]\r\n if ukaz[0] == \"NAPREJ\":\r\n jojo = int(ukaz[1])\r\n premiki = premik(jojo, x, y, smer)\r\n x = premiki[0]\r\n y = premiki[1]\r\n smer = premiki[2]\r\n seznam_premikov.append(premiki)\r\n return seznam_premikov\r\n\r\ndef opisi_stanje(x, y, smer):\r\n if smer == \"N\":\r\n smer = \"^\"\r\n elif smer == \"E\":\r\n smer = \">\"\r\n elif smer == \"S\":\r\n smer = \"v\"\r\n elif smer == \"W\":\r\n smer = \"<\"\r\n return (\"{x:>3}:{y:<4}{smer}\".format(x=x, y=y, smer=smer))\r\n\r\ndef prevedi(ime_vhoda, ime_izhoda):\r\n nova_dat = open(ime_izhoda, \"w\")\r\n izveden = izvedi(ime_vhoda)\r\n for iz in izveden:\r\n x, y, smer = iz\r\n opis = opisi_stanje(x, y, smer)\r\n nova_dat.write(opis+\"\\n\")\r\n\r\ndef opisi_stanje_2(x, y, smer):\r\n x = \"(\" + str(x)\r\n if smer == \"N\":\r\n smer = \"^\"\r\n elif smer == \"E\":\r\n smer = \">\"\r\n elif smer == \"S\":\r\n smer = \"v\"\r\n elif smer == \"W\":\r\n smer = \"<\"\r\n return (\"{smer}{x:>5}:{y})\".format(x=x, y=y, smer=smer))\r\n\r\n\r\n\r\n","sub_path":"code/batch-2/vse-naloge-brez-testov/DN9-M-177.py","file_name":"DN9-M-177.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378165803","text":"import numpy as np\nfrom numpy.random import RandomState\nfrom sklearn.base import BaseEstimator\n\nclass RandomParcels(BaseEstimator):\n \n def __init__(self, geo, n_parcels, medial_wall_inds=None,\n medial_wall_mask=None, random_state=1):\n \n # Set passed params\n self.geo = geo\n self.n_parcels = n_parcels\n self.medial_wall_inds = medial_wall_inds\n self.medial_wall_mask = medial_wall_mask\n self.random_state = random_state\n self.mask = None\n \n def get_parc(self, copy=True):\n\n if self.mask is None:\n self._generate_parc_from_params()\n\n if copy:\n return self.mask.copy()\n else:\n return self.mask\n\n def 
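Two notes on the records above. The `string print.py` palindrome attempt references an undefined `string` and its nested double loop counts character coincidences rather than testing palindromicity; the idiomatic check is simply `s == s[::-1]`. The `DN9-M-177.py` robot, by contrast, handles turning cleanly with modular indexing into `"NESW"`; the sketch below restates that core step with a dict of offsets and a couple of self-checks.

```python
SMERI = "NESW"
PREMIKI = {"N": (0, -1), "E": (1, 0), "S": (0, 1), "W": (-1, 0)}

def korak(ukaz, x, y, smer):
    """One command: 'L'/'R' rotate via modular index, an int moves forward."""
    if ukaz in ("L", "R"):
        smer = SMERI[(SMERI.index(smer) + (1 if ukaz == "R" else -1)) % 4]
    else:
        dx, dy = PREMIKI[smer]
        x, y = x + dx * ukaz, y + dy * ukaz
    return x, y, smer

assert korak("R", 0, 0, "N") == (0, 0, "E")   # turn right from north
assert korak(3, 0, 0, "E") == (3, 0, "E")     # move three steps east
```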
_generate_parc_from_params(self):\n\n # Proc by input args\n self._proc_geo()\n self._proc_medial_wall()\n self._proc_random_state()\n\n # Set up mask, done and flags\n self.sz = len(self._geo)\n self.reset()\n\n # Init\n self.init_parcels()\n\n # Then generate\n self.generate_parcels()\n\n def _proc_geo(self):\n self._geo = [np.array(g) for g in self.geo]\n\n def _proc_medial_wall(self):\n \n # Proc medial wall inds\n if self.medial_wall_inds is not None:\n self.m_wall = set(list(self.medial_wall_inds))\n elif self.medial_wall_mask is not None:\n self.m_wall = set(list(np.where(self.medial_wall_mask == True)[0]))\n else:\n self.m_wall = set()\n\n def _proc_random_state(self):\n \n if self.random_state is None:\n self.r_state = RandomState()\n elif isinstance(self.random_state, int):\n self.r_state = RandomState(seed = self.random_state)\n else:\n self.r_state = self.random_state\n\n def reset(self):\n '''Just reset the mask, and set w/ done info'''\n\n self.mask = np.zeros(self.sz, dtype='int16')\n self.done = self.m_wall.copy()\n self.ready, self.generated = False, False\n \n def init_parcels(self):\n\n # Generate the starting locs\n valid = np.setdiff1d(np.arange(self.sz), np.array(list(self.done)))\n self.start_locs = self.r_state.choice(valid, size=self.n_parcels,\n replace=False)\n\n # Set random probs. that each loc is chosen\n self.probs = self.r_state.random(size=self.n_parcels)\n\n def setup(self):\n '''This should be called before generating parcel,\n so after a mutation has been made, setup needs to\n be called. It also does not hurt to call setup an\n extra time, as nothing random is set.'''\n\n # Generate corresponding labels w/ each loc\n self.labels = np.arange(1, self.n_parcels+1, dtype='int16')\n\n # Mask where if == 1, then that parcel is done\n self.finished = np.zeros(self.n_parcels, dtype='bool_')\n\n # Drop the first points\n self.mask[self.start_locs] = self.labels\n\n # Set ready flag to True\n self.ready = True\n\n def get_probs(self):\n\n return self.probs / np.sum(self.probs)\n \n def choice(self):\n '''Select a valid label based on probs.'''\n \n msk = self.finished == 0\n probs = self.probs[msk] / np.sum(self.probs[msk])\n label = self.r_state.choice(self.labels[msk], p=probs)\n \n return label\n \n def get_valid_neighbors(self, loc):\n \n ns = self._geo[loc]\n valid_ns = ns[self.mask[ns] == 0]\n \n return valid_ns\n \n def generate_parcels(self):\n\n if self.ready is False:\n self.setup()\n \n # Keep looping until every spot is filled\n while (self.finished == 0).any():\n self.add_spot()\n\n # Set generated flag when done\n self.generated = True\n \n def add_spot(self):\n\n # Select which parcel to add to\n label = self.choice()\n\n # Determine valid starting locations anywhere in exisitng parcel\n current = np.where(self.mask == label)[0]\n valid = set(current) - self.done\n\n self.proc_spot(valid, label)\n\n def proc_spot(self, valid, label):\n\n # If no valid choices, then set this parcel to finished\n if len(valid) == 0:\n self.finished[label-1] = 1\n return\n\n # Select randomly from the valid starting locs\n loc = self.r_state.choice(tuple(valid))\n \n # Select a valid + open neighbor\n valid_ns = self.get_valid_neighbors(loc)\n\n if len(valid_ns) > 0:\n\n # Select a valid choice, and add it w/ the right label\n choice = self.r_state.choice(valid_ns)\n self.mask[choice] = label\n\n # If this was the only choice, mark start loc as done\n if len(valid_ns) == 1:\n self.done.add(loc)\n\n # If there are no valid choices, mark as done\n else:\n 
self.done.add(loc)\n\n valid.remove(loc)\n self.proc_spot(valid, label)\n","sub_path":"ABCD_ML/extensions/RandomParcels.py","file_name":"RandomParcels.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"163294535","text":"import numpy as np\nimport pandas as pd\nfrom sympy import *\nimport json\nfrom matplotlib import pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n'''\n物质平衡法预测产能\n'''\nclass Gas_prediction():\n def __init__(self,well_info):\n '''\n 初始化\n :param well_info:\n '''\n self.A = well_info['A'] # 供气面积,m^2\n self.P_L = well_info['P_L'] # Langmuir 压力系数,Mpa\n self.P_cd = well_info['P_cd'] # 临界解吸压力\n self.V_L = well_info['V_L'] # Langmuir 体积系数,m^3/t\n self.P_i = well_info['P_i'] # 初始压力,Mpa\n self.h = well_info['h'] # 煤厚,m\n self.phi_i = well_info['phi_i'] # 初始孔隙度\n self.K_i=well_info['K_i'] #初始渗透率\n self.rho_B = well_info['rho_B'] # 煤密度,t/m^3\n self.S_wi = 0.95 # 初始含水饱和度\n self.B_W = 1 # 水的地层体积系数\n self.T = 313 # 温度,K\n self.P_wf = 1 # 井底流压\n self.mu_g = 0.01 # 气体粘度,mPa/s\n self.r_e = 200 # 泄流半径,m\n self.r_w = 0.1 # 井筒半径,m\n self.s = -1 # 表皮系数\n self.mu_w = 0.6#水的粘度系数\n self.P_sc = 0.1013#标准压力,Mpa\n self.T_sc = 289#标准温度,K\n self.Z_sc = 1#标准压缩系数\n self.q_wi = 2.5#初始排水量,m^3\n self.Z_i = self.get_z(self.P_i ,self.T , 0.8)\n self.G = self.A * self.h * self.rho_B * self.V_L * (self.P_cd / (self.P_cd + self.P_L))\n self.G_f = self.A * self.h * self.phi_i * (1 - self.S_wi) * (self.P_i * self.Z_sc * self.T_sc / (self.Z_i * self.T * self.P_sc))\n\n def get_z(self, P, T, theta):\n '''\n 计算天然气压缩系数\n 算法来源:天然气压缩因子计算方法对比及应用_董萌\n :param P:当前压力,Mpa\n :param theta: 天然气相对密度\n :param T: 温度,K\n :return:\n '''\n P = P * 1000\n\n A1 = 0.31506237\n A2 = -1.046099\n A3 = -0.57832720\n A4 = 0.53530771\n A5 = -0.61232023\n A6 = -0.10488813\n A7 = 0.68127001\n A8 = 0.68446549\n P_c = 4881 - 386.11 * theta\n T_c = 92 + 116.67 * theta\n P_r = P / P_c\n T_r = T / T_c\n\n T1 = A1 + (A2 / T_r) + (A3 / T_r ** 3)\n T2 = A4 + (A5 / T_r)\n T3 = A5 * A6 / T_r\n T4 = A7 / (T_r ** 3)\n T5 = 0.27 * P_r / T_r\n\n rho = 0.27 * P_r / T_r\n rho_pass = False\n\n while rho_pass == False:\n f_rho = 1 + T1 * rho + T2 * rho ** 2 + T3 * rho ** 5 + (\n T4 * rho ** 2 * (1 + A8 * rho ** 2) * np.exp(-(A8 * rho ** 2))) - T5 / rho\n f_rho_coe = T1 + 2 * T2 * rho + 5 * T3 * rho ** 4 + 2 * T4 * rho * (\n 1 + A8 * rho ** 2 - A8 ** 2 * rho ** 4) * np.exp(-(A8 * rho ** 2)) + T5 / rho ** 2\n\n rho_old = rho\n rho_new = rho - (f_rho / f_rho_coe)\n rho = rho_new\n if rho_old - rho_new < 0.1:\n rho_pass = True\n Z = 0.27 * P_r / (rho_new * T_r)\n return Z\n\n def get_phi(self,P):\n '''\n 计算孔隙度\n 来源:一种快速准确预测煤层气井生产动态的解析模型_石军太\n :param P:当前压力\n :return:\n '''\n C_p=0.02#孔隙压缩系数\n phi=(1-C_p*(self.P_i-P))*self.phi_i\n\n return phi\n\n def get_K(self,phi):\n '''\n 计算渗透率\n 来源:一种快速准确预测煤层气井生产动态的解析模型_石军太\n :param phi:当前孔隙度\n :return:\n '''\n K=self.K_i*(phi/self.phi_i)**3\n return K\n\n def get_P_1(self, S_w, Z, phi, G_p):\n '''\n 排水降压阶段压力计算\n :param S_w: 含水饱和度\n :param Z: 气体压缩因子\n :param phi: 孔隙度\n :param G_p: 累计产气量\n :return:\n '''\n B = self.A * self.h * phi * (1 - S_w) * self.Z_sc * self.T_sc / (Z * self.T * self.P_sc)\n P = (self.G_f - G_p) / B\n return P\n\n def get_P_2(self, S_w, Z, phi, G_p):\n '''\n 煤层气解吸阶段压力计算\n :param S_w:\n :param Z:\n :param phi:\n :param G_p:\n :return:\n '''\n\n x=self.G+self.G_f-G_p\n A=self.A*self.h*self.rho_B*self.V_L\n B=self.A*self.h*phi*(1-S_w)*self.Z_sc*self.T_sc/(Z*self.T*self.P_sc)\n\n P=self.P_i\n 
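The `RandomParcels.py` record above grows `n_parcels` labeled regions from randomly seeded vertices, repeatedly extending a randomly chosen unfinished parcel into an unlabeled neighbor until the surface is covered. The toy sketch below restates that region-growing idea over a plain adjacency list; it assumes the graph is connected (so no vertex is left unlabeled) and omits the per-parcel probability weighting the original applies.

```python
import numpy as np

def grow_parcels(neighbors, n_parcels, seed=1):
    """Random region growing over an adjacency list (toy sketch)."""
    rng = np.random.default_rng(seed)
    n = len(neighbors)
    mask = np.zeros(n, dtype=np.int16)
    seeds = rng.choice(n, size=n_parcels, replace=False)
    mask[seeds] = np.arange(1, n_parcels + 1)   # drop the starting labels
    grew = True
    while grew:                                 # sweep until no parcel can grow
        grew = False
        for label in rng.permutation(n_parcels) + 1:
            cells = np.where(mask == label)[0]
            open_ns = [v for c in cells for v in neighbors[c] if mask[v] == 0]
            if open_ns:
                mask[rng.choice(open_ns)] = label
                grew = True
    return mask

ring = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [2, 0]}  # 4-cycle toy graph
print(grow_parcels(ring, n_parcels=2))
```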
is_pass=False\n\n while is_pass == False:\n\n f_p=B*P**2+(B*self.P_L+A-x)*P-x*self.P_L\n f_P_coe=2*B*P+B*self.P_L+A-x\n\n P_old=P\n P=P_old-(f_p/f_P_coe)\n\n if np.abs(P_old-P)<=0.0001:\n is_pass=True\n return P\n\n def get_S_w(self,W_p,phi):\n '''\n 计算含水饱和度\n :param W_p: 累计产水量\n :param phi: 孔隙度\n :return:\n '''\n S_w = self.S_wi -( self.B_W * W_p / (self.A * self.h * phi))\n return S_w\n\n def get_k_rg_k_rw(self,S_w):\n '''\n 气水相渗透率计算\n :param S_w: 含水饱和度\n :return:\n '''\n k_rg=(1-S_w)**1.1\n k_rw=S_w**2.5\n return k_rg,k_rw\n\n def get_gas_prediction(self,P,k_g,Z,P_wf,K):\n '''\n 气体产能计算\n :param P:压力\n :param k_g:气相渗透率\n :param Z:气体压缩因子\n :param P_wf:井底流压\n :param K:渗透率\n :return:q_g,日产气量,m^3\n '''\n q_g=774.6*K*k_g*self.h*(P**2-P_wf**2)/(self.T*self.mu_g*Z*( np.log(self.r_e/self.r_w)-0.75+self.s ))\n # q_g=1000*k_g*K*self.h*(P**2-P_wf**2)/(1.31*self.T*self.mu_g*Z*( np.log(self.r_e/self.r_w)-0.75+self.s ))\n\n return q_g\n\n def get_water_prediction(self,P,k_w,P_wf,K):\n '''\n 产水量计算\n :param P:\n :param k_w:\n :param P_wf:\n :param K:\n :return: q_w,日产水量,m^3\n '''\n q_w=0.543*k_w*K*self.h*(P-P_wf)/(self.B_W*self.mu_w*( np.log(self.r_e/self.r_w)-0.75+self.s ))\n return q_w\n\n def get_P_wf(self, P,k_w,K):\n '''\n 计算井底流压,根据产水量计算公式反推\n :param P:\n :param k_w:\n :param K:\n :param S_w:\n :return:\n '''\n P_wf=P-(self.q_wi*self.B_W*self.mu_w*( np.log(self.r_e/self.r_w)-0.75+self.s )/(0.543*k_w*K*self.h))\n return P_wf\n\n\n\ndef run(well_info,time):\n GP = Gas_prediction(well_info)\n\n '''\n 定义列表,存放结果\n '''\n q_g_list=[]\n q_w_list=[]\n P_list=[]\n i_list=[]\n Z_list=[]\n phi_list=[]\n S_w_list=[]\n G_P_list=[]\n W_p_list=[]\n P_wf_list=[]\n K_list=[]\n Kg_list=[]\n Kw_list=[]\n\n '''\n 定义初始参数\n '''\n P=GP.P_i\n W_p=0\n G_p=0\n Z = GP.Z_i\n phi = GP.phi_i\n K=GP.K_i\n S_w =GP.S_wi\n k_g, k_w = GP.get_k_rg_k_rw(S_w)\n P_wf = GP.get_P_wf( P,k_w,K)\n '''\n 设定排采时间,动态预测\n '''\n for i in range(time):\n # print(i+1)\n test_step=10\n if i %test_step==0:\n i_list.append(i*10)\n '''\n 排水,根据井底流压计算排水量\n '''\n if P_wf > GP.P_wf:\n q_w=GP.q_wi*10\n else:\n q_w=GP.get_water_prediction(P,k_w,P_wf,K)*10\n if i % test_step == 0:\n q_w_list.append(q_w/10)\n # print('q_w:', q_w/10)\n\n W_p = W_p + q_w\n if i % test_step == 0:\n W_p_list.append(W_p)\n '''\n 计算含水饱和度\n '''\n S_w = GP.get_S_w(W_p,phi)\n if i % test_step == 0:\n S_w_list.append(S_w)\n # print('S_w:', S_w)\n\n '''\n 计算压力\n '''\n if P > GP.P_cd:\n P=GP.get_P_1( S_w, Z, phi, G_p)\n else:\n P = GP.get_P_2(S_w, Z, phi, G_p)\n if i % test_step == 0:\n P_list.append(P)\n # print('P:',P)\n\n '''\n 计算气体压缩因子\n '''\n Z = GP.get_z(P,GP.T,0.8)\n if i % test_step == 0:\n Z_list.append(Z)\n # print('Z:',Z)\n\n '''\n 计算孔隙度\n '''\n phi=GP.get_phi(P)\n if i % test_step == 0:\n phi_list.append(phi)\n # print('phi:',phi)\n\n '''\n 计算渗透率\n '''\n K=GP.get_K(phi)\n if i % test_step == 0:\n K_list.append(K)\n\n '''\n 计算气水相渗透率\n '''\n k_g, k_w=GP.get_k_rg_k_rw(S_w)\n\n '''\n 计算井底流压,若井底流压小于设定值,则认为当前井底流压为设定值\n '''\n if P_wf > GP.P_wf:\n # P_wf = GP.P_wf\n P_wf = GP.get_P_wf(P, k_w, K)\n else:\n P_wf = GP.P_wf\n if i % test_step == 0:\n P_wf_list.append(P_wf)\n\n '''\n 根据当前压力,判断排采阶段,计算产气量\n '''\n if P > GP.P_cd:\n # q_g=GP. 
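Both `get_z` and `get_P_2` in the `gas_prediction.py` record above solve a scalar nonlinear equation with a hand-rolled Newton-Raphson loop (the gas compressibility factor and the material-balance pressure, respectively). The generic restatement below makes the convergence test and iteration cap explicit; the quadratic example mirrors the `B*P**2 + b*P + c` shape of `get_P_2` with invented coefficients.

```python
def newton(f, df, x0, tol=1e-4, max_iter=100):
    """Scalar Newton-Raphson, mirroring the hand-rolled loops in get_z/get_P_2."""
    x = x0
    for _ in range(max_iter):
        step = f(x) / df(x)   # assumes df(x) != 0 near the root
        x -= step
        if abs(step) <= tol:
            return x
    raise RuntimeError("Newton iteration did not converge")

# Example: material-balance-style quadratic B*P^2 + b*P + c = 0 near P0 = 3.5.
B, b, c = 2.0, -7.0, 3.0
P = newton(lambda p: B * p * p + b * p + c, lambda p: 2 * B * p + b, x0=3.5)
assert abs(B * P * P + b * P + c) < 1e-6   # converges to the root P = 3
```

Unlike the originals, this version fails loudly when it stalls instead of looping forever.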
get_gas_prediction_level_1( P, phi, S_w, Z,G_p)\n q_g=0\n else:\n q_g = GP.get_gas_prediction(P,k_g,Z,P_wf,K)*10\n if i % test_step == 0:\n q_g_list.append(q_g/10)\n # print('q_g:',q_g/10)\n G_p = G_p + q_g\n if i % test_step == 0:\n G_P_list.append(G_p)\n # print('G_p',G_p)\n if i % test_step == 0:\n Kg_list.append(k_g)\n Kw_list.append(k_w)\n\n #\n print('采收率:',G_p/GP.G)\n\n '''\n 结果可视化\n '''\n # fig = plt.figure()\n # font = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\")\n #\n # ax1 = fig.add_subplot(3, 2, 1)\n # ax1.set_title('日产水量', fontproperties=font)\n # plt.scatter(i_list, q_w_list, marker='x', color='red', s=2, label='First')\n #\n # ax2 = fig.add_subplot(3, 2, 2)\n # ax2.set_title('日产气量', fontproperties=font)\n # plt.scatter(i_list, q_g_list, marker='o', color='red', s=2, label='First')\n #\n # ax3 = fig.add_subplot(3, 2, 3)\n # ax3.set_title('压力', fontproperties=font)\n # plt.scatter(i_list, P_list, marker='x', color='blue', s=2, label='First')\n #\n # ax4 = fig.add_subplot(3, 2, 4)\n # ax4.set_title('Z', fontproperties=font)\n # plt.scatter(i_list, Z_list, marker='x', color='red', s=2, label='First')\n #\n # ax5 = fig.add_subplot(3, 2, 5)\n # ax5.set_title('井底流压', fontproperties=font)\n # plt.scatter(i_list, P_wf_list, marker='x', color='red', s=2, label='First')\n #\n # ax6 = fig.add_subplot(3, 2,6)\n # ax6.set_title('有效渗透率', fontproperties=font)\n # plt.scatter(i_list, Kg_list, marker='x', color='red', s=2, label='First')\n # plt.show()\n\n fig = plt.figure()\n font = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\")\n\n ax1 = fig.add_subplot(111)\n line1=ax1.plot(i_list, Kg_list,'r',marker='o', mec='r',lw=1,ms=0,label='气相渗透率')\n\n ax2 = ax1.twinx() # this is the important function\n line2=ax2.plot(i_list, Kw_list,'b', marker='x', mec='b',lw=1,ms=0,label='水相渗透率')\n\n lns =line1+ line2\n labs = [l.get_label() for l in lns]\n plt.legend(lns, labs,prop=font,loc='center right')\n\n ax1.set_xlabel('时间(天)',fontproperties=font) # 设置x轴标题\n ax1.set_ylabel('气相渗透率', color='r',fontproperties=font) # 设置Y1轴标题\n ax2.set_ylabel('水相渗透率', color='b',fontproperties=font) # 设置Y2轴标题\n\n plt.show()\n\n return G_p\n\nif __name__==\"__main__\":\n '''\n 汇总数据至data_CBM_info.csv\n '''\n # data_P_i=np.array(pd.read_csv('data/IDW_储层压力.csv',header=None))\n # data_V_L = np.array(pd.read_csv('data/IDW_兰氏体积.csv', header=None))\n # data_P_L = np.array(pd.read_csv('data/IDW_兰氏压力.csv', header=None))\n # data_phi_i = np.array(pd.read_csv('data/IDW_孔隙度.csv', header=None))\n # data_K_i = np.array(pd.read_csv('data/IDW_渗透率.csv', header=None))\n # data_h = np.array(pd.read_csv('data/IDW_煤厚.csv', header=None))\n # data_P_cd = np.array(pd.read_csv('data/IDW_解吸压力.csv', header=None))\n #\n #\n # data_target = np.zeros([60, 60],dtype=dict)\n # for i in range(3600):\n # row=int(np.floor(i/60))\n # column=i%60\n #\n # data_target[row,column]={\n # 'V_L':data_V_L[row,column],\n # 'P_L': data_P_L[row,column],\n # 'P_cd':data_P_cd[row,column],\n # 'P_i':data_P_i[row,column],\n # 'h':data_h[row,column],\n # 'phi_i':data_phi_i[row,column],\n # 'K_i':data_K_i[row,column],\n # }\n #\n # pd.DataFrame(data_target).to_csv('data/data_CBM_info.csv',index=0,header=0)\n\n # data_CBM_info=np.array(pd.read_csv('data/data_CBM_info.csv',header=None))\n #\n # for i in range(3600):\n # row=int(np.floor(i/60))\n # column=i%60\n #\n # info_str=data_CBM_info[row,column]\n # info_dict=eval(info_str)\n #\n #\n # well_info={\n #\n # 'A':250*250,\n # 'V_L':info_dict['V_L'],\n # 'P_L': info_dict['P_L'],\n # 'P_cd':info_dict['P_cd'],\n # 
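The visualization block in `run()` above overlays the gas- and water-phase relative permeability curves on twin y-axes and merges their legend handles into one box. That matplotlib pattern is worth isolating; the sketch below is standalone, and the two curves are placeholders rather than model output.

```python
import matplotlib.pyplot as plt
import numpy as np

t = np.arange(0, 720, 10)
k_rg = 1 - np.exp(-t / 300)          # placeholder gas-phase curve
k_rw = np.exp(-t / 300)              # placeholder water-phase curve

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()                    # second y-axis sharing the same x-axis
l1 = ax1.plot(t, k_rg, 'r', label='gas-phase k_r')
l2 = ax2.plot(t, k_rw, 'b', label='water-phase k_r')
ax1.set_xlabel('time (days)')
ax1.set_ylabel('gas-phase k_r', color='r')
ax2.set_ylabel('water-phase k_r', color='b')
lines = l1 + l2                      # merge handles so one legend covers both axes
ax1.legend(lines, [l.get_label() for l in lines], loc='center right')
plt.show()
```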
'P_i':info_dict['P_i'],\n # 'h':info_dict['h'],\n # 'phi_i':info_dict['phi_i'],\n # 'K_i':info_dict['K_i'],\n # 'rho_B':1.58\n # }\n #\n # G_p=run(well_info, 180)\n # print([row,column],G_p/1000000)\n\n well_info = {\n\n 'A': 200*200,\n 'V_L':40.97,\n 'P_L': 3.69,\n 'P_cd': 6.236,\n 'P_i': 7.612,\n 'h':6.5,\n 'phi_i': 0.0498,\n 'K_i': 1.2,\n 'rho_B': 1.58\n }\n\n G_p = run(well_info, 720)\n # print([row, column], G_p / 1000000)\n\n\n","sub_path":"gas_prediction.py","file_name":"gas_prediction.py","file_ext":"py","file_size_in_byte":13784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"596526072","text":"import cv2\nimport numpy as np\nimport face_recognition\n\nimgad = face_recognition.load_image_file('ImagesBasic/Adarsh.jpg')\nimgad = cv2.cvtColor(imgad, cv2.COLOR_BGR2RGB)\nimgTest = face_recognition.load_image_file('ImagesBasic/Adarsh Test.jpg')\nimgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)\n\nfaceloc = face_recognition.face_locations(imgad)[0]\nencodeAd = face_recognition.face_encodings(imgad)[0]\ncv2.rectangle(imgad, (faceloc[3], faceloc[0]), (faceloc[1], faceloc[2]), (255, 0, 255), 2)\n\nfacelocTest = face_recognition.face_locations(imgTest)[0]\nencodeTest = face_recognition.face_encodings(imgTest)[0]\ncv2.rectangle(imgTest, (facelocTest[3], facelocTest[0]), (facelocTest[1], facelocTest[2]), (255, 0, 255), 2)\n\nresults = face_recognition.compare_faces([encodeAd], encodeTest)\nfaceDis = face_recognition.face_distance([encodeAd], encodeTest)\nprint(results, faceDis)\ncv2.putText(imgTest, f'{results} {round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)\ncv2.imshow('ADARSH', imgad)\ncv2.imshow('ADARSH TEST', imgTest)\ncv2.waitKey(0)\n","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"338873053","text":"from flask import jsonify\r\nfrom flask_restful import request, Resource\r\n\r\nfrom .experiment.experiment_cache import ExperimentCache\r\n\r\n\r\nclass Experiments(Resource):\r\n\r\n def get(self):\r\n experiment_cache = ExperimentCache().get_experiment_cache()\r\n experiments = []\r\n for exp in experiment_cache.values():\r\n experiments.append({\r\n 'name': exp.pretty_name(),\r\n 'id': exp.__class__.__name__,\r\n 'description': exp.description(),\r\n 'domain': exp.domain().value,\r\n 'initialized': exp.status.initialized\r\n })\r\n return jsonify({'all_experiments': experiments})\r\n","sub_path":"api/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412803233","text":"import requests\nfrom UA import agents\nimport random\nimport time\nimport json\nfrom db import *\nimport multiprocessing\nfrom thread_pool import ThreadPool#自己写的线程池\nimport lxml.html as lhtml\nfrom retry import retry\nfrom proxy import get_proxies\nfrom stock_queue import get_stock\nimport re\n\nimport logging\nLOG_FORMAT = \"%(asctime)s : %(levelname)s - %(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\n\n# helper method\ndef format_time(t):\n return '2019-'+t if len(t)==11 else t\n\ndef format_type_and_price(text):\n match = re.search(r'以([¥¥]|undefined){0,1}([0-9]{1,6}\\.{0,1}[0-9]{0,6})元{0,1}买入', text)\n if match:\n return '买入', float(match.group(2))\n match = re.search(r'委托买入价([0-9\\.]*)。', text)\n if match:\n return '买入', 
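The `face.py` record above compares a single known encoding against one probe with `compare_faces` and `face_distance`. Scaling to a gallery of known faces is an argmin over distances with a tolerance cut-off; `identify` below is an illustrative helper, not part of the original script, and 0.6 is the tolerance the library conventionally defaults to.

```python
import numpy as np
import face_recognition

def identify(known_encodings, names, probe_encoding, tolerance=0.6):
    """Return the best-matching name, or None if every distance exceeds tolerance."""
    dists = face_recognition.face_distance(known_encodings, probe_encoding)
    best = int(np.argmin(dists))
    return names[best] if dists[best] <= tolerance else None
```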
float(match.group(1))\n \n match = re.search(r'以([¥¥]|undefined){0,1}([0-9]{1,6}\\.{0,1}[0-9]{0,6})元{0,1}卖出', text)\n if match:\n return '卖出', float(match.group(2))\n match = re.search(r'委托卖出价([0-9\\.]*)。', text)\n if match:\n return '卖出', float(match.group(1))\n\n if '关注' in text:\n return '关注', None\n\n return '其他', None\n \n \n \n@retry(delay=10)\ndef download_transaction_from_web(symbol, num):\n real_time = str(time.time()).replace('.', '')[0:-1]#获取当前时间\n transaction_url= 'https://xueqiu.com/statuses/search.json?count=10&comment=0&symbol={symbol}&hl=0&source=trans&sort=time&page={page}&_={real_time}' # source=trans determines to get transaction\n url = transaction_url.format(symbol=symbol, page=str(num), real_time=real_time)\n session = requests.session()\n session.proxies = get_proxies() # 携带代理\n headers={\n 'User-Agent':random.choice(agents)\n }\n _first_request = session.get(url='https://xueqiu.com/', headers=headers, timeout=10)\n transactions_response = session.get(url, headers=headers, timeout=10)\n if str(transactions_response.status_code) != str(200):\n raise Exception(\"Can't download transaction from web. symbol: {}, num: {}. {}\".format(symbol, num, transactions_response.text))\n return transactions_response.text\n\ndef update_transaction_database(transactions_text, num, symbol):\n stocks_transaction = json.loads(transactions_text)['list']\n page = json.loads(transactions_text)['maxPage']#获取最大页数\n transaction_database=TransactionMongo()\n for transaction in stocks_transaction:\n try:\n text = transaction.get('text').strip()\n selector = lhtml.fromstring(text) # 里面的标签各种各样,各种嵌套,用正则调了很久,投降了,改用xpath\n transaction_text = selector.xpath('string(.)').replace(' ','')\n user_id = transaction.get('user_id') # 评论者ID\n user = transaction.get('user') # 评论者信息\n title = transaction.get('title') # 标题\n stock_code = symbol # 股票代码\n transaction_id = transaction.get('id') # 每条评论都要唯一的ID\n source = transaction.get('source')\n like_count = transaction.get('like_count')\n reply_count = transaction.get('reply_count')\n retweet_count = transaction.get('retweet_count')\n transaction_time = format_time(transaction.get('timeBefore'))\n transaction_type, price = format_type_and_price(transaction_text)\n transaction_database.push(message_id=transaction_id,message_time=transaction_time,transaction_type=transaction_type, price=price, symbol=stock_code,message=transaction_text,user_id=user_id,user=user,title=title,source=source,like_count=like_count,reply_count=reply_count,retweet_count=retweet_count,max_page=page,current_page=num)\n logging.info('{}的第{}页爬取成功, 共{}页'.format(symbol, num, page))\n except Exception as e:\n logging.warning('Json解析失败. 错误信息: {}'.format(e))\n\ndef thread_get_transaction(args):\n num, symbol, max_page = args\n logging.info('开始抓取{}的第{}页, 共{}页'.format(symbol, num, max_page))\n transactions_text = download_transaction_from_web(symbol, num)\n update_transaction_database(transactions_text, num, symbol)\n\ndef transaction_crawler(symbol):\n pool=ThreadPool(5)\n transactions_text = download_transaction_from_web(symbol, 1)\n max_page = json.loads(transactions_text)['maxPage']#获取最大页数\n for num in range(1,max_page+1):\n pool.run(func=thread_get_transaction,args=(num,symbol, max_page)) \n\ndef get_transaction():\n stock_database = StockMongo()#链接第一步抓取的股票代码数据表,根据股票代码抓取评论\n while True:\n try:\n symbol = stock_database.pop()\n transaction_crawler(symbol)\n except AttributeError:\n logging.info('All stocks are successfully scraped. 
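The `crawl_transaction.py` record above leans on a third-party `@retry` decorator around its proxied `requests` session (which deliberately visits the homepage first to pick up cookies before hitting the JSON endpoint). A dependency-free equivalent of the retry part is a small decorator; `flaky` below is a stand-in for the real download function, and the attempt/delay values are illustrative.

```python
import random
import time

def with_retry(fn, attempts=3, delay=10):
    """Minimal stand-in for the @retry decorator used above."""
    def wrapper(*args, **kwargs):
        for i in range(attempts):
            try:
                return fn(*args, **kwargs)
            except Exception:
                if i == attempts - 1:
                    raise          # out of attempts: surface the last error
                time.sleep(delay)  # back off before retrying
    return wrapper

@with_retry
def flaky():
    if random.random() < 0.5:
        raise RuntimeError("transient failure")
    return "ok"
```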
')\n break\n\ndef process_crawler():\n get_queue()\n process=[]\n num_cpus=multiprocessing.cpu_count()\n logging.info('Number of processes to start: {}'.format(num_cpus))\n for _ in range(int(num_cpus)):\n p=multiprocessing.Process(target=get_transaction)  # create a worker process\n p.start()\n process.append(p)\n for p in process:\n p.join()\n \nif __name__ == '__main__':\n get_stock()\n TransactionMongo().clear()\n QueueMongo().clear()\n process_crawler()","sub_path":"deprecated/crawl_transaction.py","file_name":"crawl_transaction.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637780637","text":"import dialogflow\nimport os\nfrom google.api_core.exceptions import InvalidArgument\n\nDIALOGFLOW_PROJECT_ID = 'kwili-chatbot-hlonvo'\nDIALOGFLOW_LANGUAGE_CODE = 'fr-FR'\nSESSION_ID = 'current-user-id'\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'key.json'\n\nsession_client = dialogflow.SessionsClient()\nsession = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)\n\ndef send_query(user_text):\n\ttext_input = dialogflow.types.TextInput(text=user_text, language_code=DIALOGFLOW_LANGUAGE_CODE)\n\tquery_input = dialogflow.types.QueryInput(text=text_input)\n\ttry:\n\t\tresponse = session_client.detect_intent(session=session, query_input=query_input)\n\t\treturn response.query_result.fulfillment_messages\n\texcept InvalidArgument:\n\t\traise","sub_path":"src/services/chat/dial.py","file_name":"dial.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104407549","text":"# Copyright (c) 2015, Alphamonak Solutions Ltd. \n# License: GNU General Public License v3. See license.txt\n\nimport redapp\nimport redapp.permissions\n\ndef execute():\n\tredapp.reload_doc(\"core\", \"doctype\", \"block_module\")\n\tredapp.reload_doctype(\"User\")\n\tredapp.reload_doctype(\"Lead\")\n\tredapp.reload_doctype(\"Contact\")\n\n\tredapp.reload_doc('crm', 'doctype', 'newsletter_list')\n\tredapp.reload_doc('crm', 'doctype', 'newsletter_list_subscriber')\n\tredapp.reload_doc('crm', 'doctype', 'newsletter')\n\n\tredapp.permissions.reset_perms(\"Newsletter\")\n\n\tif not redapp.db.exists(\"Role\", \"Newsletter Manager\"):\n\t\tredapp.get_doc({\"doctype\": \"Role\", \"role\": \"Newsletter Manager\"}).insert()\n\n\tfor userrole in redapp.get_all(\"UserRole\", \"parent\", {\"role\": \"Sales Manager\"}):\n\t\tif redapp.db.exists(\"User\", userrole.parent):\n\t\t\tuser = redapp.get_doc(\"User\", userrole.parent)\n\t\t\tuser.append(\"user_roles\", {\n\t\t\t\t\"doctype\": \"UserRole\",\n\t\t\t\t\"role\": \"Newsletter Manager\"\n\t\t\t})\n\t\t\tuser.flags.ignore_mandatory = True\n\t\t\tuser.save()\n\n\t# create default lists\n\tgeneral = redapp.new_doc(\"Newsletter List\")\n\tgeneral.title = \"General\"\n\tgeneral.insert()\n\tgeneral.import_from(\"Lead\")\n\tgeneral.import_from(\"Contact\")\n","sub_path":"redapple/patches/v5_0/newsletter.py","file_name":"newsletter.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"609496412","text":"import configparser\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass DbManager(object):\n \"\"\"Database connection class\"\"\"\n\n def __init__(self):\n super(DbManager, self).__init__()\n conf = configparser.ConfigParser()\n conf.read('./conf/config.ini')\n mysql_string = conf.get('db', 'mysql_string')\n 
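# Expected shape of ./conf/config.ini (a sketch: the [db] section and the mysql_string key mirror the conf.get() call above; the exact URL format depends on the installed driver and is assumed here):\n # [db]\n # mysql_string = mysql+pymysql://user:password@host:3306/dbname\n 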
self.engine = create_engine(\n mysql_string,\n connect_args={'charset': 'utf8'})\n DBSession = sessionmaker(bind=self.engine)\n self.session = DBSession()\n\n def add_data(self, data):\n try:\n self.session.add(data)\n self.session.commit()\n self.session.close()\n return True\n except Exception as e:\n print(e)\n return False\n\n def find(self, model, params, order_by=None):\n if order_by is None:\n return self.session.query(model).filter(params).first()\n else:\n return self.session.query(model).filter(params).order_by(order_by).first()\n\n def select(self, model, params, order_by=None):\n if order_by is None:\n return self.session.query(model).filter(params).all()\n else:\n return self.session.query(model).filter(params).order_by(order_by).all()\n\n def get_session(self):\n return self.session\n","sub_path":"model/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346447273","text":"#!/usr/bin/python3\n\nimport csv\nimport os\nimport sys\nimport requests\n\n# Output filepaths and formatting\nCWD = os.getcwd()\nCSV_NAME = \"location_to_device.csv\"\nOUTPUT_PATH = CWD + \"/\" + CSV_NAME\nCOLUMN_DESCRIPTIONS = [\"Country\", \"State\", \"Device\", \"Browser\"]\n\n# Required Userstack API key:\ndef userstack_api_key() -> str:\n userstack_api_key = os.getenv('USERSTACK_KEY')\n if userstack_api_key is not None:\n return userstack_api_key\n print(\"Required Userstack API key not found in environment variables. Export key as USERSTACK_KEY\")\n sys.exit(os.EX_CONFIG)\n\nUSERSTACK_KEY = userstack_api_key()\nUSERSTACK_URL = \"http://api.userstack.com/detect\"\n\n# Optional IPAPI key added if present\ndef ip_api_suffix() -> str:\n ip_location_suffix = \"/json\"\n ip_api_key = os.getenv('IPAPI_KEY')\n if ip_api_key is not None:\n ip_location_suffix += \"?key=\" + ip_api_key\n return ip_location_suffix\n\nIP_API_BASE_URL = \"https://ipapi.co/\"\nIP_API_SUFFIX = ip_api_suffix()\n\n# Validate arguments and filepath\ndef argument_exists() -> bool:\n return len(sys.argv) > 1\n\ndef target_exists(path: str) -> bool:\n return os.path.exists(path)\n\ndef exit_no_input() -> None:\n print(\"Target not found at specified path. Ensure correct path to target is passed as argument. 
\\nExample usage: ./log_parser.py <path/to/access.log>\")\n sys.exit(os.EX_NOINPUT)\n\ndef get_target_path() -> str:\n if argument_exists():\n if target_exists(sys.argv[1]):\n return sys.argv[1]\n exit_no_input()\n return \"\"\n\nTARGET_PATH = get_target_path()\n\n# Request location using IP\ndef get_location(url: str) -> dict:\n location = {}\n response = requests.get(url)\n if response.status_code == 200:\n json_response = response.json()\n country = json_response['country_name']\n region = json_response['region']\n location = {'country' : country, 'region': region}\n return location\n\n# Request device information using useragent\ndef get_device(useragent: str) -> dict:\n device_info = {}\n params = {\n 'access_key': USERSTACK_KEY,\n 'ua' : useragent\n }\n response = requests.get(USERSTACK_URL, params)\n if response.status_code == 200:\n json_response = response.json()\n device_type = json_response['device']['type']\n browser = json_response['browser']['name']\n device_info = {'type' : device_type, 'browser': browser}\n return device_info\n\ndef convert_log_to_csv() -> None:\n with open(TARGET_PATH, \"r\") as target, open(OUTPUT_PATH, \"w\", newline = \"\") as output:\n print(\"Looking up locations and devices for access log\")\n writer = csv.writer(output)\n writer.writerow(COLUMN_DESCRIPTIONS)\n\n for line in target:\n ip_addr = line.split(\" \")[0]\n print(ip_addr)\n user_agent = line.rsplit(\"\\\"\")[-2]\n\n ip_location_url = IP_API_BASE_URL + ip_addr + IP_API_SUFFIX\n location = get_location(ip_location_url)\n if location == {}:\n print(\"No location or bad response, skipping entry\")\n continue\n print(\"Location: \" + location['country'] + \" Region: \" + location['region'])\n\n device = get_device(user_agent)\n if device == {}:\n print(\"No device or bad response, skipping entry\")\n continue\n\n print(\"Device Type: \" + device['type'] + \" Browser: \" + device['browser'])\n\n writer.writerow([location['country'], location['region'], device['type'], device['browser']])\n\n print(\"Complete! 
CSV file written to location_to_device.csv\")\n\ndef main() -> None:\n convert_log_to_csv()\n sys.exit(os.EX_OK)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"log_parser.py","file_name":"log_parser.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461882452","text":"from __future__ import absolute_import\nimport argparse\nimport datetime\nimport textwrap\nimport logging\n\nimport boto3\n\n\nclass AwsSnapper(object):\n VERSION = '0.1'\n\n def __init__(self):\n self._loaded = False\n\n self.tag_prefix = None\n self.ec2_regions = list()\n self.sns_arn = None\n\n self.report = {\n 'started': datetime.datetime.now(),\n 'finished': None,\n 'instances_managed': 0,\n 'volumes_managed': 0,\n 'snaps_created': 0,\n 'snaps_deleted': 0,\n 'problem_volumes': list()\n }\n\n def _load_config(self):\n if self._loaded:\n return\n\n parser = argparse.ArgumentParser(description='Create and manage scheduled EBS snapshots')\n parser.add_argument('regions', metavar='region', nargs='*',\n help='EC2 Region(s) to process for snapshots',\n default=[None])\n parser.add_argument('--sns-arn', dest='sns_arn', action='store', default=None,\n help='SNS ARN for reporting results', metavar='ARN')\n parser.add_argument('--prefix', dest='tag_prefix', action='store', default='autosnap',\n help='Prefix to use for AWS tags on snapshots', metavar='PREFIX')\n parser.add_argument('--version', action='version',\n version='AwsSnapper v{}'.format(self.VERSION))\n settings = parser.parse_args()\n\n self.sns_arn = settings.sns_arn\n self.tag_prefix = settings.tag_prefix\n for region in settings.regions:\n self.ec2_regions.append(region)\n\n self._loaded = True\n\n def scan_and_snap(self, region):\n if not self._loaded:\n self._load_config()\n\n tag_interval = '{prefix}'.format(prefix=self.tag_prefix)\n tag_retain = '{prefix}_retain'.format(prefix=self.tag_prefix)\n tag_ignore = '{prefix}_ignore'.format(prefix=self.tag_prefix)\n today = datetime.date.today()\n\n if region is not None:\n ec2 = boto3.resource('ec2', region_name=region)\n else:\n ec2 = boto3.resource('ec2')\n\n instances = ec2.instances.all()\n for instance in instances:\n i_tags = instance.tags\n i_ignore = True\n i_snap_interval = 0\n i_snap_retain = 0\n i_name = instance.id\n i_name_only = instance.id  # default so short_description never hits an unbound name\n for i_tag in i_tags:\n if i_tag['Key'] == tag_interval:\n i_ignore = False\n i_snap_interval = i_tag['Value']\n if i_tag['Key'] == tag_retain:\n i_snap_retain = i_tag['Value']\n if i_tag['Key'] == 'Name' and len(i_tag['Value']) > 2:\n i_name = '{}-({})'.format(i_tag['Value'], instance.id)\n i_name_only = '{}'.format(i_tag['Value'])\n if i_ignore:\n continue\n\n self.report['instances_managed'] += 1\n\n volumes = ec2.volumes.filter(Filters=[{'Name': 'attachment.instance-id',\n 'Values': [instance.id]}])\n for volume in volumes:\n v_snap_interval = i_snap_interval\n v_snap_retain = i_snap_retain\n v_tags = volume.tags\n v_ignore = False\n v_name = volume.id\n for v_tag in v_tags:\n if v_tag['Key'] == tag_ignore:\n v_ignore = True\n if v_tag['Key'] == tag_interval:\n v_snap_interval = v_tag['Value']\n if v_tag['Key'] == tag_retain:\n v_snap_retain = v_tag['Value']\n if v_tag['Key'] == 'Name':\n v_name = '{} ({})'.format(v_tag['Value'], volume.id)\n if v_ignore:\n continue\n\n if v_snap_interval == 0 or v_snap_retain == 0:\n self.report['problem_volumes'].append(volume.id)\n continue\n\n self.report['volumes_managed'] += 1\n\n snap_collection = ec2.snapshots.filter(Filters=[{'Name': 'volume-id',\n 
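# keep only snapshots of this volume that were created by this tool\n 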
'Values': [volume.id]},\n {'Name': 'tag:snapshot_tool',\n 'Values': [self.tag_prefix]}])\n snap_list = list(snap_collection)\n\n snap_needed = False\n if len(snap_list) == 0:\n snap_needed = True\n else:\n snap_list.sort(key=lambda s: s.start_time, reverse=True)\n interval = int(v_snap_interval)\n expected = snap_list[0].start_time.date() + datetime.timedelta(days=interval)\n if today >= expected:\n snap_needed = True\n\n if snap_needed:\n description = '{}: {} from {} of {}'.format(self.tag_prefix, v_name, today, i_name)\n short_description = '{}-{}-{}'.format(today, v_name, i_name_only)\n snapshot = volume.create_snapshot(Description=description)\n snapshot.create_tags(Tags=[{'Key': 'Name', 'Value': short_description},\n {'Key': 'snapshot_tool', 'Value': self.tag_prefix}])\n self.report['snaps_created'] += 1\n else:\n # too soon\n pass\n\n while len(snap_list) > int(v_snap_retain):\n snapshot_to_delete = snap_list.pop()\n snapshot_to_delete.delete()\n self.report['snaps_deleted'] += 1\n\n def generate_report(self):\n self.report['finished'] = datetime.datetime.now()\n\n report = textwrap.dedent(\"\"\"\\\n AWS Snapshot Report\n\n Run Started: {started}\n Run Finished: {finished}\n\n Snapshots created: {snaps_created}\n Snapshots deleted: {snaps_deleted}\n\n > Details:\n > Instances managed: {instances_managed}\n > Volumes managed: {volumes_managed}\n \"\"\".format(**self.report))\n\n if len(self.report['problem_volumes']) > 0:\n report += '> \\n> \\n> Volumes with tags that prevented snapshot management: \\n'\n for vol in self.report['problem_volumes']:\n report += '> * {}\\n'.format(vol)\n\n if self.sns_arn is not None:\n sns = boto3.resource('sns')\n topic = sns.Topic(self.sns_arn)\n topic.publish(Message=report, Subject='AWS Snapshot Report')\n logging.warn('Snapshot run completed at {}. 
Report sent via SNS.'.format(\n self.report['finished']))\n else:\n logging.warn(report)\n\n def daily_run(self):\n self._load_config()\n for region in self.ec2_regions:\n self.scan_and_snap(region)\n self.generate_report()\n\ndef main(event, context):\n snapper = AwsSnapper()\n snapper.daily_run()\n\nif __name__ == '__main__':\n main(0, 0)","sub_path":"aws-snapper.py","file_name":"aws-snapper.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"112735508","text":"#\n# Monitor the gke pool\n# Reuse the python script name for ease of use\n#\n\nimport sys,time\nimport kubernetes\n\nkubernetes.config.load_incluster_config()\nk8s = kubernetes.client.CoreV1Api()\n\ndef get_nodes():\n cntn = 0 # number of nodes\n cntg = 0 # number of gpus\n nlist = k8s.list_node(label_selector=\"osgclass=gpu\")\n cntn = len(nlist.items)\n for i in range(cntn):\n try:\n cntg += int(nlist.items[i].status.capacity['nvidia.com/gpu'])\n except:\n pass # no gpu?\n return (cntn, cntg)\n\ndef get_pods():\n cntr = 0 # number of running\n cntp = 0 # number of pending\n plist = k8s.list_namespaced_pod(namespace=\"gke-icecube\",label_selector=\"prp-htcondor-portal=wn\")\n nels = len(plist.items)\n for i in range(nels):\n phase = plist.items[i].status.phase\n if phase == \"Pending\":\n cntp += 1\n elif phase==\"Running\":\n cntr += 1\n return (cntr, cntp)\n\nwhile True:\n try:\n (cntn, cntg) = get_nodes()\n (cntr, cntp) = get_pods()\n\n with open(\"/var/log/provisioner/logs/monitor/gke.log.\"+time.strftime(\"%Y%m%d\"),\"a\") as fd:\n fd.write(\"%s (%i) Nodes: %i (gpus: %i ) Pods running: %i pending: %i\\n\"%(time.ctime(), time.time(), cntn, cntg, cntr, cntp))\n except:\n print(\"Monitoring exception!\")\n # sleep a bit\n time.sleep(60)\n\n","sub_path":"gke21-icecube/direct/provisioner-monitor/provisioner_main.py","file_name":"provisioner_main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"2872945","text":"from rest_framework import serializers\n\nfrom .models import Category\n\nclass SubCategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('id', 'name')\n\n\nclass CreateCategorySerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(queryset=Category.objects.all(), required=False)\n\n def get_fields(self):\n fields = super(CreateCategorySerializer, self).get_fields()\n fields['children'] = CreateCategorySerializer(many=True, required=False)\n return fields\n\n\n def create(self, validated_data):\n children = validated_data.pop('children', [])\n top_cat = super().create(validated_data)\n for child in children:\n child['parent'] = top_cat\n CreateCategorySerializer().create(child)\n return top_cat\n\n\n class Meta:\n model = Category\n fields = ('name', 'parent', 'children')\n\n\nclass ReadCategorySerializer(serializers.ModelSerializer):\n parents = serializers.SerializerMethodField()\n children = serializers.SerializerMethodField()\n siblings = serializers.SerializerMethodField()\n \n def get_ancestors(self, parents, obj):\n # walk up the tree, accumulating each ancestor model (nearest parent first)\n if obj.parent:\n qs = Category.objects.filter(pk=obj.parent.pk)\n parents += qs\n if qs:\n self.get_ancestors(parents, qs[0])\n return parents\n\n\n def get_parents(self, obj):\n ancestors = self.get_ancestors([], obj)\n\n serializer = SubCategorySerializer(ancestors, many=True)\n 
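# e.g. [{'id': 4, 'name': 'Laptops'}, {'id': 1, 'name': 'Electronics'}] -- illustrative values only; nearest parent first, root last\n 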
return serializer.data\n\n\n def get_children(self, obj):\n children = obj.children.all()\n serializer = SubCategorySerializer(children, many=True)\n return serializer.data\n\n\n def get_siblings(self, obj):\n if obj.parent:\n qs = obj.parent.children.exclude(pk=obj.pk)\n else:\n qs = Category.objects.none()\n serializer = SubCategorySerializer(qs, many=True)\n return serializer.data\n\n class Meta:\n model = Category\n fields = ('id', 'name', 'parents', 'children', 'siblings')","sub_path":"api/categories/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"597747374","text":"from time import time\nfrom src.Utils import Utils\nfrom src import Constants\nimport sqlite3\nimport os\n\n\nclass PersisterSqlite(object):\n conn = None\n insert_data_statement = None\n insert_recommendation_sql = None\n\n def __init__(self):\n #self.conn = sqlite3.connect(Constants.DB_DIR)\n self.conn = sqlite3.connect(Constants.DB_DIR)\n self.insert_data_statement = \"INSERT INTO sp500_time_series_data (symbol, company_name, trade_date, \" \\\n \"RSI_rsi, MACD_macd_signal, MACD_macd_histogram, MACD_macd,\" \\\n \"BB_real_upper_band, BB_real_middle_band, BB_real_lower_band, OBV_obv,open_price, low_price, high_price, close_price, volume) \" \\\n \"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\n self.insert_recommendation_sql = \"INSERT INTO recommendation(symbol, model_name, recommendation)\" \\\n \"VALUES (?,?,?)\"\n\n def insert_recommendation(self, row_values):\n self.insert_row(self.insert_recommendation_sql, row_values)\n\n def insert_row(self, sql_stmt, row_values):\n try:\n db_cursor = self.conn.cursor()\n db_cursor.execute(sql_stmt, row_values)\n self.conn.commit()\n #self.conn.close()\n except sqlite3.Error as err:\n print(\"Something went wrong: {}\".format(err))\n self.conn.rollback()\n\n def insert(self, stmts):\n try:\n db_cursor = self.conn.cursor()\n for stmt in stmts:\n db_cursor.execute(stmt)\n self.conn.commit()\n self.conn.close()\n except sqlite3.Error as err:\n print(\"Something went wrong: {}\".format(err))\n self.conn.rollback()\n\n def insert_data(self, data, symbol_map):\n try:\n db_cursor = self.conn.cursor()\n for symbol in data:\n market_data = data[symbol]\n market_data = market_data.dropna()\n insert_data = []\n start_time = time()\n print(\"Persisting data for symbol: \" + symbol)\n for index, row in market_data.iterrows():\n date = index.date()\n split_factor = float(row['4. close']) / float(row['5. adjusted close'])\n insert_data.append([symbol, symbol_map[symbol], date, float(row['RSI']), float(row['MACD_Signal']), float(row['MACD_Hist']),\n float(row['MACD']), float(row['Real Upper Band']), float(row['Real Middle Band']),\n float(row['Real Lower Band']), float(row['OBV']), round(float(row['1. open']) / split_factor, 4),\n round(float(row['3. low']) / split_factor, 4), round(float(row['2. high']) / split_factor, 4), float(row['5. adjusted close']), int(row['6. 
volume'])])\n db_cursor.executemany(self.insert_data_statement, insert_data)\n self.conn.commit()\n end_time = time()\n print(\"Persisted data for symbol: \" + symbol + \" in ms: \" + str(end_time - start_time))\n self.conn.close()\n except sqlite3.Error as err:\n print(\"Something went wrong: {}\".format(err))\n self.conn.rollback()\n","sub_path":"PersisterSqlite.py","file_name":"PersisterSqlite.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"260067548","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\ndef creFile(rowCnt):\n\n f = open('testFile' + str(rowCnt), mode='w')\n\n for i in range(int(rowCnt)):\n f.write(str(i).rjust(20, '0') + '\\n')\n\n f.close()\n\nif __name__ == '__main__':\n \n creFile(sys.argv[1])\n\n","sub_path":"py/test1/creTestFile.py","file_name":"creTestFile.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"641445757","text":"# Import Packages\nimport os\nimport nlp\nimport torch \nfrom torch import nn\nimport logging\nimport datasets\nfrom dataclasses import dataclass, field\nfrom typing import Any, Union, Dict, List, Optional\nfrom tqdm.auto import tqdm\nfrom transformers import AutoTokenizer\nfrom transformers import AutoModelForSeq2SeqLM\nfrom transformers import Trainer as HFTrainer\nfrom transformers import TrainingArguments\nfrom transformers import HfArgumentParser\n\n# Hyperparameters\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('* You are using device: ', device)\nlogger = logging.getLogger(__name__)\n\nmodel_checkpoint = \"facebook/bart-base\" # \"facebook/bart-base\"\nmodel_test_checkpoint = 'training_v1/'\n\nmax_source_length = 512\nmax_target_length = 32\npretrained_model = 'facebook/bart-base' # \"facebook/bart-base\"\nnum_epochs = 4\nevaluation_strategy = 'steps' # epoch/no/steps\neval_steps = 5000\nsave_steps = 5000\n\nprint('* Your hyperparameter settings:')\nprint('\\t PRETRAINED_MODEL:', pretrained_model)\nprint('\\t MAX_SOURCE_LENGTH:', max_source_length)\nprint('\\t MAX_TARGET_LENGTH:', max_target_length)\nprint('\\t NUM_EPOCHS:', num_epochs)\nprint('\\t EVALUATION_STRATEGY:', evaluation_strategy)\nprint('\\t SAVE_STEPS:', save_steps)\nprint('* Your hyperparameter settings:\\n')\nprint('\\tPRETRAINED_MODEL:', model_checkpoint)\nprint('\\tRESUME FROM: ', model_test_checkpoint)\n\n# Define Functions\ndef convert_to_features(example_batch):\n source_encoding = tokenizer.batch_encode_plus(\n example_batch['source_text'],\n max_length=max_source_length,\n padding='max_length',\n pad_to_max_length=True,\n truncation=True, \n )\n target_encoding = tokenizer.batch_encode_plus(\n example_batch['target_text'],\n max_length=max_target_length,\n padding='max_length',\n pad_to_max_length=True,\n truncation=True, \n )\n encodings = {\n 'source_ids': source_encoding['input_ids'], \n 'target_ids': target_encoding['input_ids'],\n 'attention_mask': source_encoding['attention_mask'],\n }\n\n return encodings\n\n\ndef get_correct_alignement(context, answer): \n gold_text = answer['text'][0]\n start_idx = answer['answer_start'][0]\n end_idx = start_idx + len(gold_text)\n if context[start_idx:end_idx] == gold_text:\n return start_idx, end_idx \n elif context[start_idx-1:end_idx-1] == gold_text:\n return start_idx-1, end_idx-1 \n elif context[start_idx-2:end_idx-2] == gold_text:\n return start_idx-2, end_idx-2 \n else:\n raise ValueError()\n \n \ndef 
process_qg_text(example):\n context = example['context']\n question = example['question']\n answer = example['answers']\n answer_text = answer['text'][0]\n start_pos, end_pos = get_correct_alignement(context, answer)\n qg_input = f\"{context[:start_pos]} {hl} {answer_text} {hl} {context[end_pos:]}\"\n qg_target = f\"{question}\"\n qg_input = qg_input + \" \"\n qg_target = qg_target + \" \"\n return {\"source_text\": qg_input, \"target_text\": qg_target}\n\n\ndef trim_batch(input_ids, pad_token_id, attention_mask=None,):\n \"\"\"Remove columns that are populated exclusively by pad_token_id\"\"\"\n keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)\n if attention_mask is None:\n return input_ids[:, keep_column_mask]\n else:\n return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])\n\ndef get_predictions(model, tokenizer, data_loader, num_beams=4, max_length=32, length_penalty=1):\n model.to(device)\n \n predictions = []\n model.eval()\n with torch.no_grad():\n for batch in tqdm(data_loader):\n outs = model.generate(\n input_ids=batch['input_ids'].to(device), \n attention_mask=batch['attention_mask'].to(device),\n num_beams=num_beams,\n max_length=max_length,\n length_penalty=length_penalty,\n )\n prediction = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]\n predictions.extend(prediction)\n\n return predictions\n\n\nclass T2TDataCollator():\n def __init__(self, tokenizer, model_type='bart', mode='training'):\n self.tokenizer = tokenizer\n self.model_type = model_type\n self.mode = mode\n\n def __call__(self, batch: List) -> Dict[str, torch.Tensor]:\n\n input_ids = torch.stack([example['source_ids'] for example in batch])\n target_ids = torch.stack([example['target_ids'] for example in batch])\n attention_mask = torch.stack([example['attention_mask'] for example in batch])\n\n pad_token_id = self.tokenizer.pad_token_id\n \n input_ids, attention_mask = trim_batch(input_ids, pad_token_id, attention_mask=attention_mask)\n target_ids = trim_batch(target_ids, pad_token_id)\n \n if self.model_type == 'bart':\n decoder_input_ids = target_ids[:, :-1].contiguous()\n lm_labels = target_ids[:, 1:].clone()\n if self.mode == 'training':\n lm_labels[target_ids[:, 1:] == pad_token_id] = -100 \n else: # self.model_type == 't5'\n print('You are using model_type: t5.')\n lm_labels = target_ids.clone()\n decoder_input_ids = self._shift_right(lm_labels)\n if self.mode == 'training':\n lm_labels[lm_labels[:, :] == pad_token_id] = -100\n \n params = {\n \"input_ids\": input_ids, \n \"attention_mask\": attention_mask,\n \"labels\": lm_labels,\n \"decoder_input_ids\": decoder_input_ids\n }\n \n return params\n \n def _shift_right(self, input_ids):\n decoder_start_token_id = self.tokenizer.pad_token_id\n pad_token_id = self.tokenizer.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined.\"\n\n # shift the inputs to the right \n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n \n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `labels` has only positive values and -100\"\n\n return shifted_input_ids\n\n\n# Load Model\nprint('* Load Model...')\nmodel = 
AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)\nprint('\\t Model structure: ', model)\n\ntokenizer = AutoTokenizer.from_pretrained(model_checkpoint)\ntokenizer.add_tokens(['<hl>', '<sep>'])  # answer-highlight and separator special tokens (assumed values; see process_qg_text)\nprint('\\t Max length of tokenizer: ', tokenizer.model_max_length)\nhl = \"<hl>\"\nsep = \"<sep>\"\n\n# Load Valid Dataset\nprint('* Loading Datasets...')\n#train_dataset = nlp.load_dataset('squad', split=nlp.Split.TRAIN)\nvalid_dataset = nlp.load_dataset('squad', split=nlp.Split.VALIDATION)\n\nprint('* Preparing Datasets...')\nhl = \"<hl>\"\n\n# extract useful information\n#trainset = train_data.map(process_qg_text)\nvalidset = valid_dataset.map(process_qg_text)\n#print('* The amount of train dataset:', len(trainset))\nprint('* The amount of valid dataset:', len(validset))\n\n\nprint('* Tokenizing data...')\n# tokenize data\n#train_features = trainset.map(convert_to_features, batched=True)\nvalid_features = validset.map(convert_to_features, batched=True)\n\ncolumns = [\"source_ids\", \"target_ids\", \"attention_mask\"]\n#train_features.set_format(type='torch', columns=columns)\nvalid_features.set_format(type='torch', columns=columns)\n\n#torch.save(train_features, 'data/train_v1.pt')\n#logger.info(f\"saved train dataset at data/train_v1.pt\")\n#print(\"saved train dataset at data/train_v1.pt\")\n\n#torch.save(valid_features, 'data/valid_v1.pt')\n#logger.info(f\"saved validation dataset at data/valid_v1.pt\")\n#print(\"saved validation dataset at data/valid_v1.pt\")\n\n\ndata_collator = T2TDataCollator(\n tokenizer=tokenizer,\n model_type='bart',\n mode=\"inference\"\n)\nprint('* You are using model type: ', data_collator.model_type)\n\n# Inference Model: use valid dataset to inference\nloader = torch.utils.data.DataLoader(valid_features, batch_size=32, collate_fn=data_collator)\nmodel_1 = AutoModelForSeq2SeqLM.from_pretrained(model_test_checkpoint)\npredictions = get_predictions(\n model=model_1,\n tokenizer=tokenizer,\n data_loader=loader,\n num_beams=4,\n max_length=32\n)\nwith open('inference_v1/inference_2.txt', 'w') as f:\n f.write(\"\\n\".join(predictions))\n \nwith open('inference_v1/ground_truth_2.txt', 'w') as f:\n f.write(\"\\n\".join(valid_features['question']))\n \n# with open('inference/answers.txt', 'w') as f:\n# f.write(\"\\n\".join(valid_features['answers']['text']))\n\n# Inference\n# !pip install git+https://github.com/Maluuba/nlg-eval.git@master\nfrom nlgeval import compute_metrics, compute_individual_metrics\nprint('testing on checkpoint:', model_test_checkpoint)\nmetrics_dict = compute_metrics(hypothesis='inference_v1/inference_2.txt', references=['inference_v1/ground_truth_2.txt'])\nprint('* Process Finished!')","sub_path":"bart/test_bart_v1.py","file_name":"test_bart_v1.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"463536403","text":"\"\"\"\r\nAssignment #10\r\nJun Seob Shim\r\n8/12/2020\r\nIntro to Programming Section 012\r\nThesaurus\r\n\"\"\"\r\n#import random module\r\nimport random\r\n\r\n#define thesaurus\r\n#start counter for how many words there are\r\nwordcounter = 0\r\n#open file for reading\r\nthesaurus_file = open(\"python_asg10_Roget_Thesaurus.txt\",\"r\")\r\n\r\n\"\"\"\r\nthe thesaurus file is messed up somehow and a lot of the synonyms are just wrong (e.g. synonyms for \"happy\" are agreement,\r\ncheerfulness,elegance,occasion,pleasure) which obviously doesn't make any sense. 
should an identically named, valid thesaurus\r\nfile be supplied, the program should work as intended\r\n\"\"\"\r\n\r\n#create empty dictionary to add to\r\nthesaurus = {}\r\n\r\n#reading and processing data from file\r\nalldata = thesaurus_file.read()\r\n\r\n#split into list by lines\r\ngiantlist = alldata.split(\"\\n\")\r\n\r\n#for each word and its synonyms\r\nfor i in range(len(giantlist)):\r\n #split each line into a list by words\r\n wordlist = giantlist[i].split(\",\")\r\n #count number of words in thesaurus\r\n wordcounter += len(wordlist)\r\n\r\n #conditions based on how many synonyms\r\n if len(wordlist) > 2:\r\n synonyms = wordlist[1:len(wordlist)-1]\r\n elif len(wordlist) == 2:\r\n synonyms = [wordlist[1]]\r\n else:\r\n synonyms = []\r\n\r\n #add to dictionary (thesaurus)\r\n thesaurus[wordlist[0]] = synonyms\r\n\r\n#close file\r\nthesaurus_file.close()\r\n\r\n#print prelim data\r\nprint(\"Total words in thesaurus: \",wordcounter)\r\nprint()\r\n\r\n#ask user for input chance\r\nchance = float(input(\"Enter a % chance to change a word: \"))\r\n\r\n#open lyrics file\r\nbaby_file = open(\"bieber_baby.txt\",\"r\")\r\n\r\n#take out into long string, and close\r\nphrase = baby_file.read()\r\nbaby_file.close()\r\n\r\n#set up new phrase to add to (remove punctuation)\r\nnewphrase = \"\"\r\n\r\n#iterate over supplied phrase, and add to empty string\r\nfor i in phrase:\r\n if i.isalpha() == True or i.isspace() == True:\r\n newphrase += i.lower()\r\n\r\n#split new string into a list (by words)\r\nphraselist = newphrase.split(\" \")\r\n\r\n#create new empty string to put final result in\r\napplied = \"\"\r\n\r\n#iterate over thesaurus\r\nfor key, value in thesaurus.items():\r\n #if the key is in the list of words\r\n if (key in phraselist) == True:\r\n #iterate over the words in the list\r\n for i in range(len(phraselist)):\r\n #if the element equals a key\r\n percentage = random.random()\r\n if phraselist[i] == key and percentage < chance:\r\n #pick a random value and replace\r\n if len(value)== 0:\r\n continue\r\n else:\r\n r = random.randint(0,len(value)-1)\r\n phraselist[i] = value[r].upper()\r\n\r\n#add the list back to the empty string with spaces\r\nfor i in range(len(phraselist)):\r\n applied += phraselist[i] + \" \"\r\n\r\n#print final transformed string\r\nprint(applied)\r\n","sub_path":"Intro to Programming/Assignments/Assignment 10/ShimJun Seob_assign10.py","file_name":"ShimJun Seob_assign10.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373376674","text":"import pdb\nimport re\nimport logging\ntry:\n from encoding import encodingConstants\n from ... 
import constants\nexcept:\n from encoding import encodingConstants\n import constants\n\n\nfrom lark import Lark, Transformer\nsymmetric_operators = [encodingConstants.LAND, encodingConstants.LOR]\nbinary_operators = [encodingConstants.LAND, encodingConstants.LOR, encodingConstants.UNTIL,encodingConstants.IMPLIES, encodingConstants.BEFORE, encodingConstants.STRICTLY_BEFORE]\nunary_operators = [\"X\", encodingConstants.F, encodingConstants.G, encodingConstants.LNOT, encodingConstants.ENDS]\nclass SimpleTree:\n def __init__(self, label = \"dummy\"):\n self.left = None\n self.right = None\n self.label = label\n \n def __hash__(self):\n return hash((self.label, self.left, self.right))\n \n def __eq__(self, other):\n if other == None:\n return False\n else:\n return self.label == other.label and self.left == other.left and self.right == other.right\n \n def __ne__(self, other):\n return not self == other\n \n def _isLeaf(self):\n return self.right == None and self.left == None\n \n def _addLeftChild(self, child):\n if child == None:\n return\n if type(child) is str:\n child = SimpleTree(child)\n self.left = child\n \n def _addRightChild(self, child):\n if type(child) is str:\n child = SimpleTree(child)\n self.right = child\n \n def addChildren(self, leftChild = None, rightChild = None): \n self._addLeftChild(leftChild)\n self._addRightChild(rightChild)\n \n \n def addChild(self, child):\n self._addLeftChild(child)\n \n def getAllNodes(self):\n leftNodes = []\n rightNodes = []\n \n if self.left != None:\n leftNodes = self.left.getAllNodes()\n if self.right != None:\n rightNodes = self.right.getAllNodes()\n return [self] + leftNodes + rightNodes\n\n def getAllOperators(self):\n leftOperators = []\n rightOperators = []\n if self.left is None and self.right is None:\n return []\n if not self.left is None:\n leftOperators = self.left.getAllOperators()\n if not self.right is None:\n rightOperators = self.right.getAllOperators()\n return [self.label] + leftOperators + rightOperators\n\n\n def getAllLabels(self):\n if self.left != None:\n leftLabels = self.left.getAllLabels()\n else:\n leftLabels = []\n \n if self.right != None:\n rightLabels = self.right.getAllLabels()\n else:\n rightLabels = []\n return [self.label] + leftLabels + rightLabels\n\n def __repr__(self):\n if self.left == None and self.right == None:\n return self.label\n \n # the (not enforced assumption) is that if a node has only one child, that is the left one\n elif self.left != None and self.right == None:\n return self.label + '(' + self.left.__repr__() + ')'\n \n elif self.left != None and self.right != None:\n return self.label + '(' + self.left.__repr__() + ',' + self.right.__repr__() + ')'\n\n\nclass Formula(SimpleTree):\n \n def __init__(self, formulaArg = \"dummyF\"):\n \n if not isinstance(formulaArg, str):\n self.label = formulaArg[0]\n self.left = formulaArg[1]\n try:\n self.right = formulaArg[2]\n except:\n self.right = None\n else:\n super().__init__(formulaArg)\n\n def __lt__(self, other):\n\n if self.getDepth() < other.getDepth():\n return True\n elif self.getDepth() > other.getDepth():\n return False\n else:\n if self.getNumberOfSubformulas() < other.getNumberOfSubformulas():\n return True\n elif self.getNumberOfSubformulas() > other.getNumberOfSubformulas():\n return False\n if self._isLeaf() and other._isLeaf():\n return self.label < other.label\n\n if self.right is None:\n if other.right is None:\n return self.left < other.left\n else:\n return True\n\n if not self.right is None:\n if other.right is None:\n return 
False\n\n return self.label < other.label\n\n \"\"\"\n normalization is an incomplete method to eliminate equivalent formulas\n \"\"\"\n\n @classmethod\n def normalize(cls, f):\n\n if f is None:\n return None\n if f._isLeaf():\n return Formula([f.label, f.left, f.right])\n fLeft = Formula.normalize(f.left)\n fRight = Formula.normalize(f.right)\n\n\n if fLeft.label == \"true\":\n if f.label in [encodingConstants.LOR, encodingConstants.F, encodingConstants.G, encodingConstants.X]:\n return Formula(\"true\")\n if f.label in [encodingConstants.LAND, encodingConstants.IMPLIES]:\n return Formula.normalize(fRight)\n if f.label == encodingConstants.LNOT:\n return Formula(\"false\")\n if f.label == encodingConstants.UNTIL:\n return Formula.normalize(Formula([encodingConstants.F, fRight, None]))\n\n if fLeft.label == \"false\":\n if f.label in [encodingConstants.IMPLIES, encodingConstants.LNOT]:\n return Formula(\"true\")\n if f.label in [encodingConstants.LAND, encodingConstants.F, encodingConstants.G, encodingConstants.X]:\n return Formula(\"false\")\n if f.label in [encodingConstants.LOR, encodingConstants.UNTIL]:\n return Formula.normalize(fRight)\n\n if not fRight is None:\n if fRight.label == \"true\":\n if f.label in [encodingConstants.LOR, encodingConstants.IMPLIES, encodingConstants.UNTIL]:\n return Formula(\"true\")\n if f.label in [encodingConstants.LAND]:\n return Formula.normalize(fLeft)\n\n if fRight.label == \"false\":\n if f.label in []:\n return Formula(\"true\")\n if f.label in [encodingConstants.LAND, encodingConstants.UNTIL]:\n return Formula(\"false\")\n if f.label in [encodingConstants.LOR]:\n return Formula.normalize(fLeft)\n if f.label in [encodingConstants.IMPLIES]:\n # p -> false reduces to !p\n return Formula.normalize(Formula([encodingConstants.LNOT, fLeft, None]))\n\n # eliminating p&p and similar\n if fLeft == fRight:\n if f.label in [encodingConstants.LAND, encodingConstants.UNTIL, encodingConstants.LOR]:\n return Formula.normalize(fLeft)\n elif f.label in [encodingConstants.BEFORE]:\n return Formula([encodingConstants.BEFORE, fLeft, Formula(\"true\")])\n elif f.label in [encodingConstants.IMPLIES]:\n return Formula(\"true\")\n\n # eliminating Fp U p and !p U p\n if f.label == encodingConstants.UNTIL:\n if fLeft.label == encodingConstants.F or fLeft.label == encodingConstants.LNOT:\n fLeftLeft = Formula.normalize(fLeft.left)\n if fLeftLeft == fRight:\n return Formula.normalize(Formula([encodingConstants.F, fLeftLeft]))\n if fRight.label == encodingConstants.F:\n fRightLeft = Formula.normalize(fRight.left)\n if fRightLeft == fLeft:\n return fRight\n\n if f.label == encodingConstants.F and fLeft.label == encodingConstants.F:\n return fLeft\n\n # if there is p | q, don't add q | p\n if f.label in symmetric_operators and not fLeft < fRight:\n return Formula([f.label, fRight, fLeft])\n\n return Formula([f.label, fLeft, fRight])\n\n\n @classmethod\n def convertTextToFormula(cls, formulaText):\n\n f = Formula()\n try:\n formula_parser = Lark(r\"\"\"\n ?formula: _binary_expression\n |_unary_expression\n | constant\n | variable\n !constant: \"true\"\n | \"false\"\n _binary_expression: binary_operator \"(\" formula \",\" formula \")\"\n _unary_expression: unary_operator \"(\" formula \")\" \n !variable: NAME\n !binary_operator: \"and\" | \"or\" | \"->\" | \"until\" | \"B\" | \"before\"\n !unary_operator: \"eventually\" | \"G\" | \"neg\" | \"X\" | \"E\" \n %import common.SIGNED_NUMBER\n %import common.WS \n %import common.ESCAPED_STRING\n %import common.CNAME -> NAME\n %ignore WS \n \"\"\", start = 
'formula')\n \n \n tree = formula_parser.parse(formulaText)\n #logging.debug(tree.pretty())\n \n except Exception as e:\n logging.error(\"can't parse formula %s\" %formulaText)\n logging.error(\"error: %s\" %e)\n return None  # parsing failed, so tree is undefined beyond this point\n \n \n f = TreeToFormula().transform(tree)\n return f\n\n # used for compatibility with DSLTL grammar\n def reFormat(self):\n lb = \"{\"\n rb = \"}\"\n if self._isLeaf():\n label_array = self.label.split(\"_\")\n if len(label_array) >= 3 and label_array[-3] == \"at\":\n # this is because of the weird thing that numbers have to be separated by comma\n formatted_label = \" \".join(label_array[:-1])\n formatted_label = formatted_label + \", \"+label_array[-1]\n return formatted_label\n if len(label_array) == 2 and label_array[0] == \"at\":\n formatted_label = \" \".join(label_array)\n return formatted_label\n else:\n return self.label\n else:\n if self.label in unary_operators:\n return lb + self.label + \" \" + self.left.reFormat() + rb\n\n if self.label in binary_operators:\n return lb + self.left.reFormat() + \" \" + self.label + \" \" + self.right.reFormat() + rb\n\n\n def prettyPrint(self, top=False):\n if top is True:\n lb = \"\"\n rb = \"\"\n else:\n lb = \"(\"\n rb = \")\"\n if self._isLeaf():\n return self.label\n if self.label in unary_operators:\n return lb + self.label +\" \"+ self.left.prettyPrint() + rb\n\n if self.label in binary_operators:\n return lb + self.left.prettyPrint() +\" \"+ self.label +\" \"+ self.right.prettyPrint() + rb\n\n \n \n def getAllVariables(self):\n allNodes = list(set(self.getAllNodes()))\n return [ node for node in allNodes if node._isLeaf() == True ]\n def getDepth(self):\n if self.left == None and self.right == None:\n return 0\n leftValue = -1\n rightValue = -1\n if self.left != None:\n leftValue = self.left.getDepth()\n if self.right != None:\n rightValue = self.right.getDepth()\n return 1 + max(leftValue, rightValue)\n \n def getNumberOfSubformulas(self):\n return len(self.getSetOfSubformulas())\n \n def getSetOfSubformulas(self):\n if self.left == None and self.right == None:\n return [self]\n leftValue = []\n rightValue = []\n if self.left != None:\n leftValue = self.left.getSetOfSubformulas()\n if self.right != None:\n rightValue = self.right.getSetOfSubformulas()\n return list(set([self] + leftValue + rightValue))\n\n \n\nclass TreeToFormula(Transformer):\n def formula(self, formulaArgs):\n return Formula(formulaArgs)\n def variable(self, varName):\n\n\n\n varStr = str(varName[0])\n varDesc = varStr.split(\"_\")\n\n # if the variables are not as expected\n\n if varDesc[0] == constants.PICK and not (varDesc[2] in constants.COLORS and varDesc[3] in constants.SHAPES):\n if varDesc[2] in constants.SHAPES or varDesc[3] in constants.COLORS:\n swap = varDesc[2]\n varDesc[2] = varDesc[3]\n varDesc[3] = swap\n\n if varDesc[2] in constants.SHAPES and varDesc[3] in constants.SHAPES:\n varDesc[2] = \"x\"\n if varDesc[2] in constants.COLORS and varDesc[3] in constants.COLORS:\n varDesc[3] = \"x\"\n varStr = \"_\".join(varDesc)\n\n return Formula([varStr, None, None])\n def constant(self, arg):\n if str(arg[0]) == \"true\":\n connector = encodingConstants.LOR\n elif str(arg[0]) == \"false\":\n connector = encodingConstants.LAND\n return Formula([connector, Formula([\"x0\", None, None]), Formula([encodingConstants.LNOT, Formula([\"x0\", None, None] ), None])])\n \n def binary_operator(self, args):\n return str(args[0])\n def unary_operator(self, args):\n return str(args[0])\n \n \n \n 
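\n\n# A minimal usage sketch (not part of the original module): parse a formula\n# string with the grammar above and inspect the resulting tree.\nif __name__ == \"__main__\":\n demo = Formula.convertTextToFormula(\"and(G(x0),eventually(x1))\")\n print(demo.prettyPrint(top=True)) # e.g. (G x0) and (eventually x1)\n print(demo.getDepth(), demo.getNumberOfSubformulas())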
","sub_path":"encoding/utils/SimpleTree.py","file_name":"SimpleTree.py","file_ext":"py","file_size_in_byte":13302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290915370","text":"import pandas as pd\n\n\ndef find_line_index(table_title,data_list, start=0):\n for index, line in enumerate(data_list[start:]):\n if table_title in line:\n break\n return index + start\n\ndef filter_lines(line_str, data_list, start=0):\n for index, line in reversed(list(enumerate(data_list))):\n if index == start:\n break\n if line_str in line:\n del data_list[index]\n return data_list\n\ndef convert_to_rows(l,alpha_pr_col):\n number_columns = 10\n\n tags = ['f', 'F', 'h','H','k','K','n','N','R','w','s','S','t','T','r']\n\n l = l.replace('r',' ')\n for i in tags:\n l = l.replace(i, '')\n\n split_data = l.split(' ')\n\n for index, elem in reversed(list(enumerate(split_data))):\n if elem == '':\n del split_data[index]\n if elem == '-':\n split_data[index] = -1.0\n else:\n split_data[index] = float(split_data[index])\n return split_data\n\ndef take_table(txt_table):\n iter = []\n objective = []\n inf_pr = []\n inf_du = []\n lg_mu = []\n abs_d = []\n lg_rg = []\n alpha_du = []\n alpha_pr = []\n ls = []\n\n alpha_pr_ind = 8\n\n for index, r in enumerate(txt_table):\n data_row = convert_to_rows(r, alpha_pr_ind)\n if data_row:\n cols= data_row\n iter.append(cols[0])\n objective.append(cols[1])\n inf_pr.append(cols[2])\n inf_du.append(cols[3])\n lg_mu.append(cols[4])\n abs_d.append(cols[5])\n lg_rg.append(cols[6])\n alpha_du.append(cols[7])\n alpha_pr.append(cols[8])\n ls.append(cols[9])\n\n table_data = {'iter': iter, 'objective': objective,\n 'inf_pr': inf_pr, 'inf_du': inf_du,\n 'lg_mu': lg_mu, 'abs_d': abs_d,\n 'lg_rg': lg_rg, 'alpha_du': alpha_du,\n 'alpha_pr': alpha_pr, 'ls': ls}\n return table_data\n\ndef parse_IPOPT_log(file, output_file):\n content = []\n table_title = 'iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls'\n table_end = 'Number of Iterations....:'\n slack_line = 'Slack too small'\n with open(file) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n table_start = find_line_index(table_title, content)\n content = filter_lines(table_title,content, start=table_start+1)\n content = filter_lines(slack_line, content)\n table_data = content[table_start+1:table_end]\n table = take_table(table_data)\n\n df = pd.DataFrame(table).set_index('iter')\n\n df.to_pickle(output_file)\n return df","sub_path":"pythonProject/Parsing/IPOPT_Parse.py","file_name":"IPOPT_Parse.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"336616541","text":"#!/usr/bin/python3\n\nimport re\nimport sys\n\ntmp = {}\n\nfor line in sys.stdin:\n line = line.strip()\n\n words = re.split(\"[ *$&#/\\t\\n\\f\\\"\\'\\\\,.:;?!\\[\\](){}<>~\\-_]\", line.lower())\n\n for word in words:\n if len(word):\n tmp[word] = tmp.get(word, 0) + 1\n\nfor k, v in tmp.items():\n print (k + '\\t' + str(v))\n\n","sub_path":"lab02/p1/mapper2.py","file_name":"mapper2.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"251055646","text":"import os\r\nimport re\r\nimport time\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nclass Samples:\r\n\tbase = \"/shares_bgfs/margres_lab/Devils/BEE_Probe_Data\"\r\n\tbatch_ids = 
[f\"{base}/Capture1_6-11-21/rename_key.csv\", f\"{base}/Capture2_7-29-21/rename_key.csv\", f\"{base}/Capture3/rename_key.csv\", f\"{base}/Capture4/rename_key.csv\", f\"{base}/Capture5/rename_key.csv\"]\r\n\tprelim = batch_ids[:2]\r\n\r\n\r\n\tdef __init__(self,\r\n\t\tsample_csv_path=\"/work_bgfs/d/dgallinson/data/pheno_data/master_corrections.csv\",\r\n\t\tid_paths=[]\r\n\t\t):\r\n\t\tfull_df = pd.read_csv(sample_csv_path)\r\n\t\tif id_paths:\r\n\t\t\tids = self.__L_ID(id_paths)\r\n\t\t\tself.sample_df = full_df[full_df[\"Library number\"].isin(ids)].reset_index(drop=True)\r\n\t\telse:\r\n\t\t\tself.sample_df = full_df\r\n\t\tself.factor_key = {\"pheno\": [], \"key\": [], \"val\": []}\r\n\r\n\r\n\tdef __str__(self):\r\n\t\treturn repr(self.sample_df)\r\n\r\n\r\n\tdef chip_sort(self, df, num_only=False, reset_index=True):\r\n\t\tif num_only:\r\n\t\t\tsort_df = df.sort_values(by=\"Microchip\", key=lambda x: x.str[-6:])\r\n\t\telse:\r\n\t\t\tsort_df = df.sort_values(by=\"Microchip\")\r\n\t\tif reset_index:\r\n\t\t\tsort_df.reset_index(drop=True, inplace=True)\r\n\t\treturn sort_df\r\n\r\n\r\n\tdef compare_to(self, df_path, cols, sep=\",\", id_col=\"Microchip\", inplace=True):\r\n\t\tcompare_df = pd.read_csv(df_path, sep=sep)\r\n\t\tcompare_df = compare_df[[id_col] + cols]\r\n\t\tcompare_df.rename(columns={id_col: \"Microchip\"}, inplace=True)\r\n\t\tcombined = self.sample_df.merge(compare_df, how=\"left\", on=\"Microchip\")\r\n\t\tif inplace:\r\n\t\t\tself.sample_df = combined\r\n\t\telse:\r\n\t\t\treturn combined\r\n\r\n\r\n\tdef count(self, col, pattern):\r\n\t\treturn len(self.sample_df[self.sample_df[col] == pattern])\r\n\r\n\r\n\tdef count_groups(self, col, sorted=True):\r\n\t\tgroups = self.sample_df.groupby(col)[col].count()\r\n\t\treturn groups.sort_index()\r\n\r\n\r\n\tdef count_pairs(self, pair_N):\r\n\t\tpairs = self.get_pairs()\r\n\t\treturn len(pairs[pairs == pair_N])\r\n\r\n\r\n\tdef extract_year(self, col, replace=True):\r\n\t\tdates = self.sample_df[col]\r\n\t\tyears = dates[~dates.isna()].astype(str)\r\n\t\tif (years.str.find(\".\") != -1).any():\r\n\t\t\tyears = years.str[:4]\r\n\t\telse:\r\n\t\t\tyears = years.str[-4:]\r\n\t\tnans = dates[dates.isna()]\r\n\t\tyears = pd.concat([years, nans]).sort_index()\r\n\t\tif not replace:\r\n\t\t\treturn years\r\n\t\tself.sample_df[col] = years\r\n\r\n\r\n\tdef factor_file(self, path, cols=[], silent=False):\r\n\t\tself.update_factors(cols)\r\n\t\tfactor_df = pd.DataFrame(self.factor_key)\r\n\t\tif not cols:\r\n\t\t\tsubset = factor_df\r\n\t\telse:\r\n\t\t\tsubset = factor_df[factor_df[\"pheno\"].isin(cols)]\r\n\t\tif subset.empty:\r\n\t\t\tif not silent:\r\n\t\t\t\tprint(\"No factors to print\")\r\n\t\t\treturn None\r\n\t\tprint(f\"Writing factor file to: {path}\")\r\n\t\tsubset.to_csv(path, index=False, sep=\"\\t\")\r\n\r\n\r\n\tdef fast_stats(self):\r\n\t\tprint(f\"Samples: {self.sample_df.shape[0]}\")\r\n\t\tprint(f\"Hosts: {self.sample_df[self.sample_df['Tissue'] == 'Host'].shape[0]}\")\r\n\t\tprint(f\"Tumors: {self.sample_df[self.sample_df['Tissue'] == 'Tumour'].shape[0]}\")\r\n\t\tprint(f\"Missing microchips: {self.nan_sum('Microchip', print_flag=False)}\")\r\n\t\tprint(f\"Singles: {self.count_pairs(1)}\")\r\n\t\tprint(f\"Pairs: {self.count_pairs(2)}\")\r\n\t\tprint(f\"Triplets: {self.count_pairs(3)}\")\r\n\r\n\r\n\t# col can be a string or list of strings for clustering on multiple columns\r\n\tdef get_groups(self, col):\r\n\t\treturn self.sample_df.groupby(col)\r\n\r\n\r\n\tdef get_pairs(self, pair_count=None):\r\n\t\tpairs = 
self.sample_df.groupby(\"Microchip\")[\"Microchip\"].count()\r\n\t\tif pair_count:\r\n\t\t\tpairs = pairs[pairs == pair_count]\r\n\t\treturn pairs\r\n\r\n\r\n\tdef nan_rows(self, col, tissue=\"both\"):\r\n\t\tif tissue == \"both\":\r\n\t\t\tsubset = self.sample_df\r\n\t\telse:\r\n\t\t\tsubset = self.sample_df[self.sample_df[\"Tissue\"] == tissue]\r\n\t\treturn subset[subset[col].isna()]\r\n\r\n\r\n\tdef nan_sum(self, col, tissue=\"both\", print_flag=True):\r\n\t\tif tissue == \"both\":\r\n\t\t\tsubset = self.sample_df\r\n\t\telse:\r\n\t\t\tsubset = self.sample_df[self.sample_df[\"Tissue\"] == tissue]\r\n\t\ttotal = len(subset)\r\n\t\tnans = subset[col].isna().sum()\r\n\t\tif print_flag and isinstance(nans, np.integer):\r\n\t\t\tprint(f\"{col}: {nans}/{total} ({nans/total*100:.2f}%) NaNs\")\r\n\t\telse:\r\n\t\t\treturn nans\r\n\r\n\r\n\t# Definition: \r\n\t# \t\t\t Functions to normalize and transform data with sample_df. These can be strung together to transform and then normalize data, such as\r\n\t# \t\t\t the log normal transform: samples.normalize(\"my_col\", method=\"log\"); samples.normalize(\"my_col\"). If the original data within a col\r\n\t# \t\t\t wish to be maintained, the new_col argument should be used, with subsequent normalizations/transformations targetting that new col.\r\n\t# \t\t\t Normalized cols are named as new cols based on the ordering of \"cols\" and \"new_cols\", e.g: cols=[\"col1\", \"col2\", \"col3\"] and\r\n\t# \t\t\t new_cols=[\"col1_log\", \"col2_norm\", \"col3_lognorm\"]\r\n\t# Arguments:\r\n\t# \t\t\t cols: \tthe columns to be normalized \r\n\t# \t\t\t method: \tmethod of normalization/transformation \r\n\t# \t\t\t inplace: \twhether or not to return data or change the column in place \r\n\t# \t\t\t new_cols: put the normalized/transformed data into new columns \r\n\t# Returns:\r\n\t# \t\t\t The normalized/transformed column(s) or None if inplace or new_cols is specified\r\n\t# Steps:\r\n\t# \t\t\t Depends on the chosen method\r\n\t# Gotchas:\r\n\t# \t\t\t inplace versus new_cols: if both are specified, new_cols will override inplace=True\r\n\t# TODO:\r\n\t# \t\t\t Test all methods by hand to ensure the norms/transforms are working.\r\n\t# \t\t\t Consider an \"undo\" method (maintaining raw values with new_cols may be sufficient)\r\n\tdef normalize(self, cols, method=\"standard\", inplace=True, new_cols=[]):\r\n\t\tmethods = [\"inv_logit\", \"log\", \"logit\", \"standard\"]\r\n\t\tcols = cols if isinstance(cols, list) else [cols]\r\n\t\tdf_cols = self.sample_df[cols]\r\n\t\tif method == \"standard\":\r\n\t\t\tmeans = df_cols.mean()\r\n\t\t\tstds = df_cols.std()\r\n\t\t\tnorms = (df_cols - means) / stds\r\n\t\telif method == \"inv_logit\":\r\n\t\t\tnorms = np.exp(df_cols) / (1 + np.exp(df_cols))\r\n\t\telif method == \"log\":\r\n\t\t\tnorms = np.log(df_cols)\r\n\t\telif method == \"logit\":\r\n\t\t\tnorms = np.log(df_cols / (1 - df_cols))\r\n\t\telse:\r\n\t\t\tprint(f\"*normalize() ERROR* Method \\\"{method}\\\" does not exist! 
Use: {', '.join(methods)}\")\r\n\t\t\treturn None\r\n\t\tif not inplace:\r\n\t\t\treturn norms\r\n\t\telif new_cols:\r\n\t\t\tdf_cols = new_cols\r\n\t\telse:\r\n\t\t\tdf_cols = cols\r\n\t\tfor i in range(len(df_cols)):\r\n\t\t\tself.sample_df[df_cols[i]] = norms[cols[i]]\r\n\r\n\r\n\tdef pair_rows(self, pair_count):\r\n\t\tpairs = self.get_pairs()\r\n\t\tpairs = pairs[pairs == pair_count]\r\n\t\tchips = pairs.index.values\r\n\t\treturn self.sample_df[self.sample_df[\"Microchip\"].isin(chips)]\r\n\r\n\r\n\tdef plot_col(self, col, outpath, plot=\"hist\"):\r\n\t\tplots = [\"hist\"]\r\n\t\tdata = self.sample_df[col]\r\n\t\tif plot == \"hist\":\r\n\t\t\tq25, q75 = np.percentile(data, [0.25, 0.75])\r\n\t\t\tbin_width = 2 * ((q75 - q25) / len(data)**(-1/3))\r\n\t\t\tbins = round((data.max() - data.min()) / bin_width)\r\n\t\t\tplt.hist(data, bins=bins, density=True)\r\n\t\telse:\r\n\t\t\tprint(f\"*plot_col() ERROR* Plot \\\"{plot}\\\" does not exist! Use: {', '.join(plots)}\")\r\n\t\t\treturn None\r\n\t\tplt.ylabel(col)\r\n\t\tplt.savefig(outpath)\r\n\t\tplt.close()\r\n\t\tprint(f\"Plot saved to: {outpath}\")\r\n\r\n\r\n\tdef print_col(self, col):\r\n\t\tprint(self.sample_df[col].to_list())\r\n\r\n\r\n\tdef print_cols(self, cols):\r\n\t\tprint(self.sample_df[cols])\r\n\r\n\r\n\tdef print_factor_key(self):\r\n\t\tif not self.factor_key[\"key\"]:\r\n\t\t\tprint(\"No factors have been created\")\r\n\t\t\treturn None\r\n\t\tfactor_df = pd.DataFrame(self.factor_key)\r\n\t\tfor pheno in factor_df[\"pheno\"].unique():\r\n\t\t\tcurrent = factor_df[factor_df[\"pheno\"] == pheno]\r\n\t\t\tprint(pheno)\r\n\t\t\tprint(current[[\"key\", \"val\"]].to_string(index=False))\r\n\r\n\r\n\tdef subset(self, col, pattern):\r\n\t\tself.sample_df = self.sample_df[self.sample_df[col] == pattern]\r\n\r\n\r\n\tdef subset_pairs(self):\r\n\t\tindices = self.sample_df.groupby(\"Microchip\")[\"Microchip\"].count()\r\n\t\tindices = indices[indices > 1].index\r\n\t\tself.sample_df = self.sample_df[self.sample_df[\"Microchip\"].isin(indices)]\r\n\r\n\r\n\tdef subset_non_nan(self, col):\r\n\t\tna = self.sample_df[col].isna()\r\n\t\tif isinstance(col, list):\r\n\t\t\tna = na.any(axis=1)\r\n\t\tself.sample_df = self.sample_df[~na]\r\n\r\n\r\n\tdef summarize_pairs(self):\r\n\t\tpairs = self.get_pairs()\r\n\t\tprint(\"PAIR\\tCOUNT\")\r\n\t\tfor pair in pd.unique(pairs):\r\n\t\t\tprint(f\"{pair}\\t{len(pairs[pairs == pair])}\")\r\n\r\n\r\n\tdef to_factor(self, col, inplace=True):\r\n\t\tphenotype = self.sample_df[col]\r\n\t\tunique_vals = sorted(phenotype.dropna().unique(), key=str.casefold)\r\n\t\tphenotype = phenotype.fillna(\"NA\")\r\n\t\tfactors = [i for i in range(len(unique_vals))]\r\n\t\tphenotype = phenotype.replace(unique_vals, factors)\r\n\t\tself.factor_key[\"pheno\"] += [col for i in range(len(unique_vals))]\r\n\t\tself.factor_key[\"key\"] += list(unique_vals)\r\n\t\tself.factor_key[\"val\"] += factors\r\n\t\tif inplace:\r\n\t\t\tself.sample_df[col] = phenotype\r\n\t\telse:\r\n\t\t\treturn phenotype\r\n\r\n\r\n\tdef ATOMM_pheno(self, outpath, cols):\r\n\t\ttissue = pd.unique(self.sample_df[\"Tissue\"])\r\n\t\tif len(tissue) < 2:\r\n\t\t\tprint(f\"*WARNING* the sample DF only contains a single tissue ({tissue[0]}), and thus pairs cannot be generated.\")\r\n\t\t\tprint(\"If this is a TumorSamples object, please specify the argument tissue=\\\"both\\\"\")\r\n\t\t\tprint(\"Exiting\")\r\n\t\t\treturn None\r\n\t\tself.subset_pairs()\r\n\t\tself.__handle_triplets()\r\n\t\tself.to_vcf_chip()\r\n\t\ttumor = 
self.chip_sort(self.sample_df[self.sample_df[\"Tissue\"] == \"Tumour\"], num_only=True)\r\n\t\tself.subset(\"Tissue\", \"Host\")\r\n\t\thost = self.chip_sort(self.sample_df, num_only=True)\r\n\t\thost.rename(columns={\"Microchip\": \"host_chip\"}, inplace=True)\r\n\t\thost[\"tumor_chip\"] = tumor[\"Microchip\"]\r\n\t\tself.sample_df = host\r\n\t\tself.to_pheno(outpath, cols, id_cols=[\"host_chip\", \"tumor_chip\"], vcf_chip=False)\r\n\r\n\r\n\tdef to_pheno(self, outpath, cols, id_cols=[\"Microchip\"], vcf_chip=True):\r\n\t\tif vcf_chip:\r\n\t\t\tself.to_vcf_chip()\r\n\t\tpheno_df = self.sample_df[id_cols + cols]\r\n\t\tpheno_df = pheno_df.fillna(\"NA\")\r\n\t\toutdir = outpath[:outpath.rfind(\".\")]\r\n\t\tfactor_out = f\"{outdir}_FACTOR_KEY.txt\"\r\n\t\tself.factor_file(factor_out, cols=cols, silent=True)\r\n\t\tprint(f\"Writing phenotype file to: {outpath}\")\r\n\t\tpheno_df.to_csv(outpath, index=False, sep=\"\\t\")\r\n\r\n\r\n\tdef to_vcf_chip(self, df=None, inplace=True):\r\n\t\t# accepting an external DF lets helpers such as write_pairs() reuse this conversion\r\n\t\texternal = df is not None\r\n\t\tdf = df if external else self.sample_df\r\n\t\tchips = df[\"Microchip\"]\r\n\t\tchips = chips.str[-6:]\r\n\t\ttissue = df[\"Tissue\"].str[0]\r\n\t\ttissue[tissue == \"T\"] += df[\"TumourNumber\"]\r\n\t\tchips = tissue + \"-\" + chips\r\n\t\tif inplace and not external:\r\n\t\t\tself.sample_df[\"Microchip\"] = chips\r\n\t\telse:\r\n\t\t\treturn chips\r\n\r\n\r\n\tdef undo_factor(self, pheno):\r\n\t\tfactor_df = pd.DataFrame(self.factor_key)\r\n\t\tfactor_pheno = factor_df[factor_df[\"pheno\"] == pheno]\r\n\t\tsample_pheno = self.sample_df[pheno]\r\n\t\tundone_factors = sample_pheno.replace(factor_pheno[\"val\"].to_list(), factor_pheno[\"key\"].to_list())\r\n\t\tself.sample_df[pheno] = undone_factors\r\n\t\tself.factor_key = factor_df[factor_df[\"pheno\"] != pheno].reset_index(drop=True).to_dict(orient='list')\r\n\r\n\r\n\tdef unique(self, col):\r\n\t\treturn self.sample_df[col].unique()\r\n\r\n\r\n\tdef update_factors(self, pheno_list):\r\n\t\tfor pheno in pheno_list:\r\n\t\t\tif pheno not in self.factor_key[\"pheno\"]:\r\n\t\t\t\tcontinue\r\n\t\t\tself.undo_factor(pheno)\r\n\t\t\tself.to_factor(pheno)\r\n\r\n\r\n\tdef write_pairs(self, outpath):\r\n\t\t# resolve triplets on the sample DF first; __handle_triplets() takes no DF argument\r\n\t\tself.__handle_triplets()\r\n\t\tpair_df = self.sample_df[[\"Microchip\", \"Tissue\", \"TumourNumber\"]].copy()\r\n\t\tpairs = pair_df.groupby(\"Microchip\")[\"Microchip\"].count()\r\n\t\tpair_loc = pairs[pairs > 1].index\r\n\t\tpair_df = pair_df[pair_df[\"Microchip\"].isin(pair_loc)]\r\n\t\ttumors = pair_df[pair_df[\"Tissue\"] == \"Tumour\"].sort_values(by=\"Microchip\").reset_index(drop=True)\r\n\t\ttumors = self.to_vcf_chip(tumors)\r\n\t\thosts = pair_df[pair_df[\"Tissue\"] == \"Host\"].sort_values(by=\"Microchip\").reset_index(drop=True)\r\n\t\thosts = self.to_vcf_chip(hosts)\r\n\t\ttumor_host = pd.concat([hosts, tumors], axis=1)\r\n\t\ttumor_host.columns = [\"Hosts\", \"Tumors\"]\r\n\t\ttumor_host.to_csv(outpath, index=False, sep=\"\\t\")\r\n\r\n\r\n\tdef __handle_triplets(self):\r\n\t\ttriplets = self.get_pairs(3)\r\n\t\tif triplets.empty:\r\n\t\t\treturn None\r\n\t\tprint(f\"Found {len(triplets)} triplets, please select only 1 of the duplicates to retain:\")\r\n\t\tremove_loc = []\r\n\t\tfor index in triplets.index:\r\n\t\t\tcurrent_triplet = self.sample_df[self.sample_df[\"Microchip\"] == index]\r\n\t\t\tif len(current_triplet[current_triplet[\"Tissue\"] == \"Tumour\"]) > 1:\r\n\t\t\t\tdup = current_triplet[current_triplet[\"Tissue\"] == \"Tumour\"]\r\n\t\t\telse:\r\n\t\t\t\tdup = current_triplet[current_triplet[\"Tissue\"] == \"Host\"]\r\n\t\t\tdup_i = 
list(dup.index.values)\r\n\t\t\tprint(f'{dup[[\"Microchip\", \"Library number\", \"AnimalName\", \"Tissue\", \"YOB\", \"TrappingDate\", \"Site\", \"TumourID\", \"TumourNumber\"]]}')\r\n\t\t\tretain_loc = input(f\"Please select the index to be retained ({dup_i[0]} or {dup_i[1]}): \")\r\n\t\t\tdup_i.remove(int(retain_loc))\r\n\t\t\tremove_loc += dup_i\r\n\t\tself.sample_df = self.sample_df.loc[~self.sample_df.index.isin(remove_loc)]\r\n\r\n\r\n\tdef __isnum(self, x):\r\n\t\ttry:\r\n\t\t\tint(x)\r\n\t\texcept:\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\r\n\tdef __L_ID(self, paths):\r\n\t\tL_IDs = []\r\n\t\tfor path in paths:\r\n\t\t\tif path[-4:] == \".csv\":\r\n\t\t\t\tfnames = pd.read_csv(path)\r\n\t\t\t\tfnames = fnames[\"original\"].to_list()\r\n\t\t\telse:\r\n\t\t\t\tfnames = os.listdir(path)\r\n\t\t\tL_IDs += list(set([file[:5] for file in fnames]))\r\n\t\treturn L_IDs\r\n\r\n\r\n\r\n\r\nclass TumorSamples(Samples):\r\n\tdef __init__(self,\r\n\t\tsample_csv_path=\"/work_bgfs/d/dgallinson/data/pheno_data/master_corrections.csv\",\r\n\t\ttumor_csv_path=\"/work_bgfs/d/dgallinson/data/pheno_data/originals/RTumourTableRodrigo.csv\",\r\n\t\tid_paths=[],\r\n\t\ttissue=\"Host\"\r\n\t\t):\r\n\t\tsuper().__init__(sample_csv_path, id_paths)\r\n\t\ttumor_df_full = pd.read_csv(tumor_csv_path, dtype={\"TumourNumber\": str})\r\n\t\tif tissue != \"both\":\r\n\t\t\tself.sample_df = self.sample_df[self.sample_df[\"Tissue\"] == tissue]\r\n\t\tmicrochips = self.sample_df[\"Microchip\"].unique()\r\n\t\tself.tumor_df = tumor_df_full[tumor_df_full[\"Microchip\"].isin(microchips)]\r\n\t\tself.over_mmax = {\"Microchip\": [], \"volume\": []}\r\n\t\tself.failed_date_delta = []\r\n\r\n\t\r\n\t# ANALYSIS PHENOTYPE\r\n\tdef estimate_age(self):\r\n\t\ttmp_tumor_df = self.tumor_df[[\"Microchip\", \"TrapDate\", \"TumourDepth\", \"TumourLength\", \"TumourWidth\"]].copy()\r\n\t\ttmp_tumor_df[\"TrapDate\"] = pd.to_datetime(tmp_tumor_df[\"TrapDate\"])\r\n\t\tdates_df = self.__init_tumor_date(tmp_tumor_df)\r\n\t\tYOB = self.sample_df.set_index(\"Microchip\")[\"YOB\"]\r\n\t\tYOB = YOB[~YOB.index.duplicated(keep=\"first\")]\r\n\t\tYOB = pd.to_datetime(YOB)\r\n\t\tmerged = pd.concat([dates_df, YOB[dates_df.index]], axis=1)\r\n\t\tinfection_age = (merged[\"init_tumor_date\"] - merged[\"YOB\"]).dt.days\r\n\t\tinfection_age.name = \"infection_age\"\r\n\t\tinfection_age = infection_age.reset_index()\r\n\t\tself.sample_df = self.sample_df.merge(infection_age, how=\"left\", on=\"Microchip\")\r\n\r\n\r\n\tdef fast_stats_tumor(self):\r\n\t\tunique_samples = len(self.sample_df[\"Microchip\"].unique())\r\n\t\tfound_in_tumor = len(self.tumor_df[\"Microchip\"].unique())\r\n\t\tchip_date_cluster = self.tumor_df.groupby([\"Microchip\", \"TrapDate\"])[\"TrapDate\"].count()\r\n\t\ttrap_cluster = chip_date_cluster.groupby(\"Microchip\").count()\r\n\r\n\t\tprint(f\"Found in tumor DB: {(found_in_tumor/unique_samples)*100:.1f}% ({found_in_tumor}/{unique_samples})\")\r\n\t\tprint(f\"1 trap: {len(trap_cluster[trap_cluster == 1])}\")\r\n\t\tprint(f\"More than 1 trap: {len(trap_cluster[trap_cluster > 1])}\")\r\n\r\n\r\n\t# === ANALYSIS PHENOTYPE ===\r\n\t# Definition: \r\n\t# \t\t\t Obtain a proxy for devil survival similar to Margres et al. (2018; DOI: 10.1111/mec.14853) using a tumor growth back calculation\r\n\t# \t\t\t derived from Wells et al. (2017; DOI: 10.1111/ele.12776). Only devils with 2 trapping events and day_cutoff days between those\r\n\t# \t\t\t trapping events are included. 
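(E.g., with the default day_cutoff=40, a devil trapped only on day 0 and day 35 would be excluded; the numbers are invented for illustration.) 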
After the volume of the largest tumor (from tumors recorded on the earliest trap date) is obtained,\r\n\t# \t\t\t the back calculation is used to find the number of days since the tumor was 3 mm^3. This date is then subtracted from the last\r\n\t# \t\t\t date the devil was trapped to get survival in days.\r\n\t# Arguments:\r\n\t# \t\t\t day_cutoff: \t the minimum number of days a devil must be trapped between two trapping events for inclusion in the analysis \r\n\t# \t\t\t verbose_merge: whether the intermediate calculation values should be added to the samples DF. Mostly for debugging \r\n\t# Returns:\r\n\t# \t\t\t None. Adds at least a \"devil_survival_days\" column to the samples DF\r\n\t# Steps:\r\n\t# \t\t\t 1) Obtain the difference in days from first to last trapping event\r\n\t# \t\t\t 2) Filter out devils with a single trap event or whose days between trapping events fail the day_cutoff (these microchips are also saved)\r\n\t# \t\t\t 3) Back calculate to find the initial tumor date (see __init_tumor_date() for more details)\r\n\t# \t\t\t 4) Obtain the date for the most recent trapping event\r\n\t# \t\t\t 5) Subtract the tumor initial date from the max trap date to obtain devil survival days\r\n\t# TODO:\r\n\t# \t\t\t The calculated dates often do not match up with Margres et al. (2018) table S1.\r\n\t# \t\t\t Attempt to change the calculation using the sum of all tumor volumes on the min trap date.\r\n\t# \t\t\t Use the Compare class to test calculation similarities\r\n\tdef survival_proxy(self, day_cutoff=40, verbose_merge=False):\r\n\t\ttmp_tumor_df = self.tumor_df[[\"Microchip\", \"TrapDate\", \"TumourDepth\", \"TumourLength\", \"TumourWidth\"]].copy()\r\n\t\ttmp_tumor_df[\"TrapDate\"] = pd.to_datetime(tmp_tumor_df[\"TrapDate\"])\r\n\t\t\r\n\t\tdate_diff = tmp_tumor_df.groupby(\"Microchip\")[\"TrapDate\"].transform(lambda x: x.max() - x.min())\r\n\t\ttmp_tumor_df[\"date_diff\"] = date_diff.dt.days\r\n\t\tself.failed_date_delta = pd.Series(tmp_tumor_df[tmp_tumor_df[\"date_diff\"] < day_cutoff][\"Microchip\"].unique())\r\n\t\ttmp_tumor_df = tmp_tumor_df[tmp_tumor_df[\"date_diff\"] >= day_cutoff]\r\n\t\t\r\n\t\tdates_df = self.__init_tumor_date(tmp_tumor_df)\r\n\t\tlast_trap = tmp_tumor_df.groupby(\"Microchip\")[\"TrapDate\"].max().loc[dates_df.index]\r\n\t\tdevil_survival_days = last_trap - dates_df[\"init_tumor_date\"]\r\n\t\t\r\n\t\tcalc_df = pd.DataFrame({\r\n\t\t\t\"Microchip\": dates_df.index.values,\r\n\t\t\t\"volume (cm^3)\": np.round(dates_df[\"tumor_volume\"].values, 2),\r\n\t\t\t\"back_calc\": dates_df[\"back_calc\"].values,\r\n\t\t\t\"first_trap\": dates_df[\"min_date\"].dt.date.values,\r\n\t\t\t\"last_trap\": last_trap.dt.date.values,\r\n\t\t\t\"init_tumor_date\": dates_df[\"init_tumor_date\"].dt.date.values,\r\n\t\t\t\"devil_survival_days\": devil_survival_days.dt.days.values})\r\n\t\tif not verbose_merge:\r\n\t\t\tcalc_df = calc_df[[\"Microchip\", \"devil_survival_days\"]]\r\n\t\tself.sample_df = self.sample_df.merge(calc_df, how=\"left\", on=\"Microchip\")\r\n\r\n\r\n\tdef get_multi_sub_40(self):\r\n\t\tif len(self.failed_date_delta) == 0:\r\n\t\t\tprint(\"Please run survival_proxy() before using this method\")\r\n\t\t\treturn None\r\n\t\tsingle_trap = self.get_trap(1)\r\n\t\treturn self.failed_date_delta[~self.failed_date_delta.isin(single_trap)]\r\n\r\n\r\n\tdef get_trap(self, trap_N):\r\n\t\ttrap_nums = self.tumor_df.groupby([\"Microchip\", \"TrapDate\"])[\"TrapDate\"].count().groupby(\"Microchip\").count()\r\n\t\treturn trap_nums[trap_nums == 
trap_N].index\r\n\r\n\r\n\t# This fails to get the exact tumors we sequenced but gets close. Due to mismatching info\r\n\t# between the sample CSV and tumor CSV, further resolution is impossible. Once I obtain the\r\n\t# proper phenotype data I will update this method (likely to grab based on TrapDate)\r\n\tdef get_sample_tumors(self):\r\n\t\tsample_subset = self.sample_df[self.sample_df[\"Microchip\"].isin(self.tumor_df[\"Microchip\"])]\r\n\t\tsample_tumors = sample_subset[[\"Microchip\", \"TumourNumber\"]]\r\n\t\tfound_indices = []\r\n\t\tfor i in range(sample_tumors.shape[0]):\r\n\t\t\tcurrent_sample = sample_tumors.iloc[i]\r\n\t\t\tfound_index = self.tumor_df.loc[(self.tumor_df[\"Microchip\"] == current_sample[\"Microchip\"]) & (self.tumor_df[\"TumourNumber\"] == current_sample[\"TumourNumber\"])]\r\n\t\t\tfound_index = list(found_index.index.values)\r\n\t\t\tfound_indices += found_index\r\n\t\treturn self.tumor_df.loc[found_indices]\r\n\r\n\r\n\tdef no_tumor_entry(self):\r\n\t\thosts = self.sample_df[self.sample_df[\"Tissue\"] == \"Host\"]\r\n\t\thosts_not_in = hosts[~hosts[\"Microchip\"].isin(self.tumor_df[\"Microchip\"].unique())]\r\n\t\treturn hosts_not_in\r\n\r\n\r\n\tdef print_over_mmax(self):\r\n\t\tif not self.over_mmax[\"Microchip\"]:\r\n\t\t\tprint(\"No back calculations have been done yet\")\r\n\t\t\treturn None\r\n\t\tprint(\"Microchip\\tVolume (cm^3)\")\r\n\t\tN = len(self.over_mmax[\"Microchip\"])\r\n\t\tfor i in range(N):\r\n\t\t\tprint(f\"{self.over_mmax['Microchip'][i]}\\t{self.over_mmax['volume'][i]:.3f}\")\r\n\t\tprint(f\"Length: {N}\")\r\n\r\n\r\n\t# ANALYSIS PHENOTYPE\r\n\tdef tumor_count(self):\r\n\t\tnum_tumors = self.tumor_df.groupby([\"Microchip\"])[\"TumourNumber\"].unique().str.len()\r\n\t\tnum_tumors = num_tumors.to_frame().reset_index()\r\n\t\tnum_tumors = num_tumors.rename(columns={\"TumourNumber\": \"tumor_count\"})\r\n\t\tself.sample_df = self.sample_df.merge(num_tumors, how=\"left\", on=\"Microchip\")\r\n\r\n\r\n\t# Definition: \r\n\t# \t\t\t A logistic growth back calculation derived from Wells et al. (2017; DOI: 10.1111/ele.12776) which was based on data from WPP.\r\n\t# \t\t\t Output is a negative number of days, measured from the min capture date, since the tumor was 3 mm^3.\r\n\t# \t\t\t The model is not accurate enough to estimate beyond day resolution, and thus rounding to the nearest day \r\n\t# \t\t\t (default round_flag) is advised. 
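(Hedged worked example with invented numbers: a 50 cm^3 tumor gives log((202 / (50 + 1) - 1) / (202 - 0.0003)) / 0.03, roughly -141, i.e. about 141 days before the first capture.) 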
This also stores any volumes greater than mmax.\r\n\t# Arguments:\r\n\t# \t\t\t tumor_volumes: a vector/Series of tumor volumes \r\n\t# \t\t\t mmax: \t\t max tumor size in cm^3 (from growth model) \r\n\t# \t\t\t init_size: \t initial tumor size (from growth model) \r\n\t# \t\t\t alpha: \t\t scale parameter of the logistic growth curve (from growth model) \r\n\t# \t\t\t is_cm: \t\t flag if the incoming volume is in centimeters \r\n\t# \t\t\t round_flag: \t flag if the back calculated volumes should be rounded to the nearest int \r\n\t# Returns:\r\n\t# \t\t\t numpy array containing negative numbers representing days since tumor volume = 3 mm^3\r\n\t# Steps:\r\n\t# \t\t\t 1) Convert mm to cm if is_cm=True\r\n\t# \t\t\t 2) Determine volumes >= mmax, setting those volumes to NaN and storing only those microchips and volumes not already in the over_mmax dict\r\n\t# \t\t\t 3) Perform the back calculation\r\n\t# \t\t\t 4) Round the calculations to the nearest int if round_flag=True and return\r\n\t# Gotchas:\r\n\t# \t\t\t Formula domain: (0, mmax)\r\n\t# \t\t\t Formula range: (-inf, 0); as volume -> mmax, days -> -inf\r\n\tdef __growth_back_calculation(self, tumor_volumes, mmax=202, init_size=0.0003, alpha=0.03, is_cm=False, round_flag=True):\r\n\t\ttumor_volumes = tumor_volumes.copy()\r\n\t\tif not is_cm:\r\n\t\t\ttumor_volumes /= 1000\r\n\t\tover = tumor_volumes[(tumor_volumes + 1) >= mmax]\r\n\t\tif over.size > 0:\r\n\t\t\tvstring = \"volumes\" if len(over) > 1 else \"volume\"\r\n\t\t\tprint(f\"*WARNING* found {len(over)} {vstring} greater than mmax while performing the back calculation (try print_over_mmax() for details)\")\r\n\t\t\ttumor_volumes[(tumor_volumes + 1) >= mmax] = np.nan\r\n\t\tif self.over_mmax[\"Microchip\"]:\r\n\t\t\tover = over.drop(self.over_mmax[\"Microchip\"])\r\n\t\tif len(over > 0):\r\n\t\t\tself.over_mmax[\"Microchip\"] += list(over.index.values)\r\n\t\t\tself.over_mmax[\"volume\"] += list(over.values)\r\n\t\tback_calculation = np.log((mmax / (tumor_volumes.values + 1) - 1) / (mmax - init_size)) / alpha\r\n\t\tif round_flag:\r\n\t\t\tback_calculation = np.round(back_calculation)\r\n\t\treturn back_calculation\r\n\r\n\r\n\t# Definition: \r\n\t# \t\t\t Obtain the tumor volume Series for the minimum capture date with microchips as the index.\r\n\t# \t\t\t When a host has multiple tumors, different grouping functions can be used (e.g., max or sum). \r\n\t# \t\t\t If this is being used with the growth back calculation, max should set for mode. 
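(Illustration with invented measurements: same-date tumor volumes of 8, 27 and 64 mm^3 reduce to 64 under max, 99 under sum and 33 under mean.) 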
Drops rows with an NA measurement.\r\n\t# Arguments:\r\n\t#\t\t\t tumor_df: a DF with at least the following cols: Microchip, TrapDate, TumourDepth, TumourLength, TumourWidth \r\n\t# \t\t\t mode: \tmethod of selecting a single volume when multiple tumors are present at the same date \r\n\t# Returns: \r\n\t# \t\t\t a DF containing Microchip as the index and cols tumor_volume (in starting units) and min_date (representing the minimum date for a sample)\r\n\t# Steps:\r\n\t#\t\t\t 1) get min date for a sample (group on Microchip)\r\n\t# \t\t\t 2) Iterate through each of these microchips, find all rows with the min date, and generate a DF from these\r\n\t# \t\t\t 3) Obtain tumor volumes via length x width x depth\r\n\t# \t\t\t 4) Group these volumes on Microchip and select a single volume for each microchip (i.e., max, mean, sum)\r\n\t# Gotchas:\r\n\t# \t\t\t Drops rows if any NA is found in TumourDepth, TumourLength, or TumourWidth\r\n\t# TODO:\r\n\t# \t\t\t The loop is an inelegant and inefficient solution; this should be vectorized\r\n\tdef __min_cap_volume(self, tumor_df, mode=\"max\"):\r\n\t\ttumor_df = tumor_df.dropna(subset=[\"TumourDepth\", \"TumourLength\", \"TumourWidth\"])\r\n\t\tmin_date_groups = tumor_df.groupby(\"Microchip\")[\"TrapDate\"].min()\r\n\t\tmin_date_groups.name = \"min_date\"\r\n\t\tmin_dates = {\"Microchip\": min_date_groups.index, \"TrapDate\": min_date_groups.values}\r\n\t\tfirst_tumor_list = []\r\n\t\t\r\n\t\tfor i in range(len(min_dates[\"Microchip\"])):\r\n\t\t\tchip = min_dates['Microchip'][i]\r\n\t\t\tdate = min_dates['TrapDate'][i]\r\n\t\t\tchip_group = tumor_df[tumor_df[\"Microchip\"] == chip]\r\n\t\t\tfirst_tumor_list.append(chip_group[chip_group[\"TrapDate\"] == date])\r\n\t\t\r\n\t\tfirst_tumor_df = pd.concat(first_tumor_list)\r\n\t\tfirst_tumor_df[\"tumor_volume\"] = first_tumor_df[\"TumourDepth\"] * first_tumor_df[\"TumourLength\"] * first_tumor_df[\"TumourWidth\"]\r\n\t\tvolume_cluster = first_tumor_df.groupby(\"Microchip\")[\"tumor_volume\"]\r\n\t\tif mode == \"mean\":\r\n\t\t\tvolume_cluster = volume_cluster.mean()\r\n\t\telif mode == \"sum\":\r\n\t\t\tvolume_cluster = volume_cluster.sum()\r\n\t\telse:\r\n\t\t\tvolume_cluster = volume_cluster.max()\r\n\t\treturn pd.concat([volume_cluster, min_date_groups], axis=1)\r\n\r\n\r\n\t# Definition:\r\n\t# \t\t\t This is a convenience function combining the functionality of __min_cap_volume()\r\n\t# \t\t\t and __growth_back_calculation() to find the initial tumor date.\r\n\t# Arguments:\r\n\t# \t\t\t tumor_df: a DF with at least the following cols: Microchip, TrapDate, TumourDepth, TumourLength, TumourWidth \r\n\t# Returns:\r\n\t# \t\t\t a DF with Microchip as the index and the following cols: tumor_volume, min_date, back_calc, init_tumor_date\r\n\t# Steps:\r\n\t# \t\t\t 1) Run __min_cap_volume() and __growth_back_calculation()\r\n\t# \t\t\t 2) Get the initial tumor date by subtracting the back calc from the min date\r\n\t# \t\t\t 3) Combine everything into a DF\r\n\tdef __init_tumor_date(self, tumor_df):\r\n\t\tvolumes = self.__min_cap_volume(tumor_df)\r\n\t\tvolumes[\"tumor_volume\"] /= 1000\r\n\t\tback_calc = self.__growth_back_calculation(volumes[\"tumor_volume\"], is_cm=True)\r\n\t\tinit_tumor_dates = volumes[\"min_date\"] - pd.to_timedelta(-1 * back_calc, unit=\"d\")\r\n\t\tinit_tumor_dates.name = \"init_tumor_date\"\r\n\t\tback_calc = pd.Series(back_calc, name=\"back_calc\", index=volumes.index)\r\n\t\treturn pd.concat([volumes, back_calc, init_tumor_dates], axis=1)\r\n\r\n\r\n\r\nclass Compare:\r\n\tdef 
__init__(self, df, cols):\r\n\t\tself.df = df\r\n\t\tself.cols = cols\r\n\t\tself.matched = pd.DataFrame([])\r\n\t\tself.unmatched = pd.DataFrame([])\r\n\r\n\r\n\tdef compare_strict(self):\r\n\t\tself.matched = self.df[self.df[self.cols[0]] == self.df[self.cols[1]]]\r\n\t\tself.unmatched = self.df[self.df[self.cols[0]] != self.df[self.cols[1]]]\r\n\r\n\r\n\tdef diff(self, n=1, biggest=True):\r\n\t\tfirst = self.unmatched[self.cols[0]]\r\n\t\tsecond = self.unmatched[self.cols[1]]\r\n\t\tabs_diffs = sorted((first - second).abs(), reverse=biggest)\r\n\t\treturn abs_diffs[0:n]\r\n\r\n\r\n\tdef diff_stats(self):\r\n\t\tfirst = self.unmatched[self.cols[0]]\r\n\t\tsecond = self.unmatched[self.cols[1]]\r\n\t\tabs_diffs = (first - second).abs()\r\n\t\tmean = abs_diffs.mean()\r\n\t\tmedian = abs_diffs.median()\r\n\t\treturn mean, median\r\n\r\n\r\n\tdef stats(self):\r\n\t\tif self.matched.empty and self.unmatched.empty:\r\n\t\t\tprint(\"Please run a comparison method before printing stats!\")\r\n\t\t\treturn None\r\n\t\tmean, median = self.diff_stats()\r\n\t\tprint(f\"Matched: {self.matched.shape[0]}\")\r\n\t\tprint(f\"Unmatched: {self.unmatched.shape[0]}\")\r\n\t\tprint(\"----- UNMATCHED -----\")\r\n\t\tprint(f\"Biggest difference: {self.diff()[0]}\")\r\n\t\tprint(f\"Smallest difference: {self.diff(biggest=False)[0]}\")\r\n\t\tprint(f\"Mean difference: {mean:.1f}\")\r\n\t\tprint(f\"Median difference: {median}\")","sub_path":"utility/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":27757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506801082","text":"\"\"\"A demo app that can be used as a skeleton template for bootstrapping the\ncreation of simple Plotly Dash apps.\n\n\"\"\"\n\nfrom collections import Counter\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, State, Output, Event\n\n\napp = dash.Dash(__name__)\napp.title = 'Dash Skeleton'\n\n# If you need to run your app locally\n#app.scripts.config.serve_locally = True\n\napp.layout = html.Div([\n dcc.Markdown(\"\"\"\n# Dash Demo app\n\nThis demo app counts the number of characters in the text box and updates a bar\nchart with their frequency as you type.\"\"\"),\n html.Div(\n dcc.Textarea(\n id='text-input',\n value='Type some text into me!',\n style={'width':'40em', 'height': '5em'},\n )\n ),\n html.Div('Sort by:'),\n dcc.RadioItems(\n id='sort-type',\n options=[\n {'label': 'Frequency', 'value': 'frequency'},\n {'label': 'Character code', 'value': 'code'},\n ],\n value='frequency'\n ),\n html.Div('Normalize character case?'),\n dcc.RadioItems(\n id='normalize',\n options=[\n {'label': 'No', 'value': 'no'},\n {'label': 'Yes', 'value': 'yes'},\n ],\n value='no'\n ),\n dcc.Graph(id='graph', className='red')\n])\n\n\n@app.callback(\n Output('graph', 'figure'), # Output\n [Input('text-input', 'value'), # Inputs\n Input('sort-type', 'value'),\n Input('normalize', 'value')], \n [], # States\n [] # Events\n)\ndef callback(text, sort_type, normalize):\n if normalize == 'yes':\n text = text.lower()\n\n if sort_type == 'frequency':\n sort_func = lambda x:-x[1]\n else:\n sort_func = lambda x:ord(x[0])\n \n counts = Counter(text)\n\n if len(counts) == 0:\n x_data = []\n y_data = []\n else:\n x_data, y_data = zip(*sorted(\n counts.items(),\n key=sort_func))\n return {\n 'data': [\n {'x': x_data, 'y':y_data, 'type': 'bar', 'name': 'trace1'},\n ],\n 'layout': {\n 'title': 
'Frequency of Characters',\n 'height': '600',\n 'font': {'size': 16}\n },\n }\n\n\nif __name__ == '__main__':\n # To make this app publicly available, supply the parameter host='0.0.0.0'.\n # You should also disable debug mode in production.\n app.run_server(debug=True, port=8051)\n","sub_path":"dash_skeleton.py","file_name":"dash_skeleton.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314435976","text":"import matplotlib.patches as patches\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import LogNorm\nfrom mpl_toolkits.mplot3d import Axes3D\n\n############################################################################################################################\n\n\"\"\" draw ellipse associated to a covariance matrix \"\"\"\ndef draw_ellipse(position, covariance, ax=None, **kwargs):\n \"\"\"Draw an ellipse with a given position and covariance\"\"\"\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 5):\n ax.add_patch(patches.Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))\n\n\"\"\" plot data and corresponding GMM prediction \"\"\"\ndef plot_gmm(means, covariances, weights, X, scale = None, labels=None, ax=None):\n \n num_features = np.shape(X)[1]\n if num_features == 2:\n\n ax = ax or plt.gca()\n\n w_factor = 0.3 / weights.max()\n for pos, covar, w in zip(means, covariances, weights):\n draw_ellipse(pos, covar, alpha=w * w_factor)\n \n if labels is not None:\n if scale is not None:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=scale, cmap='viridis', zorder=0)\n else:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=0) \n else:\n if scale is not None:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=scale, cmap='viridis', zorder=0)\n else:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s= 40, zorder=0)\n ax.set_xlim(min(min(X[:, 0]), min(means[:,0]) ), max(max(X[:, 0]), max(means[:,0]) ))\n ax.set_ylim(min(min(X[:, 1]), min(means[:,1]) ), max(max(X[:, 1]), max(means[:,1]) ))\n plt.ylabel(r'$z_2$',fontsize=18)\n plt.xlabel(r'$z_1$',fontsize=18)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n elif num_features ==3:\n fig = plt.figure()\n ax = ax or Axes3D(fig)\n ax.scatter3D(X[:, 0], X[:, 1], X[:,2], c=labels, s=scale, cmap='viridis')\n\n'''\n'''\ndef plot_kmeans(centers, X, labels):\n plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')\n plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5);","sub_path":"clustering_plotting_tools.py","file_name":"clustering_plotting_tools.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325168554","text":"'''\r\nChapter 1\r\n Task 2\r\n Enhanced Unit Calculator\r\n\r\n Write a program which can convert kilometers to miles, vice versa; kilograms to pounds and vice versa; Celsius to Fahrehnheit, vice versa;\r\n\r\n\r\n '''\r\ndef print_menu():\r\n print('1.Kilometers to Miles.')\r\n print('2. Miles to Kilometers.')\r\n print('3. Kilograms to Pounds.')\r\n print('4. Pounds to Kilograms.')\r\n print('5. Degrees Celsius to degrees Fahrenheit.')\r\n print('6. 
Degrees Fahrenheit to degrees Celsius.')\r\n\r\ndef km_miles():\r\n    km=float(input('Please enter the distance in Kilometers: '))\r\n    miles=km/1.609\r\n\r\n    print('The above given distance is {0} Miles.'.format(miles))\r\n\r\ndef miles_km():\r\n    miles=float(input('Please enter the distance in Miles: '))\r\n    km=miles*1.609\r\n\r\n    print('The above given distance is {0} Kilometers.'.format(km))\r\n\r\ndef kg_pounds():\r\n    kg=float(input('Please enter the desired mass in Kilograms: '))\r\n    pounds=kg*2.204\r\n\r\n    print('The above given mass is {0} pounds.'.format(pounds))\r\n\r\ndef pounds_kg():\r\n    pounds=float(input('Please enter the desired mass in Pounds: '))\r\n    kg=pounds/2.204\r\n\r\n    print('The above given mass is {0} Kilograms.'.format(kg))\r\n\r\ndef c_f():\r\n    c=float(input('Please enter the desired temperature in degrees Celsius: '))\r\n    f=c*(9/5)+32\r\n\r\n    print('The above given temperature is {0} degrees Fahrenheit.'.format(f))\r\n\r\ndef f_c():\r\n    f=float(input('Please enter the desired temperature in degrees Fahrenheit: '))\r\n    c=(f-32)*(5/9)\r\n\r\n    print('The above given temperature value is {0} degrees Celsius.'.format(c))\r\n\r\nif __name__=='__main__':\r\n    while True:\r\n        print_menu()\r\n        choice=input('Which conversion would you like? ')\r\n        \r\n        if choice=='1':\r\n            km_miles()\r\n            \r\n        if choice=='2':\r\n            miles_km()\r\n            \r\n        if choice=='3':\r\n            kg_pounds()\r\n            \r\n        if choice=='4':\r\n            pounds_kg()\r\n            \r\n        if choice=='5':\r\n            c_f()\r\n            \r\n        if choice=='6':\r\n            f_c()\r\n\r\n        answer=input('Do you want to exit? (y) for yes. ')\r\n        if answer=='y':\r\n            break\r\n        \r\n        \r\n","sub_path":"Excersises/Chapter 1/5.Give Exit power to the user/Enhanced Unit Calculator.py","file_name":"Enhanced Unit Calculator.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282075089","text":"# coding: utf-8\n\nfrom ext.db import db\n\n# '''\n# CREATE TABLE `terminal_log` (\n# `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'auto-increment id',\n# `no` varchar(80) NOT NULL COMMENT 'log serial number',\n# `terminal_id` bigint(20) NOT NULL COMMENT 'terminal id (terminal.id)',\n# `operate_time` datetime NOT NULL COMMENT 'operation time',\n# `operate_type` varchar(20) NOT NULL COMMENT 'operation type (1001: activate, 1002: authenticate)',\n# `operate_result` varchar(20) DEFAULT NULL COMMENT 'operation result (101:, 102:)',\n# `request_ip` varchar(50) DEFAULT NULL COMMENT 'request ip',\n# `request_desc` varchar(255) DEFAULT NULL COMMENT 'request description',\n# `duration` int(20) DEFAULT NULL COMMENT 'processing duration (unit: ms)',\n# `remark` varchar(255) DEFAULT NULL COMMENT 'remark',\n# PRIMARY KEY (`id`)\n# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='terminal operation log';\n# '''\n\nclass TerminalLog(db.Model):\n    __tablename__ = 'terminal_log'\n\n    id = db.Column(db.BIGINT(), nullable=False, autoincrement=True, primary_key=True) #, comment='auto-increment id')\n    no = db.Column(db.VARCHAR(80), nullable=False) #, comment='log serial number')\n    terminal_id = db.Column(db.BIGINT(), nullable=False,) # comment='terminal id (terminal.id)')\n    operate_time = db.Column(db.DATETIME, nullable=False) #, comment='operation time')\n    operate_type = db.Column(db.VARCHAR(20), nullable=False) #, comment='operation type (1001: activate, 1002: authenticate)')\n    operate_result = db.Column(db.VARCHAR(20), default=None) #, comment='operation result (101:, 102:)')\n    request_ip = db.Column(db.VARCHAR(50), default=None) #, comment='request ip')\n    request_desc = db.Column(db.VARCHAR(255), default=None) #, comment='request description')\n    duration = db.Column(db.INT(), default=None) #, comment='processing duration (unit: ms)')\n    remark = db.Column(db.VARCHAR(255), default=None) #, 
comment='remark')\n\n    def __init__(self, no, terminal_id, operate_time, operate_type, operate_result=None, request_ip=None, request_desc=None, duration=None, remark=None):\n        self.no = no\n        self.terminal_id = terminal_id\n        self.operate_time = operate_time\n        self.operate_type = operate_type\n        self.operate_result = operate_result\n        self.request_ip = request_ip\n        self.request_desc = request_desc\n        self.duration = duration\n        self.remark = remark\n\n    # field mapping\n    relation = [\n        ('operate_time', 'action_request_time',),\n        ('operate_type', 'action_type'),\n        ('operate_result', 'action_result'),\n        ('request_ip', 'action_request_ip'),\n        ('request_desc', 'action_request_body',),\n        ('duration', 'action_process_duration',),\n        ('remark', 'action_desc',),\n    ]\n\n    def __repr__(self):\n        dct = self.__dict__\n        lst = ['{}: {}'.format(k, dct[k]) for k in dct]\n        return '< {}\\n{} >\\n'.format(self.__class__.__name__, '\\n'.join(lst))\n","sub_path":"sdwan/hive_bee_action_tracker(Operation Logger)/src/sql_module/terminal_log.py","file_name":"terminal_log.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"37569977","text":"from browser import document, html, ajax, window, alert\nimport urllib.request\nfrom javascript import JSON\n\nmoogleid = '904000103'\ncontainerid = '904000115'\ncontainername = {'en':'Prism Moogle', 'ko':'프리즘 모그리', 'zh':'信賴度莫古利(原晶)', 'es':'Moguri prismático', 'de':'Prismamogry', 'fr':'Prismog'}\n\ndef loadlanguage(event):\n    global code\n    code = event.target.value\n    for b in lang_buttons:\n        if 'active' in b.classList:\n            b.classList.remove('active')\n    event.target.classList.add('active')\n\ndef moogles(event):\n    filestatus.style.display = 'inline'\n    if 'ion-md-checkmark-circle' in filestatus.classList:\n        filestatus.classList.remove('ion-md-checkmark-circle')\n        filestatus.classList.add('ion-md-hourglass')\n    elif 'ion-md-close-circle' in filestatus.classList:\n        filestatus.classList.remove('ion-md-close-circle')\n        filestatus.classList.add('ion-md-hourglass')\n    languages = ['de', 'es', 'fr', 'ko', 'zh']\n    try:\n        reqw = ajax.ajax()\n        requ = ajax.ajax()\n        if code in languages:\n            reqw.open('GET', 'https://raw.githubusercontent.com/lyrgard/ffbeEquip/master/static/GL/data_' + code + '.json', False)\n            requ.open('GET', 'https://raw.githubusercontent.com/lyrgard/ffbeEquip/master/static/GL/units_' + code + '.json', False)\n        else:\n            reqw.open('GET', 'https://raw.githubusercontent.com/lyrgard/ffbeEquip/master/static/GL/data.json', False)\n            requ.open('GET', 'https://raw.githubusercontent.com/lyrgard/ffbeEquip/master/static/GL/units.json', False)\n        reqw.bind('complete', reqwComplete)\n        requ.bind('complete', requComplete)\n        requ.send()\n        reqw.send()\n    except:\n        alert(\"Error: Can't connect to ffbebuilder!\")\n        if 'ion-md-checkmark-circle' in filestatus.classList:\n            filestatus.classList.remove('ion-md-checkmark-circle')\n            filestatus.classList.add('ion-md-close-circle')\n        elif 'ion-md-hourglass' in filestatus.classList:\n            filestatus.classList.remove('ion-md-hourglass')\n            filestatus.classList.add('ion-md-close-circle')\n    reader = window.FileReader.new()\n    try:\n        reader.readAsText(choosefile.files[0])\n        reader.bind('load', process_inventory)\n        if 'ion-md-close-circle' in filestatus.classList:\n            filestatus.classList.remove('ion-md-close-circle')\n            filestatus.classList.add('ion-md-checkmark-circle')\n        elif 'ion-md-hourglass' in filestatus.classList:\n            filestatus.classList.remove('ion-md-hourglass')\n            
filestatus.classList.add('ion-md-checkmark-circle')\n\n except IndexError:\n alert('Error: No file was selected!')\n if 'ion-md-checkmark-circle' in filestatus.classList:\n filestatus.classList.remove('ion-md-checkmark-circle')\n filestatus.classList.add('ion-md-close-circle')\n elif 'ion-md-hourglass' in filestatus.classList:\n filestatus.classList.remove('ion-md-hourglass')\n filestatus.classList.add('ion-md-close-circle')\n except:\n alert('Error: Invalid file!')\n if 'ion-md-checkmark-circle' in filestatus.classList:\n filestatus.classList.remove('ion-md-checkmark-circle')\n filestatus.classList.add('ion-md-close-circle')\n elif 'ion-md-hourglass' in filestatus.classList:\n filestatus.classList.remove('ion-md-hourglass')\n filestatus.classList.add('ion-md-close-circle')\n \n\ndef process_inventory(event):\n global inventory\n try:\n inventory = JSON.parse(event.target.result) \n fusedict = {}\n mooglelist = [u['tmrId'] for u in inventory if u['id'] == moogleid]\n containerlist = [u['tmrId'] for u in inventory if u['id'] == containerid if u['tmr'] <1000]\n ownedunits = [u['id'][:-1] for u in inventory if u['id'][0] != '9' if u['tmr'] <1000 ]\n for tmrId in mooglelist:\n tmrName = weapondict[tmrId]['name']\n tmrUnit = weapondict[tmrId]['tmrUnit']\n unitName = unitdict[tmrUnit]['name']\n if tmrUnit[:-1] in ownedunits:\n if tmrId in fusedict.keys():\n fusedict[tmrId]['count'] += 1\n else:\n fusedict.update({tmrId:{'tmr':tmrName, 'unit':unitName, 'count':1, 'unitid':tmrUnit}})\n if tmrId in containerlist:\n if tmrId in fusedict.keys():\n fusedict[tmrId]['count'] += 1\n else:\n fusedict.update({tmrId:{'tmr':tmrName, 'unit':containername[code], 'count':1, 'unitid':containerid}}) \n fusesort = sorted(fusedict.items(), key = lambda item: item[1]['tmr'])\n document['results'].clear()\n table = html.TABLE(Class='table table-hover')\n tbody = html.TBODY()\n table <= tbody\n for l in fusesort:\n tbody <= html.TR([html.TD(l[1]['tmr']), html.TD('\\tx' + str(l[1]['count']) + '\\t'), html.TD(l[1]['unit']), html.TD(html.IMG(src='https://ffbeequip.com/img/units/unit_icon_' + l[1]['unitid'] + '.png' ) )] )\n document['results'] <= table\n except:\n alert('Error: Invalid file!')\n if 'ion-md-checkmark-circle' in filestatus.classList:\n filestatus.classList.remove('ion-md-checkmark-circle')\n filestatus.classList.add('ion-md-close-circle')\n elif 'ion-md-hourglass' in filestatus.classList:\n filestatus.classList.remove('ion-md-hourglass')\n filestatus.classList.add('ion-md-close-circle') \n\ndef reqwComplete(request):\n global weapondict\n weapondata = JSON.parse(request.responseText)\n weapondict = {w['id']:{'name':w['name'], 'tmrUnit':w['tmrUnit']} for w in weapondata if 'tmrUnit' in w.keys()}\n\ndef requComplete(request):\n global unitdict\n unitdict = JSON.parse(request.responseText)\n \n\ncode = 'en'\ndocument['privatebrowsing'].style.display = 'none'\ntoolbar = html.DIV(id='toolbar', Class='container-fluid mx-3 mb-1')\ndocument <= toolbar\nlangbar = html.DIV(id=\"langbar\", Class=\"btn-group btn-group-toggle px-3\", data_toggle=\"buttons\", role=\"group\", aria_label=\"Select language\")\nlanglabel = html.BUTTON(Class='btn-secondary rounded-left', disabled=True)\nlangtext = document.createTextNode('Select language: ')\nlanglabel <= langtext\nlangbar <= langlabel\ntoolbar <= langbar\nfilestatus = html.I(Class='icon ion-md-checkmark-circle ml-3')\nfilestatus.style.display = 'none'\ndocbar = html.DIV(id='docbar', Class='btn-group px-3')\ndoclabel = html.BUTTON(Class='btn-secondary rounded-left', 
disabled=True)\ndoctext = document.createTextNode('Select file: ')\ndoclabel <= doctext\ndocbar <= doclabel\ntoolbar <= docbar\nchoosefile = html.INPUT(type='file', Class='btn btn-outline-primary')\ndocbar <= choosefile\nbuttonrun = html.BUTTON(id='buttonrun', Class='btn btn-primary px-3')\nbuttontext = document.createTextNode('Show moogles!')\nbuttonrun <= buttontext\ntoolbar <= buttonrun\ntoolbar <= filestatus\n\nresults = html.DIV(id='results', Class='container-fluid')\ndocument <= results\n\nfooter = html.DIV(id='footer', Class='container-fluid')\nhomebutton = html.A(href='https://bosoneando.github.io', Class='btn btn-primary')\nhomelabel = document.createTextNode('Home')\nhomeicon = html.I(Class='icon ion-md-home pr-3')\nhomebutton <= homeicon\nhomebutton <= homelabel\nfooter <= homebutton\nhelpbutton = html.A(href='https://bosoneando.github.io/mooglefuser/help.html', Class='btn btn-primary')\nhelplabel = document.createTextNode('Help')\nhelpicon = html.I(Class='icon ion-md-help-circle pr-3')\nhelpbutton <= helpicon\nhelpbutton <= helplabel\nfooter <= helpbutton\naboutbutton = html.A(href='https://bosoneando.github.io/mooglefuser/about.html', Class='btn btn-primary')\naboutlabel = document.createTextNode('About')\nabouticon = html.I(Class='icon ion-md-information-circle pr-3')\naboutbutton <= abouticon\naboutbutton <= aboutlabel\nfooter <= aboutbutton\ngitbutton = html.A(href='https://github.com/bosoneando/mooglefuser', Class='btn btn-primary')\ngitlabel = document.createTextNode('See code on GitHub')\ngiticon = html.I(Class='icon ion-logo-github pr-3')\ngitbutton <= giticon\ngitbutton <= gitlabel\nfooter <= gitbutton\nbuilderbutton = html.A(href='https://ffbeequip.com/', Class='btn btn-primary')\nbuilderlabel = document.createTextNode('Visit ffbeequip')\nbuildericon = html.I(Class='icon ion-md-cog pr-3')\nbuilderbutton <= buildericon\nbuilderbutton <= builderlabel\nfooter <= builderbutton\ndocument <= footer\n\nunitdict = {}\nweapondict = {}\ninventory = {}\nflags = {'de':'germany', 'en':'great-britain', 'es':'spain', 'fr':'france', 'ko':'south-korea','zh':'china'}\nlang_buttons = []\nfor l, f in flags.items():\n if l == 'en':\n elt = html.BUTTON(Class='btn btn-outline-primary active', type='radio', name='lang', value=l, checked='true', data_toggle='button', title=l)\n else:\n elt = html.BUTTON(Class='btn btn-outline-primary', type='radio', name='lang', value=l, data_toggle='button', title=l)\n txt = html.IMG(src='https://img.icons8.com/color/24/000000/' + flags[l] + '.png', value=l)\n elt <= txt\n elt.bind('click', loadlanguage)\n langbar.appendChild(elt)\n lang_buttons.append(elt)\n\n\n\nbuttonrun.bind('click', moogles)\n","sub_path":"mooglefuser.py","file_name":"mooglefuser.py","file_ext":"py","file_size_in_byte":9290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"433601898","text":"import string\nfrom wordcloud import WordCloud\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\nclass EmojiCloud:\n def __init__(self,\n font_path='Symbola.ttf',\n color='yellow'):\n self.font_path = font_path\n self.color = color\n self.word_cloud = self.initialize_wordcloud()\n self.emoji_probability = None\n\n def initialize_wordcloud(self):\n word_cloud = WordCloud(font_path=self.font_path,\n width=2000,\n height=1000,\n background_color='white',\n random_state=42,\n collocations=False)\n return word_cloud\n\n def color_func(self, word, font_size, position, orientation, random_state=None,\n **kwargs):\n hue_saturation = 
{\n 'yellow': '42, 88%',\n 'blue': '194, 49%',\n 'green': '159, 42%',\n 'grey': '45, 2%'\n }.get(self.color)\n\n current_emoji_probability = self.emoji_probability[word]\n if current_emoji_probability >= 0.20:\n opacity = 50\n else:\n opacity = 75 - current_emoji_probability/0.2 * 5\n return f\"hsl({hue_saturation},{opacity}%)\"\n\n def generate(self, emojis):\n emoji_frequencies = Counter(emojis)\n total_count = len(emojis)\n self.emoji_probability = {emoji: count/total_count for emoji, count in emoji_frequencies.items()}\n wc = self.word_cloud.generate_from_frequencies(emoji_frequencies)\n plt.imshow(wc.recolor(color_func=self.color_func, random_state=42),\n interpolation=\"bilinear\")\n plt.axis(\"off\")","sub_path":"gists/emoji_cloud_class.py","file_name":"emoji_cloud_class.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630964810","text":"from sqlalchemy.types import TypeDecorator, LargeBinary\nimport msgpack\n\nfrom alpaca.common.persistence.types.mutable_dictionary import (\n MutableDictionary,\n)\n\n\nclass SerializedDictionary(TypeDecorator):\n\n impl = LargeBinary\n encoding = 'utf-8'\n\n def process_bind_param(self, value, dialect):\n if value is not None:\n value = msgpack.packb(value.data, encoding=self.encoding)\n return value\n\n def process_result_value(self, value, dialect):\n if value is not None:\n value = msgpack.unpackb(value, encoding=self.encoding)\n return value\n\n\nMutableDictionary.associate_with(SerializedDictionary)\n","sub_path":"alpaca/common/persistence/types/serialized_dictionary.py","file_name":"serialized_dictionary.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"320601344","text":"# Write your code below this line 👇\n\nimport math\n\n\ndef prime_checker(number):\n prime = True\n if number <= 1:\n prime = False\n for divisor in range(2, math.ceil(math.sqrt(number + 1))):\n if (number % divisor == 0):\n prime = False\n if prime:\n print(\"It's a prime number. \")\n else:\n print(\"It's not a prime number. 
\")\n\n\n# Write your code above this line 👆\n\n# Do NOT change any of the code below👇\nn = int(input(\"Check this number: \"))\nprime_checker(number=n)\n","sub_path":"Day_8_82_Interactive_Coding_Exercise_Prime_Number_Checker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"300051830","text":"import random\r\nimport numpy as np\r\nfrom nprime.pyprime import miller_rabin\r\nimport json\r\n\r\nfirstBinary = ''\r\nsecondBinary = ''\r\nxorData = []\r\nfirstBinaryArr = []\r\nsecondBinaryArr = []\r\nproNum1Arr = []\r\n\r\nfor x in range(50000):\r\n numList = []\r\n\r\n def checkPrime():\r\n count = 0\r\n while count < 4:\r\n num = str(random.randint(1000000000, 2000000000))\r\n last = num[len(num) - 1]\r\n if int(last) != 0 and int(last) != 5 and int(last) % 2 != 0:\r\n num = int(num)\r\n\r\n prime = miller_rabin(num, 40)\r\n\r\n if prime:\r\n # print(num)\r\n # print(\"YES\")\r\n numList.append(num)\r\n count += 1\r\n # else:\r\n # print(\"NO\")\r\n\r\n # count += 1\r\n if count == 2:\r\n break\r\n\r\n def getBinary():\r\n checkPrime()\r\n # print(numList)\r\n # print()\r\n\r\n # BASE 10 Operations\r\n # print('IN BASE 10:')\r\n onebin = numList[0]\r\n twobin = numList[1]\r\n proNum = onebin * twobin\r\n\r\n results = [proNum, onebin, twobin]\r\n return results\r\n\r\n getBinaryValues = getBinary()\r\n\r\n # Collect the binary values and their product\r\n proNum1 = str(getBinaryValues[0])\r\n firstBinary = str(getBinaryValues[1])\r\n secondBinary = str(getBinaryValues[2])\r\n\r\n firstBinaryArr.append(firstBinary)\r\n secondBinaryArr.append(secondBinary)\r\n proNum1Arr.append(proNum1)\r\n\r\n# print(percentData, len(percentData))\r\n\r\nf = open(\"week4.txt\", \"w\")\r\nfor x in range(len(proNum1Arr)):\r\n f.write(firstBinaryArr[x] + ' ' + secondBinaryArr[x] + ' ' + proNum1Arr[x] + '\\n')\r\nf.close()\r\n\r\n# listFile = []\r\n# with open('myFile.txt') as f:\r\n# for line in f:\r\n# listFile.append(line.strip())\r\n \r\n# listFileline = (listFile[0]).split(' ')\r\n# print(listFileline)\r\n# f.writelines(str(percentData))\r\n# for x in range(len(percentData)):\r\n# f.write(percentData[x])\r\n\r\n# f = open(\"myFile.txt\", \"r\")\r\n# print(f.read())\r\n# f.close()","sub_path":"MyPython/nitro.py","file_name":"nitro.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"488962413","text":"def hessian_free_newton(oracle: NewtonOracle, x: np.array, tolerance, max_iter, c1, display=True):\n benchmark = tolerance * norm(oracle.grad(x)) ** 2\n\n message = \"iterations_exceeded\"\n for i in range(max_iter + 1):\n func_val = oracle.func(x)\n grad = oracle.grad(x)\n grad_norm = norm(grad)\n\n # recorder.make_record_opt(func_val, grad_norm, x_k)\n\n if not valid_value(func_val):\n if display:\n print(\"computational error because of func_val: {}\".format(func_val))\n message = \"computational_error\"\n break\n\n if grad_norm ** 2 <= benchmark:\n message = \"success\"\n break\n\n hess = oracle.hess(x)\n\n theta = min(0.5, grad_norm ** 0.5)\n # print(\"theta = {}\".format(theta))\n direction, _ = cg(hess, -grad, -grad, theta)\n # print(\"np.dot(dir, grad) = {}\".format(np.dot(direction, grad)))\n\n while np.dot(direction, grad) >= 0:\n print(\"theta = {}\".format(theta))\n theta /= 10\n direction, _ = cg(hess, -grad, direction, theta)\n if np.invert(valid_value(direction)).any():\n 
if display:\n print(\"computational error because of invalid direction: {}\".format(direction))\n message = \"computational_error\"\n break\n\n if message == \"computational_error\":\n break\n\n max_alpha = get_alpha_max(x, direction)\n # print(\"alpha max = {}\".format(max_alpha))\n alpha = armijo_backtracking(oracle.func, grad, direction, x, get_alpha_max(x, direction), c1)\n # print(\"alpha = {}\".format(alpha))\n if not valid_value(alpha):\n if display:\n print(\"computational error because of alpha: {}\".format(alpha))\n message = \"computational_error\"\n break\n\n x = x + direction * alpha\n\n return x, message # , recorder.history\n\n\ndef conjugate_gradients(matvec, b, x_0, tolerance, max_iter=None):\n \"\"\"\n Solves system Ax=b using Conjugate Gradients method.\n Parameters\n ----------\n matvec : function\n Implement matrix-vector product of matrix A and arbitrary vector x\n b : 1-dimensional np.array\n Vector b for the system.\n x_0 : 1-dimensional np.array\n Starting point of the algorithm\n tolerance : float\n Epsilon value for stopping criterion.\n Stop optimization procedure and return x_k when:\n ||Ax_k - b||_2 <= tolerance * ||b||_2\n max_iter : int, or None\n Maximum number of iterations. if max_iter=None, set max_iter to n, where n is\n the dimension of the space\n Returns\n -------\n x_star : np.array\n The point found by the optimization procedure\n \"\"\"\n x = np.copy(x_0)\n\n if max_iter is None:\n max_iter = len(b)\n\n benchmark = tolerance * norm(b)\n Ax = matvec(x)\n g = Ax - b\n d = -g\n Ad = matvec(d)\n for _ in range(max_iter + 1):\n if norm(g) < benchmark:\n break\n x, Ax, g, d, Ad = _do_conjugate_grad_iter(x, Ax, g, d, Ad, b, matvec)\n\n return x\n\n\ndef _do_conjugate_grad_iter(x, Ax, g, d, Ad, b, matvec):\n \"\"\"Helper function for conjugate gradient method for solving equations\"\"\"\n alpha = np.dot(g, g) / np.dot(Ad, d)\n x_next = x + alpha * d\n Ax_next = Ax + alpha * Ad\n\n g_next = Ax_next - b\n d_next = -g_next + np.dot(g_next, g_next) / np.dot(g, g) * d\n Ad_next = matvec(d_next)\n\n return x_next, Ax_next, g_next, d_next, Ad_next\n","sub_path":"practical assignments/task 3/hessian_free_newton_stuff.py","file_name":"hessian_free_newton_stuff.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"296057329","text":"\nclass Create_Prism(object):\n\n def __init__(self, length, width, height, ID):\n self.length = length\n self.width = width\n self.ID = ID\n self.height = height\n\n def __str__(self):\n strprint = \"\\nPrism ID: \" + str(self.ID) + \" units\\n\"\n strprint += \"Length: \"+ str(self.length) + \" units\\n\"\n strprint += \"Width: \" + str(self.width) + \" units\\n\"\n strprint += \"Surface Area: \" + str(2 * ((self.width * self.length) + (self.height * self.length) + (self.height * self.width))) + \" units\\n\"\n strprint += \"Volume: \" + str((self.height) * (self.width) * (self.length)) + \" units\"\n strprint += \"\\n\"\n return strprint\n\n\nlistofboxes = []\ntotalBoxes = 1\nmenu = \"\"\"\n 0 - Create A Prism\n 1 - Print a Prism\n 2 - Print all Prisms\n 3 - Exit\n \"\"\"\n\nprint(menu)\n\noption = input(\"\\nWhat would you like to do? 
(0-3): \")\n\nwhile option != \"3\":\n\n \n if option == \"0\":\n length = int(input(\"Length: \"))\n width = int(input(\"Width: \"))\n height = int(input(\"Height: \"))\n ID = totalBoxes\n\n if totalBoxes == 1:\n prism = Create_Prism(length, width, height, ID)\n listofboxes.append(prism)\n elif totalBoxes == 2:\n prism_2 = Create_Prism(length, width, height, ID)\n listofboxes.append(prism_2)\n elif totalBoxes == 3:\n prism_3 = Create_Prism(length, width, height, ID)\n listofboxes.append(prism_3)\n totalBoxes += 1\n if option == \"1\":\n thisIsTrue = 1\n while thisIsTrue:\n try:\n if listofboxes:\n print(\"There are \" + str(len(listofboxes)) + \" boxes already created.\\n\")\n else:\n print(\"You have not created any prisms yet.\")\n the_prism = int(input(\"Which one, (0-3) 0 being the first: \"))\n print(listofboxes[the_prism])\n thisIsTrue = False\n except (IndexError, ValueError):\n print(\"That prism does not exist.\")\n if option == \"2\":\n if listofboxes:\n for i in listofboxes:\n print(i)\n else:\n print(\"There are no boxes to print\")\n\n print(menu)\n option = input(\"\\nWhat would you like to do?: \")\n\n","sub_path":"Excercise 2.py","file_name":"Excercise 2.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"72039729","text":"# reference http://www.cnblogs.com/huangcong/archive/2011/08/29/2158268.html\n# https://www.codecademy.com/en/tracks/python\n# \n# http://www.linuxidc.com/Linux/2012-02/53764.htm\n# \n# http://blog.csdn.net/andy812110/article/details/43153895\n# print(\"hello world\")\n\n# a = input(\"a\")\n\n# 题目描述\n\n# 请用python编写一个计算器的控制台程序,支持加减乘除、乘方、括号、小数点,运算符优先级为括号>乘方>乘除>加减,同级别运算按照从左向右的顺序计算。\n\n# 输入描述\n\n# 数字包括\"0123456789\",小数点为\".\",运算符包括:加(\"+\"),减(\"-\"),乘(\"*\"),除(\"/\"),乘方(\"^\"),括号(\"()\")\n# 需要从命令行参数读入输入,例如提交文件为main.py,可以用python main.py 1+2-3+4的方式进行调用\n# 输入需要支持空格,即 python main.py 1 + 2 - 3 + 4 也需要程序能够正确给出结果\n# 输出描述\n\n# 数字需要支持小数点,输出结果取10位有效数字,有效数字位数不足时不需要补0\n# 对于格式不合法(例如括号不匹配,等等)的输入,输出 FORMAT ERROR\n# 对于不符合运算符接收的参数范围(例如)的输入,输出VALUE ERROR\n# 对于不在输入描述内的输入,输出INPUT ERROR\n# 限制\n\n# 所有测试用例中参与运算的非零数字输入的绝对值范围保证在 10^9-10^(-10) 之内, 应该输出运算结果时非零运算结果绝对值也保证在该范围内\n\n# 样例\n\n# 输入: 1 + 2 - 3 + 4\n\n# 输出: 4\n\n# 输入: 1 + 2 - 3 + 1 / 3\n\n# 输出: 0.3333333333\n\n# 输入: 1 + + 2\n\n# 输出: FORMAT ERROR\n\n# 输入: 1 / 0\n\n# 输出: VALUE ERROR\n\n# 输入: a + 1\n\n# 输出: INPUT ERROR\n\n\n# print(a)\n\ndelimiter = ''\n# mylist = ['Brazil', 'Russia', 'India', 'China']\n# print (delimiter.join(mylist))\n\nimport sys\n\nprint(__name__)\n# print (\"脚本名:\", sys.argv[0])\n# for i in range(1, len(sys.argv)):\n# print (\"参数\", i, sys.argv[i] )\n# print(delimiter.join(sys.argv[1:]))\n# print(eval(sys.argv[1]))\n# \ninput_string = delimiter.join(sys.argv[1:])\n\nprint()\n\n# print(input_string)\n\n\n\n# check parence\n\nbrackets_pairs = True\nfor x in input_string:\n\tif x == '(' :\n\t\tbrackets_pairs = False\n\t# elif brackets_pairs == False and x==')':\n\telif x==')':\n\t\tif brackets_pairs == False :\n\t\t\tbrackets_pairs = True\n\t\telse :\n\t\t\tbrackets_pairs = False\n\t\t\tbreak\n# print(brackets_pairs)\nif not brackets_pairs :\n\tprint(\"FORMAT ERROR\")\n\tsys.exit()\n\t# return\n\nprint(eval(input_string))\n","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"180463711","text":"import os\n\nfolder_mama = r'C:\\Users\\Victor\\Desktop\\Tiktok' + '\\\\'\n\nfor 
folder, dir, files in os.walk(folder_mama):\n #print(folder.split())\n arr_folder = folder.split()\n arr_folder.pop(0)\n separator = ''\n nume_grafica = separator.join(arr_folder)\n #print(nume_grafica)\n for file in os.listdir(folder):\n #print(file)\n os.chdir(folder)\n #print(os.getcwd())\n nume_fisier_intreg = file.split('.')\n nume_fisier = nume_fisier_intreg[0]\n #print(nume_fisier)\n new_name = '{} {}.jpg'.format(nume_fisier, nume_grafica)\n print(new_name)\n #os.rename(file, new_name)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"100187961","text":"from unittest import TestCase, main\nfrom selenium import webdriver\nfrom pyunitreport import HTMLTestRunner\nfrom google_page import GooglePage\n\nclass GoogleTest(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Chrome(executable_path='./chromedriver.exe')\n\n def test_search(self):\n google = GooglePage(self.driver)\n google.open()\n google.search(\"Platzi\")\n\n self.assertEqual('Platzi', google.keyword)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n\n\nif __name__ == \"__main__\":\n main(verbosity=2, testRunner=HTMLTestRunner(\n output=\"google\", report_name=\"test_google\"))\n","sub_path":"pom/test_google.py","file_name":"test_google.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"331732517","text":"model = dict(\n type='FCOSMono3D',\n pretrained='open-mmlab://detectron2/resnet101_caffe',\n backbone=dict(\n type='ResNet',\n depth=101,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=False),\n norm_eval=True,\n style='caffe'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n start_level=1,\n add_extra_convs='on_output',\n num_outs=5,\n relu_before_extra_convs=True),\n bbox_head=dict(\n type='FCOSMono3DHead',\n num_classes=10,\n in_channels=256,\n stacked_convs=2,\n feat_channels=256,\n use_direction_classifier=True,\n diff_rad_by_sin=True,\n pred_attrs=True,\n pred_velo=True,\n dir_offset=0.7854, # pi/4\n strides=[8, 16, 32, 64, 128],\n group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo\n cls_branch=(256, ),\n reg_branch=(\n (256, ), # offset\n (256, ), # depth\n (256, ), # size\n (256, ), # rot\n () # velo\n ),\n dir_branch=(256, ),\n attr_branch=(256, ),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n loss_dir=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_attr=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_centerness=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n norm_on_bbox=True,\n centerness_on_reg=True,\n center_sampling=True,\n conv_bias=True,\n dcn_on_last_conv=True),\n train_cfg=dict(\n allowed_border=0,\n code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05],\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n use_rotate_nms=True,\n nms_across_levels=False,\n nms_pre=1000,\n nms_thr=0.8,\n score_thr=0.05,\n min_bbox_size=0,\n 
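# keep at most 200 detections per image after NMS\n        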
max_per_img=200))\n","sub_path":"autonomous_driving/occupancy_prediction/projects/configs/_base_/models/fcos3d.py","file_name":"fcos3d.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"266931417","text":"import csv\r\nimport sys\r\nimport keras\r\nimport numpy as np\r\nfrom keras.models import Sequential, model_from_json, Model\r\nfrom keras.callbacks import EarlyStopping, Callback\r\nfrom keras.layers import Dense, Embedding, Dropout, Bidirectional, Flatten\r\nfrom keras.layers import Dot,Add,Concatenate,Input,Reshape,Merge\r\n\r\n\t\r\ndef Normalize_all(data):\r\n\tdata_return = np.zeros(shape = data.shape)\r\n\tfor i in range(data.shape[1]):\r\n\t\tmean = np.mean(data[:,i])\r\n\t\tstd = np.std(data[:,i])  # use the standard deviation, not the mean\r\n\t\tdata_return[:,i] = (data[:,i]-mean)/std\r\n\treturn data_return\r\n\t\r\ndef deNormalize(y):\r\n\tnorm_y = (y*std)+mean\r\n\tnorm_y = norm_y.reshape(y.shape[0],1)\r\n\treturn norm_y\r\n\t\r\nnp.set_printoptions(suppress=True)\r\nnp.random.seed(20171209)\r\nTestFilePath = (sys.argv)[1]\r\noutFilePath = (sys.argv)[2]\r\n\r\nuser_test = []\r\nmovies_test = []\r\ndata = []\r\n\r\nbatch_size = 256\r\nmean = 0#3.58171\r\nstd = 1#1.1169\r\n\r\nprint('Parsing Data...')\r\n# reading csv to data\r\nwith open(TestFilePath, 'r') as csvfile:\r\n    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n    count = 0\r\n    for row in spamreader:\t\r\n        if(count == 0):\r\n            count += 1\r\n            continue\r\n        data.append(row)\r\n        \r\ndata = np.array(data).astype('int32')\r\n#np.random.shuffle(data)\r\n\r\nnumOfUser = 6040 #np.max(data[:,1])\r\nnumOfMovies = 3952 #np.max(data[:,2]) \r\n#print(len(userIdMap)) # 6040\r\n#print(len(movieIdMap)) # 3688\r\n\t\r\nuser_test = np.array(data[:,1]).reshape(data.shape[0],1).astype('int32')\r\nmovies_test = np.array(data[:,2]).reshape(data.shape[0],1).astype('int32')\r\n\r\nprint('user_test.shape :',user_test.shape)\r\nprint('movies_test.shape :',movies_test.shape)\r\n\r\n# load json and create model\r\njson_file = open('MF_model_frame.json', 'r')\r\nloaded_model_json = json_file.read()\r\njson_file.close()\r\nmodel = model_from_json(loaded_model_json)\r\nmodel.load_weights(\"MF_model_weight.h5\")\r\n\r\nmodel.summary()\r\n\r\nmodel.compile(loss='mse', optimizer=keras.optimizers.Adamax())#,decay=0.0001\r\n\r\n\r\ny_pred = model.predict([user_test, movies_test], verbose = 1, batch_size=batch_size)\r\ny_pred = deNormalize(y_pred)\r\n\r\nprint('y_pred.shape = ', y_pred.shape)\r\nprint(y_pred[:20,:])\r\n\r\n\r\n# writing output csv \r\nwith open(outFilePath, 'w', newline='') as csvfile:\r\n\tspamwriter = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n\tspamwriter.writerow(['TestDataID,Rating'])\r\n\tfor i in range(y_pred.shape[0]):\r\n\t\trow_list = [','.join([str(i+1),str(float(y_pred[i,0]))])]\r\n\t\tspamwriter.writerow(row_list)\r\n","sub_path":"hw5/hw5_MF_test.py","file_name":"hw5_MF_test.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"409341691","text":"import re\n\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\ndef link_seek(value):\n    if not value:\n        return None\n    for mins, secs in re.findall(\"(\\d\\d?):(\\d\\d)\", value):\n        time = int(mins)*60 + int(secs)\n        link = \"\"\"<a href=\"#seek-%s\">%s:%s</a>\"\"\" % (str(time), mins, secs)  # original anchor markup lost in extraction; a plain seek anchor is assumed here\n        value = value.replace(\":\".join((mins,secs)), link)\n    
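# every mm:ss match in value is now wrapped in its seek anchor\n    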
return mark_safe(value)\nregister.filter('link_seek', link_seek)\n","sub_path":"encoder/templatetags/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"34454983","text":"#!/usr/bin/env python\n#-*- coding:utf8 -*-\n\nimport os, sys, logging\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s',\n filename='/var/log/teleport_restart.log'\n)\n\ndef run():\n try:\n res1 = os.popen('ps -C tp_core -o pid,cmd').readlines()\n res2 = os.popen('ps -C tp_web -o pid,cmd').readlines()\n if len(res1) < 2 or len(res2) < 2:\n logging.warning(\"teleport need to restart\")\n if os.system(\"/etc/init.d/teleport restart\") == 0: # 0表示命令执行成功,1表示失败\n logging.info(\"teleport restart successful\")\n except:\n logging.error(sys.exc_info()[1])\n\nif __name__ == '__main__':\n run()\n","sub_path":"teleport_restart(crond).py","file_name":"teleport_restart(crond).py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"172375055","text":"import bpy, json, os, os.path\n\ndef load_json():\n fr = open('test_profile.json','r')\n jsonData = json.load(fr)\n # print(json.dump(jsonData, fr, indent=2))\n # print(jsonData)\n return jsonData\n\n# レンダレイヤの設定\ndef load_render_layers(context, Rlayers):\n # print(Rlayers)\n for Rlayer in Rlayers:\n layers = Rlayers[Rlayer][\"layer\"]\n exclude = Rlayers[Rlayer][\"exclude\"]\n mask = Rlayers[Rlayer][\"mask\"]\n\n # print(\"\\n\" + Rlayer + \": \")\n # print(\"\\tlayer : \" + str(layers))\n # print(\"\\texclude : \" + str(exclude))\n # print(\"\\tmask : \" + str(mask))\n\n new_render_layer = context.scene.render.layers.new(Rlayer)\n for i in range(1,20):\n if i in layers:\n new_render_layer.layers[i] = True\n else:\n new_render_layer.layers[i] = False\n if i in exclude:\n new_render_layer.layers_exclude[i] = True\n else:\n new_render_layer.layers_exclude[i] = False\n if i in mask:\n new_render_layer.layers_zmask[i] = True\n else:\n new_render_layer.layers_zmask[i] = False\n\ndef load_objects(context, blend_file, objects):\n blend_file = blend_file + \"\\\\Object\\\\\"\n for obj_name in objects:\n bpy.ops.wm.append(filename = obj_name, directory = blend_file)\n obj = bpy.data.objects[obj_name]\n for i in range(1,20):\n if i in objects[obj_name][\"obj_layers\"]:\n obj.layers[i] = True\n else:\n obj.layers[i] = False\n\ndef load_initial_settings(context, initial_settings):\n # 解像度\n context.scene.render.resolution_x = initial_settings[\"resolution\"][\"x\"]\n context.scene.render.resolution_y = initial_settings[\"resolution\"][\"y\"]\n # カラーマネジメント\n context.scene.view_settings.view_transform = initial_settings[\"color_management\"][\"view\"]\n context.scene.view_settings.exposure = initial_settings[\"color_management\"][\"exposure\"]\n context.scene.view_settings.gamma = initial_settings[\"color_management\"][\"gamma\"]\n context.scene.view_settings.look = initial_settings[\"color_management\"][\"look\"]\n\ncontext = bpy.context\nfp = bpy.data.filepath\ndirectory = os.path.dirname(fp)\nos.chdir(directory)\nappend_file = \"G:\\\\共有ドライブ\\\\10 PJフォルダ\\\\三陽商会\\\\SnS_CD\\\\design_work\\\\2019AW\\\\MENS_SETUP\\\\10_PRODUCTION\\\\04_blender_data\\\\master_rendering.blend\"\n\njsonData = load_json()\n# load_render_layers(jsonData[\"Rlayers\"])\n# load_objects(context, append_file, jsonData[\"objects\"])\nload_initial_settings(context, 
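teleport_restart above shells out via os.popen and counts lines of ps output; its Chinese comment translates to "0 means the command succeeded, 1 means failure". A hedged subprocess-based variant of the same watchdog, using exit codes instead of parsing ps output:

import logging
import subprocess

def process_running(name):
    # pgrep -x exits 0 when a process with exactly that name exists
    return subprocess.call(["pgrep", "-x", name],
                           stdout=subprocess.DEVNULL) == 0

def run():
    if not (process_running("tp_core") and process_running("tp_web")):
        logging.warning("teleport needs to restart")
        # exit status 0 means the restart command succeeded
        if subprocess.call(["/etc/init.d/teleport", "restart"]) == 0:
            logging.info("teleport restart successful")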
jsonData[\"initial_settings\"])","sub_path":"blender_scripts/json_to_blender_settings.py","file_name":"json_to_blender_settings.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"188226025","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\xlrd\\xlsx.py\n# Compiled at: 2013-10-17 14:03:42\nfrom __future__ import print_function, unicode_literals\nDEBUG = 0\nimport sys, re\nfrom .timemachine import *\nfrom .book import Book, Name\nfrom .biffh import error_text_from_code, XLRDError, XL_CELL_BLANK, XL_CELL_TEXT, XL_CELL_BOOLEAN, XL_CELL_ERROR\nfrom .formatting import is_date_format_string, Format, XF\nfrom .sheet import Sheet\nDLF = sys.stdout\nET = None\nET_has_iterparse = False\n\ndef ensure_elementtree_imported(verbosity, logfile):\n global ET\n global ET_has_iterparse\n if ET is not None:\n return\n else:\n if b'IronPython' in sys.version:\n import xml.etree.ElementTree as ET\n else:\n try:\n import xml.etree.cElementTree as ET\n except ImportError:\n try:\n import cElementTree as ET\n except ImportError:\n try:\n import lxml.etree as ET\n except ImportError:\n try:\n import xml.etree.ElementTree as ET\n except ImportError:\n try:\n import elementtree.ElementTree as ET\n except ImportError:\n raise Exception(b'Failed to import an ElementTree implementation')\n\n if hasattr(ET, b'iterparse'):\n _dummy_stream = BYTES_IO(b'')\n try:\n ET.iterparse(_dummy_stream)\n ET_has_iterparse = True\n except NotImplementedError:\n pass\n\n if verbosity:\n etree_version = repr([ (item, getattr(ET, item)) for item in ET.__dict__.keys() if item.lower().replace(b'_', b'') == b'version'\n ])\n print(ET.__file__, ET.__name__, etree_version, ET_has_iterparse, file=logfile)\n return\n\n\ndef split_tag(tag):\n pos = tag.rfind(b'}') + 1\n if pos >= 2:\n return (tag[:pos], tag[pos:])\n return (\n b'', tag)\n\n\ndef augment_keys(adict, uri):\n for x in list(adict.keys()):\n adict[uri + x] = adict[x]\n\n\n_UPPERCASE_1_REL_INDEX = {}\nfor _x in xrange(26):\n _UPPERCASE_1_REL_INDEX[b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[_x]] = _x + 1\n\nfor _x in b'123456789':\n _UPPERCASE_1_REL_INDEX[_x] = 0\n\ndel _x\n\ndef cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX):\n colx = 0\n charx = -1\n try:\n for c in cell_name:\n charx += 1\n lv = letter_value[c]\n if lv:\n colx = colx * 26 + lv\n else:\n colx = colx - 1\n assert 0 <= colx < X12_MAX_COLS\n break\n\n except KeyError:\n raise Exception(b'Unexpected character %r in cell name %r' % (c, cell_name))\n\n rowx = int(cell_name[charx:]) - 1\n return (rowx, colx)\n\n\nerror_code_from_text = {}\nfor _code, _text in error_text_from_code.items():\n error_code_from_text[_text] = _code\n\nU_SSML12 = b'{http://schemas.openxmlformats.org/spreadsheetml/2006/main}'\nU_ODREL = b'{http://schemas.openxmlformats.org/officeDocument/2006/relationships}'\nU_PKGREL = b'{http://schemas.openxmlformats.org/package/2006/relationships}'\nU_CP = b'{http://schemas.openxmlformats.org/package/2006/metadata/core-properties}'\nU_DC = b'{http://purl.org/dc/elements/1.1/}'\nU_DCTERMS = b'{http://purl.org/dc/terms/}'\nXML_SPACE_ATTR = b'{http://www.w3.org/XML/1998/namespace}space'\nXML_WHITESPACE = b'\\t\\n \\r'\nX12_MAX_ROWS = 1048576\nX12_MAX_COLS = 16384\nV_TAG = U_SSML12 + b'v'\nF_TAG = U_SSML12 + b'f'\nIS_TAG = U_SSML12 + b'is'\n\ndef unescape(s, 
subber=re.compile(b'_x[0-9A-Fa-f]{4,4}_', re.UNICODE).sub, repl=lambda mobj: unichr(int(mobj.group(0)[2:6], 16))):\n if b'_' in s:\n return subber(repl, s)\n return s\n\n\ndef cooked_text(self, elem):\n t = elem.text\n if t is None:\n return b''\n else:\n if elem.get(XML_SPACE_ATTR) != b'preserve':\n t = t.strip(XML_WHITESPACE)\n return ensure_unicode(unescape(t))\n\n\ndef get_text_from_si_or_is(self, elem, r_tag=U_SSML12 + b'r', t_tag=U_SSML12 + b't'):\n \"\"\"Returns unescaped unicode\"\"\"\n accum = []\n for child in elem:\n tag = child.tag\n if tag == t_tag:\n t = cooked_text(self, child)\n if t:\n accum.append(t)\n elif tag == r_tag:\n for tnode in child:\n if tnode.tag == t_tag:\n t = cooked_text(self, tnode)\n if t:\n accum.append(t)\n\n return (b'').join(accum)\n\n\ndef map_attributes(amap, elem, obj):\n for xml_attr, obj_attr, cnv_func_or_const in amap:\n if not xml_attr:\n setattr(obj, obj_attr, cnv_func_or_const)\n continue\n if not obj_attr:\n continue\n raw_value = elem.get(xml_attr)\n cooked_value = cnv_func_or_const(raw_value)\n setattr(obj, obj_attr, cooked_value)\n\n\ndef cnv_ST_Xstring(s):\n if s is None:\n return b''\n else:\n return ensure_unicode(s)\n\n\ndef cnv_xsd_unsignedInt(s):\n if not s:\n return None\n else:\n value = int(s)\n assert value >= 0\n return value\n\n\ndef cnv_xsd_boolean(s):\n if not s:\n return 0\n if s in ('1', 'true', 'on'):\n return 1\n if s in ('0', 'false', 'off'):\n return 0\n raise ValueError(b'unexpected xsd:boolean value: %r' % s)\n\n\n_defined_name_attribute_map = (\n (\n b'name', b'name', cnv_ST_Xstring),\n (\n b'comment', b'', cnv_ST_Xstring),\n (\n b'customMenu', b'', cnv_ST_Xstring),\n (\n b'description', b'', cnv_ST_Xstring),\n (\n b'help', b'', cnv_ST_Xstring),\n (\n b'statusBar', b'', cnv_ST_Xstring),\n (\n b'localSheetId', b'scope', cnv_xsd_unsignedInt),\n (\n b'hidden', b'hidden', cnv_xsd_boolean),\n (\n b'function', b'func', cnv_xsd_boolean),\n (\n b'vbProcedure', b'vbasic', cnv_xsd_boolean),\n (\n b'xlm', b'macro', cnv_xsd_boolean),\n (\n b'functionGroupId', b'funcgroup', cnv_xsd_unsignedInt),\n (\n b'shortcutKey', b'', cnv_ST_Xstring),\n (\n b'publishToServer', b'', cnv_xsd_boolean),\n (\n b'workbookParameter', b'', cnv_xsd_boolean),\n ('', 'any_err', 0),\n ('', 'any_external', 0),\n ('', 'any_rel', 0),\n ('', 'basic_formula_len', 0),\n ('', 'binary', 0),\n ('', 'builtin', 0),\n ('', 'complex', 0),\n ('', 'evaluated', 0),\n ('', 'excel_sheet_index', 0),\n ('', 'excel_sheet_num', 0),\n ('', 'option_flags', 0),\n ('', 'result', None),\n ('', 'stack', None))\n\ndef make_name_access_maps(bk):\n name_and_scope_map = {}\n name_map = {}\n num_names = len(bk.name_obj_list)\n for namex in xrange(num_names):\n nobj = bk.name_obj_list[namex]\n name_lcase = nobj.name.lower()\n key = (name_lcase, nobj.scope)\n if key in name_and_scope_map:\n msg = b'Duplicate entry %r in name_and_scope_map' % (key,)\n if bk.verbosity:\n print(msg, file=bk.logfile)\n name_and_scope_map[key] = nobj\n if name_lcase in name_map:\n name_map[name_lcase].append((nobj.scope, nobj))\n else:\n name_map[name_lcase] = [\n (\n nobj.scope, nobj)]\n\n for key in name_map.keys():\n alist = name_map[key]\n alist.sort()\n name_map[key] = [ x[1] for x in alist ]\n\n bk.name_and_scope_map = name_and_scope_map\n bk.name_map = name_map\n\n\nclass X12General(object):\n\n def process_stream(self, stream, heading=None):\n if self.verbosity >= 2 and heading is not None:\n fprintf(self.logfile, b'\\n=== %s ===\\n', heading)\n self.tree = ET.parse(stream)\n getmethod = 
self.tag2meth.get\n for elem in self.tree.getiterator():\n if self.verbosity >= 3:\n self.dump_elem(elem)\n meth = getmethod(elem.tag)\n if meth:\n meth(self, elem)\n\n self.finish_off()\n return\n\n def finish_off(self):\n pass\n\n def dump_elem(self, elem):\n fprintf(self.logfile, b'===\\ntag=%r len=%d attrib=%r text=%r tail=%r\\n', split_tag(elem.tag)[1], len(elem), elem.attrib, elem.text, elem.tail)\n\n def dumpout(self, fmt, *vargs):\n text = (b' ' + fmt + b'\\n') % vargs\n self.logfile.write(text)\n\n\nclass X12Book(X12General):\n\n def __init__(self, bk, logfile=DLF, verbosity=False):\n self.bk = bk\n self.logfile = logfile\n self.verbosity = verbosity\n self.bk.nsheets = 0\n self.bk.props = {}\n self.relid2path = {}\n self.relid2reltype = {}\n self.sheet_targets = []\n self.sheetIds = []\n\n core_props_menu = {U_CP + b'lastModifiedBy': (\n b'last_modified_by', cnv_ST_Xstring), \n U_DC + b'creator': (\n b'creator', cnv_ST_Xstring), \n U_DCTERMS + b'modified': (\n b'modified', cnv_ST_Xstring), \n U_DCTERMS + b'created': (\n b'created', cnv_ST_Xstring)}\n\n def process_coreprops(self, stream):\n if self.verbosity >= 2:\n fprintf(self.logfile, b'\\n=== coreProps ===\\n')\n self.tree = ET.parse(stream)\n getmenu = self.core_props_menu.get\n props = {}\n for elem in self.tree.getiterator():\n if self.verbosity >= 3:\n self.dump_elem(elem)\n menu = getmenu(elem.tag)\n if menu:\n attr, func = menu\n value = func(elem.text)\n props[attr] = value\n\n self.bk.user_name = props.get(b'last_modified_by') or props.get(b'creator')\n self.bk.props = props\n if self.verbosity >= 2:\n fprintf(self.logfile, b'props: %r\\n', props)\n self.finish_off()\n\n def process_rels(self, stream):\n if self.verbosity >= 2:\n fprintf(self.logfile, b'\\n=== Relationships ===\\n')\n tree = ET.parse(stream)\n r_tag = U_PKGREL + b'Relationship'\n for elem in tree.findall(r_tag):\n rid = elem.get(b'Id')\n target = elem.get(b'Target')\n reltype = elem.get(b'Type').split(b'/')[(-1)]\n if self.verbosity >= 2:\n self.dumpout(b'Id=%r Type=%r Target=%r', rid, reltype, target)\n self.relid2reltype[rid] = reltype\n if target.startswith(b'/'):\n self.relid2path[rid] = target[1:]\n else:\n self.relid2path[rid] = b'xl/' + target\n\n def do_defined_name(self, elem):\n if 0 and self.verbosity >= 3:\n self.dump_elem(elem)\n nobj = Name()\n bk = self.bk\n nobj.bk = bk\n nobj.name_index = len(bk.name_obj_list)\n bk.name_obj_list.append(nobj)\n nobj.name = elem.get(b'name')\n nobj.raw_formula = None\n nobj.formula_text = cooked_text(self, elem)\n map_attributes(_defined_name_attribute_map, elem, nobj)\n if nobj.scope is None:\n nobj.scope = -1\n if nobj.name.startswith(b'_xlnm.'):\n nobj.builtin = 1\n if self.verbosity >= 2:\n nobj.dump(header=b'=== Name object ===')\n return\n\n def do_defined_names(self, elem):\n for child in elem:\n self.do_defined_name(child)\n\n make_name_access_maps(self.bk)\n\n def do_sheet(self, elem):\n bk = self.bk\n sheetx = bk.nsheets\n rid = elem.get(U_ODREL + b'id')\n sheetId = int(elem.get(b'sheetId'))\n name = unescape(ensure_unicode(elem.get(b'name')))\n reltype = self.relid2reltype[rid]\n target = self.relid2path[rid]\n if self.verbosity >= 2:\n self.dumpout(b'sheetx=%d sheetId=%r rid=%r type=%r name=%r', sheetx, sheetId, rid, reltype, name)\n if reltype != b'worksheet':\n if self.verbosity >= 2:\n self.dumpout(b'Ignoring sheet of type %r (name=%r)', reltype, name)\n return\n state = elem.get(b'state')\n visibility_map = {None: 0, \n b'visible': 0, \n b'hidden': 1, \n b'veryHidden': 2}\n 
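process_stream above, and every X12* class below, drive parsing through a tag2meth dict that maps namespaced element tags to handler functions. Because the dict values are plain functions (not bound methods), the dispatcher passes self explicitly. The same pattern in miniature, with a hypothetical tag name:

class TinyParser(object):
    def do_row(self, elem):
        print('row element:', elem)

    # values are plain functions, so dispatch calls meth(self, elem),
    # exactly as process_stream() does
    tag2meth = {'row': do_row}

    def handle(self, tag, elem):
        meth = self.tag2meth.get(tag)
        if meth:
            meth(self, elem)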
bk._sheet_visibility.append(visibility_map[state])\n sheet = Sheet(bk, position=None, name=name, number=sheetx)\n sheet.utter_max_rows = X12_MAX_ROWS\n sheet.utter_max_cols = X12_MAX_COLS\n bk._sheet_list.append(sheet)\n bk._sheet_names.append(name)\n bk.nsheets += 1\n self.sheet_targets.append(target)\n self.sheetIds.append(sheetId)\n return\n\n def do_workbookpr(self, elem):\n datemode = cnv_xsd_boolean(elem.get(b'date1904'))\n if self.verbosity >= 2:\n self.dumpout(b'datemode=%r', datemode)\n self.bk.datemode = datemode\n\n tag2meth = {b'definedNames': do_defined_names, \n b'workbookPr': do_workbookpr, \n b'sheet': do_sheet}\n augment_keys(tag2meth, U_SSML12)\n\n\nclass X12SST(X12General):\n\n def __init__(self, bk, logfile=DLF, verbosity=0):\n self.bk = bk\n self.logfile = logfile\n self.verbosity = verbosity\n if ET_has_iterparse:\n self.process_stream = self.process_stream_iterparse\n else:\n self.process_stream = self.process_stream_findall\n\n def process_stream_iterparse(self, stream, heading=None):\n if self.verbosity >= 2 and heading is not None:\n fprintf(self.logfile, b'\\n=== %s ===\\n', heading)\n si_tag = U_SSML12 + b'si'\n elemno = -1\n sst = self.bk._sharedstrings\n for event, elem in ET.iterparse(stream):\n if elem.tag != si_tag:\n continue\n elemno = elemno + 1\n if self.verbosity >= 3:\n fprintf(self.logfile, b'element #%d\\n', elemno)\n self.dump_elem(elem)\n result = get_text_from_si_or_is(self, elem)\n sst.append(result)\n elem.clear()\n\n if self.verbosity >= 2:\n self.dumpout(b'Entries in SST: %d', len(sst))\n if self.verbosity >= 3:\n for x, s in enumerate(sst):\n fprintf(self.logfile, b'SST x=%d s=%r\\n', x, s)\n\n return\n\n def process_stream_findall(self, stream, heading=None):\n if self.verbosity >= 2 and heading is not None:\n fprintf(self.logfile, b'\\n=== %s ===\\n', heading)\n self.tree = ET.parse(stream)\n si_tag = U_SSML12 + b'si'\n elemno = -1\n sst = self.bk._sharedstrings\n for elem in self.tree.findall(si_tag):\n elemno = elemno + 1\n if self.verbosity >= 3:\n fprintf(self.logfile, b'element #%d\\n', elemno)\n self.dump_elem(elem)\n result = get_text_from_si_or_is(self, elem)\n sst.append(result)\n\n if self.verbosity >= 2:\n self.dumpout(b'Entries in SST: %d', len(sst))\n return\n\n\nclass X12Styles(X12General):\n\n def __init__(self, bk, logfile=DLF, verbosity=0):\n self.bk = bk\n self.logfile = logfile\n self.verbosity = verbosity\n self.xf_counts = [0, 0]\n self.xf_type = None\n self.fmt_is_date = {}\n for x in list(range(14, 23)) + list(range(45, 48)):\n self.fmt_is_date[x] = 1\n\n self.bk._xf_index_to_xl_type_map[0] = 2\n return\n\n def do_cellstylexfs(self, elem):\n self.xf_type = 0\n\n def do_cellxfs(self, elem):\n self.xf_type = 1\n\n def do_numfmt(self, elem):\n formatCode = ensure_unicode(elem.get(b'formatCode'))\n numFmtId = int(elem.get(b'numFmtId'))\n is_date = is_date_format_string(self.bk, formatCode)\n self.fmt_is_date[numFmtId] = is_date\n fmt_obj = Format(numFmtId, is_date + 2, formatCode)\n self.bk.format_map[numFmtId] = fmt_obj\n if self.verbosity >= 3:\n self.dumpout(b'numFmtId=%d formatCode=%r is_date=%d', numFmtId, formatCode, is_date)\n\n def do_xf(self, elem):\n if self.xf_type != 1:\n return\n xfx = self.xf_counts[self.xf_type]\n self.xf_counts[self.xf_type] = xfx + 1\n xf = XF()\n self.bk.xf_list.append(xf)\n self.bk.xfcount += 1\n numFmtId = int(elem.get(b'numFmtId', b'0'))\n xf.format_key = numFmtId\n is_date = self.fmt_is_date.get(numFmtId, 0)\n self.bk._xf_index_to_xl_type_map[xfx] = is_date + 2\n if self.verbosity 
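X12SST.process_stream_iterparse above clears each <si> element after extracting its text, so the in-memory tree never grows while streaming a large sharedStrings.xml. The core of that streaming pattern, isolated into a minimal sketch:

import xml.etree.ElementTree as ET

def stream_texts(stream, wanted_tag):
    for _event, elem in ET.iterparse(stream):
        if elem.tag == wanted_tag:
            yield elem.text
            elem.clear()   # release children so memory stays flat on huge files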
>= 3:\n self.dumpout(b'xfx=%d numFmtId=%d', xfx, numFmtId)\n self.dumpout(repr(self.bk._xf_index_to_xl_type_map))\n\n tag2meth = {b'cellStyleXfs': do_cellstylexfs, \n b'cellXfs': do_cellxfs, \n b'numFmt': do_numfmt, \n b'xf': do_xf}\n augment_keys(tag2meth, U_SSML12)\n\n\nclass X12Sheet(X12General):\n\n def __init__(self, sheet, logfile=DLF, verbosity=0):\n self.sheet = sheet\n self.logfile = logfile\n self.verbosity = verbosity\n self.rowx = -1\n self.bk = sheet.book\n self.sst = self.bk._sharedstrings\n self.warned_no_cell_name = 0\n self.warned_no_row_num = 0\n if ET_has_iterparse:\n self.process_stream = self.own_process_stream\n\n def own_process_stream(self, stream, heading=None):\n if self.verbosity >= 2 and heading is not None:\n fprintf(self.logfile, b'\\n=== %s ===\\n', heading)\n getmethod = self.tag2meth.get\n row_tag = U_SSML12 + b'row'\n self_do_row = self.do_row\n for event, elem in ET.iterparse(stream):\n if elem.tag == row_tag:\n self_do_row(elem)\n elem.clear()\n elif elem.tag == U_SSML12 + b'dimension':\n self.do_dimension(elem)\n\n self.finish_off()\n return\n\n def do_dimension(self, elem):\n ref = elem.get(b'ref')\n if ref:\n last_cell_ref = ref.split(b':')[(-1)]\n rowx, colx = cell_name_to_rowx_colx(last_cell_ref)\n self.sheet._dimnrows = rowx + 1\n self.sheet._dimncols = colx + 1\n\n def do_row(self, row_elem):\n\n def bad_child_tag(child_tag):\n raise Exception(b'cell type %s has unexpected child <%s> at rowx=%r colx=%r' % (cell_type, child_tag, rowx, colx))\n\n row_number = row_elem.get(b'r')\n if row_number is None:\n self.rowx += 1\n explicit_row_number = 0\n if self.verbosity and not self.warned_no_row_num:\n self.dumpout(b'no row number; assuming rowx=%d', self.rowx)\n self.warned_no_row_num = 1\n else:\n self.rowx = int(row_number) - 1\n explicit_row_number = 1\n assert 0 <= self.rowx < X12_MAX_ROWS\n rowx = self.rowx\n colx = -1\n if self.verbosity >= 3:\n self.dumpout(b' row_number=%r rowx=%d explicit=%d', row_number, self.rowx, explicit_row_number)\n letter_value = _UPPERCASE_1_REL_INDEX\n for cell_elem in row_elem:\n cell_name = cell_elem.get(b'r')\n if cell_name is None:\n colx += 1\n if self.verbosity and not self.warned_no_cell_name:\n self.dumpout(b'no cellname; assuming rowx=%d colx=%d', rowx, colx)\n self.warned_no_cell_name = 1\n else:\n colx = 0\n charx = -1\n try:\n for c in cell_name:\n charx += 1\n lv = letter_value[c]\n if lv:\n colx = colx * 26 + lv\n else:\n colx = colx - 1\n assert 0 <= colx < X12_MAX_COLS\n break\n\n except KeyError:\n raise Exception(b'Unexpected character %r in cell name %r' % (c, cell_name))\n\n if explicit_row_number and cell_name[charx:] != row_number:\n raise Exception(b'cell name %r but row number is %r' % (cell_name, row_number))\n xf_index = int(cell_elem.get(b's', b'0'))\n cell_type = cell_elem.get(b't', b'n')\n tvalue = None\n formula = None\n if cell_type == b'n':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == V_TAG:\n tvalue = child.text\n elif child_tag == F_TAG:\n formula = cooked_text(self, child)\n else:\n raise Exception(b'unexpected tag %r' % child_tag)\n\n if not tvalue:\n if self.bk.formatting_info:\n self.sheet.put_cell(rowx, colx, XL_CELL_BLANK, b'', xf_index)\n else:\n self.sheet.put_cell(rowx, colx, None, float(tvalue), xf_index)\n elif cell_type == b's':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == V_TAG:\n tvalue = child.text\n elif child_tag == F_TAG:\n formula = child.text\n else:\n bad_child_tag(child_tag)\n\n if not tvalue:\n if 
self.bk.formatting_info:\n self.sheet.put_cell(rowx, colx, XL_CELL_BLANK, b'', xf_index)\n else:\n value = self.sst[int(tvalue)]\n self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, value, xf_index)\n elif cell_type == b'str':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == V_TAG:\n tvalue = cooked_text(self, child)\n elif child_tag == F_TAG:\n formula = cooked_text(self, child)\n else:\n bad_child_tag(child_tag)\n\n self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index)\n elif cell_type == b'b':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == V_TAG:\n tvalue = child.text\n elif child_tag == F_TAG:\n formula = cooked_text(self, child)\n else:\n bad_child_tag(child_tag)\n\n self.sheet.put_cell(rowx, colx, XL_CELL_BOOLEAN, int(tvalue), xf_index)\n elif cell_type == b'e':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == V_TAG:\n tvalue = child.text\n elif child_tag == F_TAG:\n formula = cooked_text(self, child)\n else:\n bad_child_tag(child_tag)\n\n value = error_code_from_text[tvalue]\n self.sheet.put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)\n elif cell_type == b'inlineStr':\n for child in cell_elem:\n child_tag = child.tag\n if child_tag == IS_TAG:\n tvalue = get_text_from_si_or_is(self, child)\n else:\n bad_child_tag(child_tag)\n\n assert tvalue is not None\n self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index)\n else:\n raise Exception(b'Unknown cell type %r in rowx=%d colx=%d' % (cell_type, rowx, colx))\n\n return\n\n tag2meth = {b'row': do_row}\n augment_keys(tag2meth, U_SSML12)\n\n\ndef getzflo(zipfile, member_path):\n try:\n return zipfile.open(member_path)\n except AttributeError:\n return BYTES_IO(zipfile.read(member_path))\n\n\ndef open_workbook_2007_xml(zf, component_names, logfile=sys.stdout, verbosity=0, use_mmap=0, formatting_info=0, on_demand=0, ragged_rows=0):\n ensure_elementtree_imported(verbosity, logfile)\n bk = Book()\n bk.logfile = logfile\n bk.verbosity = verbosity\n bk.formatting_info = formatting_info\n if formatting_info:\n raise NotImplementedError(b'formatting_info=True not yet implemented')\n bk.use_mmap = False\n bk.on_demand = on_demand\n if on_demand:\n if verbosity:\n print(b'WARNING *** on_demand=True not yet implemented; falling back to False', file=bk.logfile)\n bk.on_demand = False\n bk.ragged_rows = ragged_rows\n x12book = X12Book(bk, logfile, verbosity)\n zflo = getzflo(zf, b'xl/_rels/workbook.xml.rels')\n x12book.process_rels(zflo)\n del zflo\n zflo = getzflo(zf, b'xl/workbook.xml')\n x12book.process_stream(zflo, b'Workbook')\n del zflo\n props_name = b'docProps/core.xml'\n if props_name in component_names:\n zflo = getzflo(zf, props_name)\n x12book.process_coreprops(zflo)\n x12sty = X12Styles(bk, logfile, verbosity)\n if b'xl/styles.xml' in component_names:\n zflo = getzflo(zf, b'xl/styles.xml')\n x12sty.process_stream(zflo, b'styles')\n del zflo\n sst_fname = b'xl/sharedStrings.xml'\n x12sst = X12SST(bk, logfile, verbosity)\n if sst_fname in component_names:\n zflo = getzflo(zf, sst_fname)\n x12sst.process_stream(zflo, b'SST')\n del zflo\n for sheetx in range(bk.nsheets):\n fname = x12book.sheet_targets[sheetx]\n zflo = getzflo(zf, fname)\n sheet = bk._sheet_list[sheetx]\n x12sheet = X12Sheet(sheet, logfile, verbosity)\n heading = b'Sheet %r (sheetx=%d) from %r' % (sheet.name, sheetx, fname)\n x12sheet.process_stream(zflo, heading)\n del zflo\n sheet.tidy_dimensions()\n\n return 
bk","sub_path":"pycfiles/pythonetl_xlrd-0.9.3dev-py2.7/xlsx.py","file_name":"xlsx.py","file_ext":"py","file_size_in_byte":25737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"516562169","text":"from sklearn.ensemble import RandomForestClassifier\n\nimport pickle, json\nimport numpy as np\n\nx_train = []\ny_train = []\n\nwith open(\"data/x_train.txt\", \"r\") as file:\n x_train = np.array(json.loads(''.join(list(map(lambda x: x.strip(), file.readlines())))))\n\nwith open(\"data/y_train.txt\", \"r\") as file:\n y_train = np.array(json.loads(''.join(list(map(lambda x: x.strip(), file.readlines())))))\n\n\nmodel = RandomForestClassifier(n_estimators = 100, max_depth = 9, random_state = 42)\nmodel.fit(x_train, y_train)\n\npickle.dump(model, open(\"model.pkl\", \"wb\"))\n","sub_path":"2_stage/task1/rfc_load.py","file_name":"rfc_load.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"85940954","text":"import os\nfrom typing import Dict, List\n\nfrom PIL import Image\nimport unittest\nimport numpy as np\nimport cv2\n\nfrom mystique.utils import load_od_instance\nfrom mystique.predict_card import PredictCard\nfrom mystique.arrange_card import CardArrange\nfrom mystique.extract_properties import CollectProperties\n\ncurr_dir = os.path.dirname(__file__)\n\n\nclass TestUtil:\n\n def collect_json_objects(self, image: Image,\n model_instance: PredictCard) -> Dict:\n \"\"\"\n Returns the dict of design objects collected from the prediction\n @param image: input PIL image\n @param model_instance: model instance object\n @return: dict of design objects\n \"\"\"\n image = image.convert(\"RGB\")\n image_np = np.asarray(image)\n image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)\n output_dict = model_instance.od_model.get_objects(\n image_np=image_np, image=image\n )\n return model_instance.collect_objects(output_dict=output_dict,\n pil_image=image)\n\n def collect_image_sizes(self, json_objects: Dict, image: Image) -> List:\n \"\"\"\n Returns the list of extracted image object sizes of the input image\n @param json_objects: dict of design objects\n @param image: input PIL image\n @return: list of image object sizes\n \"\"\"\n collect_properties = CollectProperties(image)\n image_objects = [image_object for image_object in\n json_objects[\"objects\"]\n if image_object[\"object\"] == \"image\"]\n for design_object in image_objects:\n property_object = getattr(collect_properties,\n design_object.get(\"object\"))\n design_object.update(property_object(design_object.get(\"coords\")))\n return [design_object.get(\"size\", \"\") for design_object in\n image_objects]\n\n\nclass TestIOU(unittest.TestCase):\n card_arrange = CardArrange()\n\n def setUp(self):\n self.image_path = os.path.join(\n curr_dir, \"../tests/test_images/test01.png\")\n self.test_util = TestUtil()\n self.model_instance = load_od_instance()\n self.model_instance = PredictCard(self.model_instance)\n\n self.image = Image.open(self.image_path)\n self.json_objects, _ = self.test_util.collect_json_objects(\n self.image, self.model_instance)\n\n def test_object_collection(self):\n \"\"\"Tests the collected object's count\"\"\"\n self.assertEqual(len(self.json_objects[\"objects\"]), 23)\n\n def test_iou(self):\n \"\"\"Tests the iou based noise removal count\"\"\"\n self.card_arrange.remove_noise_objects(self.json_objects)\n self.assertEqual(len(self.json_objects[\"objects\"]), 19)\n\n def 
test_image_size(self):\n \"\"\"Tests the image object sizes\"\"\"\n extracted_sizes = self.test_util.collect_image_sizes(self.json_objects,\n self.image)\n self.assertEqual(extracted_sizes, [\"Small\", \"Small\"])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"source/pic2card/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"267649879","text":"\n\nfrom xai.brain.wordbase.nouns._breakwater import _BREAKWATER\n\n#calss header\nclass _BREAKWATERS(_BREAKWATER, ):\n\tdef __init__(self,): \n\t\t_BREAKWATER.__init__(self)\n\t\tself.name = \"BREAKWATERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"breakwater\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_breakwaters.py","file_name":"_breakwaters.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"50613329","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Colormap\n\ndef show(X, y, clf):\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\n xx, yy = np.meshgrid( np.arange(x_min, x_max, 0.1),\n np.arange(y_min, y_max, 0.1))\n\n fig, ax = plt.subplots(1, 1, sharex='col', sharey='row', figsize=(10, 8))\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n ax.contourf(xx, yy, Z, alpha=0.4, cmap=plt.cm.RdYlBu)\n ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdYlBu)\n ax.set_title(\"Test\")\n\n plt.show()","sub_path":"Day 44/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196064341","text":"while(True):\n str_=input(\"nhap day 2 \")\n \n if len(str_)<8 :\n print(\"Nhap lai pass \")\n continue\n print(str_)\n break\n\n\n\n\n\n\n\n\n\n\n\n\ndayS0=\"1234567890\"\nstr_=input(\"nhap day \")\ni=0\n\n\nv=0\nwhile(True):\n if dayS0[i]==str_[v]:\n print(\"co so\")\n break\n else:\n if len(dayS0)-1==i:\n i=0\n v+=1\n elif len(str_)-1==v:\n print(\"khong co\")\n break\n i+=1\n\n\n# n=int(input(\"nhap so\"))\n# dem=0\n# while(True):\n# print(\"n= \",n)\n# n+=1\n# if n>=1000:\n# break","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131215767","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module provides tables for RNA secondary\nstructures and tools\n\"\"\"\n\n# import itertools\n\n__author__ = \"Bulak Arpat\"\n__copyright__ = \"Copyright 2017, Bulak Arpat\"\n__license__ = \"GPLv3\"\n__version__ = \"0.2.0\"\n__maintainer__ = \"Bulak Arpat\"\n__email__ = \"Bulak.Arpat@unil.ch\"\n__status__ = \"Development\"\n\n\n# G-quadruplex translation table\nGQUAD_LETTERS = [\"N\", \"G\"]\n# GQUAD_TABLE = {codon: GQUAD_LETTERS[\"G\" in codon] for codon in\n# [\"\".join(word) for word in itertools.product(\n# GQUAD_LETTERS, repeat=3)]}\n\n# NGN is intentially undefined. 
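The nested while loop in while.py above scans the input for any digit with manual index bookkeeping over dayS0 and str_; the same check, with the Vietnamese prompts translated in comments, reduces to a single membership test:

str_ = input("nhap day ")           # prompt: "enter a string"
if any(c in "1234567890" for c in str_):
    print("co so")                  # "contains a digit"
else:
    print("khong co")               # "no digit found"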
We should never come across this key\n# if we do it must be an error\nGQUAD_TABLE = {\n \"GGG\": \"G\",\n \"GGN\": \"G\",\n \"GNN\": \"G\",\n \"NNN\": \"N\",\n \"NNG\": \"G\",\n \"NGG\": \"G\",\n \"GNG\": \"G\"\n}\n","sub_path":"venv/lib/python3.4/site-packages/genomics/rna_struct.py","file_name":"rna_struct.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"127011653","text":"from bs4 import BeautifulSoup\n\nimport sites.biqu_parse as biqu\nfrom sites.site import BaseNovel\n\n__all__ = [\"Biqubook\"]\n\n\nclass Biqubook(BaseNovel):\n _encode = \"gbk\"\n\n source_site = \"http://www.biqubook.com\"\n source_title = \"笔趣阁\"\n\n def parse_base_info(self, content):\n biqu.parse_info(self, BeautifulSoup(content, \"html.parser\"), content)\n\n def parse_chapter_list(self, content):\n biqu.parse_chapters(self, BeautifulSoup(content, \"html.parser\"), False, \".listmain dd\")\n","sub_path":"sites/biqubook.py","file_name":"biqubook.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"318426770","text":"from django.shortcuts import render\nfrom forms import UserForm\nfrom django.contrib.auth import login\nfrom django.http import HttpResponseRedirect\n\ndef adduser(request):\n if request.method == \"POST\":\n form = UserForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(**form.cleaned_data)\n login(new_user)\n return HttpResponseRedirect('main.html')\n else:\n form = UserForm() \n\n return render(request, 'adduser.html', {'form': form}) \n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"155411097","text":"\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
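The adduser view in main/views.py above references User without importing it, and django.contrib.auth.login() requires the request as its first argument (as written, login(new_user) raises). A corrected sketch keeping the original flow:

from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render

from forms import UserForm

def adduser(request):
    if request.method == "POST":
        form = UserForm(request.POST)
        if form.is_valid():
            new_user = User.objects.create_user(**form.cleaned_data)
            login(request, new_user)  # fix: login() takes the request first
            return HttpResponseRedirect('main.html')
    else:
        form = UserForm()
    return render(request, 'adduser.html', {'form': form})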
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n# Imports\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom project.apicc import views\n\n\n#router = routers.DefaultRouter()\n#router.register(r'users', views.UserViewSet)\n\napp_name = 'apicc'\nurlpatterns = [\n# url(r'^', include(router.urls)),\n url(r'^$', views.index, name='index'),\n url(r'^updateDB/$', views.save_cryptoasset, name='updateDB'),\n url(r'^momentum_plot/(?P\\d+)/$', views.momentum_plot, name='momentum_plot'),\n url(r'^draw_mpl/$', views.draw_mpl, name='draw_mpl'),\n url(r'^momentum_plot_model/(?P\\d+)/$', views.momentum_plot, name='momentum_plot_model'),\n url(r'^draw_mpl_model/$', views.draw_mpl_model, name='draw_mpl_model'),\n]","sub_path":"project/apicc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"332861554","text":"from rest_framework import serializers\n\nfrom .models import Product\nfrom webshop.models import ProductVariant, Brand\n\n\nclass BrandSerializer(serializers.ModelSerializer):\n class Meta:\n model = Brand\n fields = [\n 'id',\n 'name'\n ]\n\n\nclass ProductVariantSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductVariant\n fields = [\n 'id',\n 'color',\n 'price',\n 'size',\n 'image',\n 'quantity'\n ]\n\n def get_image(self, product_variant):\n return self.context['request'].build_absolute_uri(product_variant.image)\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n variants = ProductVariantSerializer(many=True)\n brand = BrandSerializer()\n\n class Meta:\n model = Product\n fields = [\n 'id',\n 'name',\n 'brand',\n 'description',\n 'variants'\n ]\n","sub_path":"webshop/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"315953696","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom .models import Project, Rate, Review\nfrom django.contrib.auth.models import User\nfrom .forms import( UserRegisterForm, UserUpdateForm, ProfileUpdateForm, ProjectPostForm, RateForm, ReviewForm)\nfrom django.contrib.auth.decorators import login_required\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request, 'registration/register.html', {'form':form})\n\ndef index(request):\n return render(request, 'index.html')\n\n@login_required\ndef home(request):\n try:\n projects = Project.objects.all()\n except Exception as e:\n raise Http404()\n return render(request, 'home.html', {'projects':projects})\n\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST,request.FILES, instance=request.user.profile)\n\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, f'Account updated successfully!')\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form':u_form,\n 'p_form':p_form,\n }\n\n return render(request, 
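In ProductVariantSerializer above, get_image() is dead code: `image` is listed only in Meta.fields, so DRF serializes it as a plain model field and never calls the method. Wiring it up needs an explicit SerializerMethodField; a sketch keeping the original context usage (assuming `image` is a FileField/ImageField, hence `.url`):

from rest_framework import serializers
from webshop.models import ProductVariant

class ProductVariantSerializer(serializers.ModelSerializer):
    image = serializers.SerializerMethodField()

    class Meta:
        model = ProductVariant
        fields = ['id', 'color', 'price', 'size', 'image', 'quantity']

    def get_image(self, product_variant):
        # build_absolute_uri expects a path; the field's .url provides one
        return self.context['request'].build_absolute_uri(product_variant.image.url)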
'users/profile.html', context)\n\n@login_required\ndef post_project(request):\n current_user = request.user\n if request.method=='POST':\n form = ProjectPostForm(request.POST,request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = current_user\n post.save()\n return redirect(\"home\")\n else:\n form = ProjectPostForm()\n return render(request,'project_post.html',{'form':form})\n\n@login_required\ndef project_details(request,project_id):\n projects = Project.objects.filter(id=project_id)\n all_rates = Rate.objects.filter(project=project_id)\n \n count = 0\n for i in all_rates:\n count+=i.usability\n count+=i.design\n count+=i.content\n\n if count > 0:\n av_rate = round(count/3,1) \n else:\n av_rate = 0\n\n if request.method=='POST':\n form = RateForm(request.POST)\n if form.is_valid():\n rate = form.save(commit=False)\n rate.user = request.user\n rate.project = project_id\n rate.save()\n return redirect('project_details',project_id)\n else:\n form = RateForm()\n\n votes = Rate.objects.filter(project=project_id)\n usability = []\n design = []\n content = []\n\n for i in votes:\n usability.append(i.usability)\n design.append(i.design)\n content.append(i.content)\n if len(usability) > 0 or len(design)> 0 or len(content) >0:\n av_usability = round(sum(usability)/len(usability),1)\n av_design = round(sum(design)/len(design),1)\n av_content = round(sum(content)/len(content),1)\n avRating = round((av_content + av_design + av_usability)/3,1)\n else:\n av_usability = 0.0\n av_design = 0.0\n av_content = 0.0\n avRating = 0.0\n\n '''\n Restricting user to rate only once\n '''\n arr1=[]\n for use in votes:\n arr1.append(use.user_id)\n auth = arr1\n\n if request.method=='POST':\n r_form = ReviewForm(request.POST)\n if r_form.is_valid():\n review = r_form.save(commit=False)\n review.user = request.user\n review.profile_id = project_id\n review.save()\n return redirect('project_details',project_id)\n else:\n r_form = ReviewForm()\n r_form = ReviewForm()\n user_review = Review.objects.filter(profile_id=project_id)\n\n context = {\n 'projects':projects,\n 'form':form,\n 'usability':av_usability, \n 'design':av_design,\n 'content':av_content,\n 'average':avRating,\n 'auth':auth,\n 'all_rates':all_rates,\n 'av_rate':av_rate,\n 'r_form':r_form,\n 'review':user_review\n }\n return render(request,'project_details.html', context)\n\n@login_required\ndef search(request):\n if 'search' in request.GET and request.GET['search']:\n search_term = request.GET.get('search')\n results = Project.search_project(search_term)\n return render(request,'search.html',{'projects': results})\n else:\n message=\"You have not searched any project\"\n return render(request,'search.html',{'message': message})\n\n@login_required\ndef apiView(request):\n user = request.user\n title=\"Api\"\n profile = Profile.objects.filter(user=user)[0:1]\n return render(request,'api.html',{\"title\":title,'profile':profile})\n\n","sub_path":"awwards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"497192715","text":"#! 
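In project_details above, av_rate sums usability + design + content across every Rate row but always divides by 3, so the "average" grows with the number of votes instead of staying on the rating scale. A per-vote average would be:

if all_rates:
    total = sum(r.usability + r.design + r.content for r in all_rates)
    av_rate = round(total / (3 * len(all_rates)), 1)
else:
    av_rate = 0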
/usr/bin/env python3\n\nimport pandas as pd\n\nd1 = pd.read_csv(\"RMS_all_patient_malignant_largeVis_embeddings_IDheader.txt\", sep=\"\\t\", header = 0, engine='python')\nd2 = pd.read_csv(\"RMS_all_patient_malignant_metadata_IDHeader.txt\", sep=\"\\t\", header = 0, engine='python')\nd3 = pd.merge(d1, d2)\n\nd3.to_csv(\"merge_RMS_data.csv\")\n\n","sub_path":"merge_rds.py","file_name":"merge_rds.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"1866798","text":"'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torchvision\nimport torchvision.transforms as transforms\nfrom tensorboard_logger import configure, log_value\n\nimport os, sys, random\nimport argparse\n\nfrom autoaugment import CIFAR10Policy\n\n# from seperable_net import *\n# from snn_graph import *\nfrom snn_graph_2 import *\n# from snn_graph_3 import *\nfrom utils import progress_bar\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--split', default=1, type=int, help='number of devices to split the model')\nparser.add_argument('--epoch', default=200, type=int, help='epoch')\nparser.add_argument('--batch', default=128, type=int, help='batch')\nparser.add_argument('--schedule', default=50, type=int, help='schedule to decay learning rate')\nparser.add_argument('--cuda', default=-1, type=int, help='gpu index')\nparser.add_argument('--resume', '-r', default='', type=str, help='checkpoint path to resume')\nparser.add_argument('--save', default='exp', type=str, help='checkpoint path to save')\nargs = parser.parse_args()\n\nprint('===== parameter settings =====')\nprint('learining rate: %.4f'%args.lr)\nprint('split: %d'%args.split)\nprint('epoch: %d'%args.epoch)\nprint('schedule: %d'%args.schedule)\nprint('cuda: %d'%args.cuda)\nprint('resume: %s'%args.resume)\nprint('save: %s'%args.save)\nprint('===== parameter settings =====')\n\ntorch.backends.cudnn.benchmark = True\nif args.cuda==-1:\n device = 'cuda'\nelse:\n device = 'cuda:%d'%args.cuda #'cuda'#\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),\n# transforms.RandomRotation(5),\n CIFAR10Policy(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch, shuffle=True, num_workers=10)\n\ntestset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=args.batch, shuffle=False, num_workers=10)\n\n\n# Model\nprint('==> Building model..')\n# net = sresnet164_cifar(num_classes=100)\nnet = resneXt_cifar(56, 4, 16, num_classes=100, 
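pd.merge in merge_rds.py above joins on whatever columns the two frames happen to share, which can silently change if either header changes. Pinning down the key and join type is safer; the column name below is a hypothetical stand-in, not one taken from the actual files:

d3 = pd.merge(d1, d2, on="cell_id", how="inner")  # "cell_id" is hypothetical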
is_separate=True)\n# net = densenet_BC_cifar(250, 24, num_classes=100)\nif args.cuda==-1:\n net = nn.DataParallel(net)\nnet = net.to(device)\n# net2 = resneXt_cifar(56, 1, 16, num_classes=100, is_separate=True)\n# net2 = net2.to(device)\n# net3 = resneXt_cifar(56, 1, 16, num_classes=100, is_separate=True)\n# net3 = net3.to(device)\n# net4 = resneXt_cifar(56, 1, 16, num_classes=100, is_separate=True)\n# net4 = net4.to(device)\n# net5 = resneXt_cifar(56, 1, 16, num_classes=100, is_separate=True)\n# net5 = net4.to(device)\n\n# for p in net.parameters():\n# print(p.nelement())\nprint(net)\nprint(sum(p.numel() for p in net.parameters() if p.requires_grad))\n\nif args.resume:\n\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/%s'%args.resume)\n# checkpoint = torch.load('./checkpoint/resnext_56_1_16_167.t7')\n# checkpoint2 = torch.load('./checkpoint/resnext_56_1_16_re2_198.t7')\n# checkpoint5 = torch.load('./checkpoint/resnext_56_1_16_re5_157.t7')\n# checkpoint3 = torch.load('./checkpoint/resnext_56_1_16_re3_155.t7')\n# checkpoint4 = torch.load('./checkpoint/resnext_56_1_16_re4_159.t7')\n \n# for key in checkpoint['net'].keys():\n# if 'layer' in key and 'num_batches_tracked' not in key:\n# checkpoint['net'][key] = torch.cat([checkpoint['net'][key], checkpoint2['net'][key], checkpoint3['net'][key], checkpoint4['net'][key]], 0)\n# x = checkpoint['net'][key]\n# shape = x.shape\n# shape[0] = shape[0]//4\n# r = random.randint(0, shape[0]*3)\n# checkpoint['net'][key] = torch.cat([x, x, x, x], 0)\n# tensor = torch.randn(checkpoint['net'][key].shape)/50\n# checkpoint['net'][key] = checkpoint['net'][key].to('cpu')+tensor\n# substring = key.split('device_')\n# from_key = substring[0]+'device_1'+substring[1][1:] if len(substring)>=2 else key\n# if key!=from_key:\n# checkpoint['net'][key] = checkpoint['net'][from_key]\n\n\n net.load_state_dict(checkpoint['net'])\n# net2.load_state_dict(checkpoint2['net'])\n# net3.load_state_dict(checkpoint3['net'])\n# net4.load_state_dict(checkpoint4['net'])\n# net5.load_state_dict(checkpoint5['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\n# print(checkpoint['net'].keys())\n# sys.exit()\n\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\n# Training\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n log_value('train_loss', train_loss/(batch_idx+1), epoch)\n log_value('train_accuracy', correct/total, epoch)\n\ndef test(epoch):\n global best_acc\n net.eval()\n# net2.eval()\n# net3.eval()\n# net4.eval()\n# net5.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n# outputs = 
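The resume block above loads checkpoints straight into `net`, which is wrapped in nn.DataParallel only when --cuda is -1. Checkpoints saved under DataParallel carry a "module." prefix on every state-dict key, so loading one into a bare model needs a rename pass; a minimal sketch:

state = {k.replace('module.', '', 1): v for k, v in checkpoint['net'].items()}
net.load_state_dict(state)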
(net(inputs)+net2(inputs)+net3(inputs)+net5(inputs))\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n log_value('test_loss', test_loss/(batch_idx+1), epoch)\n log_value('test_accuracy', correct/total, epoch)\n\n # Save checkpoint.\n acc = 100.*correct/total\n if acc > best_acc:\n if epoch > args.schedule*2:\n print('Saving..')\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n net.to('cpu')\n torch.save(state, './checkpoint/%s_%d.t7'%(args.save, epoch))\n net.to(device)\n best_acc = acc\n\nconfigure('cv/'+args.save+'/log', flush_secs=5)\nfor epoch in range(args.epoch):\n \n# train(epoch)\n test(epoch)\n schedule = args.schedule\n \n if epoch in [schedule, schedule*2, schedule*3]:\n optimizer.param_groups[0]['lr'] /= 10\n\n print('now learning rate is %.4f'%optimizer.param_groups[0]['lr'])\n \n\n#python3 main.py --lr 0.1 --split 1 --epoch 200 --batch 128 --schedule 50 --cuda 1 --save resnext_56_2_16","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"205209115","text":"import torch.utils.data as data\nimport torch\nfrom torchvision import transforms\nimport torchvision.datasets as dset\nimport nltk\nimport numpy as np\nfrom torchvision import transforms\nimport torchvision.datasets as dset\nfrom PIL import Image, ImageDraw\n\nclass CocoCaptionDetection(data.Dataset):\n def __init__(self, root, captionAnnFile, instanceAnnFile, vocab, transform=None, target_transform=None):\n self.name = \"CocoCaptionDetection\"\n self.vocab = vocab\n self.captionSet = dset.CocoCaptions(root = root, annFile = captionAnnFile,\n transform=transforms.ToTensor())\n self.instanceSet = dset.CocoDetection(root = root, annFile = instanceAnnFile,\n transform=transforms.ToTensor())\n # assert len(self.captionSet) == len(self.instanceSet)\n assert self.captionSet.coco.imgs.keys() == self.instanceSet.coco.imgs.keys()\n\n # instances = json.load(open('/net/zf14/xy4cm/Projects/words2text/neuraltalk2/coco/annotations/instances_train2014.json','r'))\n # category_ids = [cat['id'] for cat in instances['categories']]\n coco_category_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27,\n 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53,\n 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80,\n 81, 82, 84, 85, 86, 87, 88, 89, 90]\n self.coco_category_id_map = {}\n for i in range(len(coco_category_ids)):\n self.coco_category_id_map[coco_category_ids[i]] = i\n\n def __getitem__(self, index, with_img=False):\n img, captions = self.captionSet[index]\n img_detection, target_detection = self.instanceSet[index]\n\n # make sure annotations refer to the same image\n if not torch.equal(img_detection, img):\n raise ValueError(\"annotations not refering to the same image\")\n\n # convert to percentage coords\n height, width = img.size()[1:]\n labels = np.array([self.coco_category_id_map[t['category_id']] for t in target_detection])\n # if there is no label put a dummy label\n if len(labels) == 0:\n labels = np.array([0])\n\n 
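The manual division of optimizer.param_groups[0]['lr'] at epochs schedule, 2*schedule, and 3*schedule above can be expressed with torch's built-in scheduler, keeping the decay logic out of the loop (same decay factor of 10):

from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(
    optimizer,
    milestones=[args.schedule, args.schedule * 2, args.schedule * 3],
    gamma=0.1)

for epoch in range(args.epoch):
    train(epoch)
    test(epoch)
    scheduler.step()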
boxes = np.array([t['bbox'] for t in target_detection])\n if boxes.shape[0] == 0:\n boxes = np.array([[0.0, 0.0, width, height]])\n boxes[:, 2] += boxes[:, 0]\n boxes[:, 3] += boxes[:, 1]\n boxes[:, 0] /= width\n boxes[:, 2] /= width\n boxes[:, 1] /= height\n boxes[:, 3] /= height\n assert np.all(boxes <= 1)\n target_detection = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n\n caption = captions[np.random.randint(len(captions))]\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(self.vocab(''))\n caption.extend([self.vocab(token) for token in tokens])\n caption.append(self.vocab(''))\n target_caption = torch.LongTensor(caption)\n\n if with_img:\n return img, target_caption, target_detection\n return target_caption, target_detection\n\n def __len__(self):\n return len(self.captionSet)\n\n def show_item(self, index):\n img, target_caption, target_detection = self.__getitem__(index, True)\n height, width = img.size()[1:]\n print(\"Image Size: \", img.size())\n print(target_caption)\n imgshow = transforms.ToPILImage()(img)\n draw = ImageDraw.Draw(imgshow)\n for box in target_detection[:, :4]:\n draw.rectangle([box[0]*width, box[1]*height, box[2]*width, box[3]*height])\n imgshow.show()\n\n\ndef collate_caption(data):\n \"\"\"Creates mini-batch tensors from the list of tuples (image, caption).\n\n We should build custom collate_fn rather than using default collate_fn,\n because merging caption (including padding) is not supported in default.\n Args:\n data: list of tuple (image, caption).\n - image: torch tensor of shape (3, 256, 256).\n - caption: torch tensor of shape (?); variable length.\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n targets: torch tensor of shape (batch_size, padded_length).\n lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length (descending order).\n data.sort(key=lambda x: len(x[0]), reverse=True)\n # images, captions, detections = zip(*data)\n captions, detections = zip(*data)\n\n detection_targets = [torch.FloatTensor(d) for d in detections]\n # Merge images (from tuple of 3D tensor to 4D tensor).\n # images = torch.stack(images, 0)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n caption_targets = torch.LongTensor(len(captions), max(lengths)).zero_()\n for i, cap in enumerate(captions):\n end = lengths[i]\n caption_targets[i, :end] = cap[:end]\n # return images, targets, lengths, detections\n return caption_targets, lengths, detection_targets","sub_path":"coco_dataset.py","file_name":"coco_dataset.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"55563676","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# 运算\na = 21\nb = 23\nc = 88\n\nc = a + b\nprint (c)\n\nc = a/b\nprint (float(c))\n\nc = a*b\nd = (a + b)*c - 1 # 运算符\nprint (c)\nprint (d)\n\n\n# 比较\na = 21\nb = 23\nc = 20\n\nif a == b:\n print (\"a equle b\")\n\nelse:\n print (\"a do not equal b\")\n","sub_path":"Learn/function/Calculate/Calculate.py","file_name":"Calculate.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"439558889","text":"#===============================================================================\n# DESCRIPTION\n# Contain the statistical model to perform the 
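The bbox handling above converts COCO-style [x, y, w, h] boxes into normalized [x1, y1, x2, y2] percentage coordinates. The same transform on a single box, for clarity:

import numpy as np

box = np.array([10.0, 20.0, 30.0, 40.0])   # COCO order: x, y, width, height
width, height = 100.0, 200.0
x1, y1 = box[0] / width, box[1] / height
x2 = (box[0] + box[2]) / width
y2 = (box[1] + box[3]) / height
print([x1, y1, x2, y2])                     # -> [0.1, 0.1, 0.4, 0.3]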
downscalling\n#===============================================================================\n\n# Todo\n# -> Thinking about merging other codes function into this module\n\n#===============================================================================\n# Library\n#===============================================================================\n\n\n# from mailib.stanet.lcbstanet\n\n# from stadata.lib.LCBnet_lib import *\nfrom scipy.optimize import curve_fit\nimport statsmodels.api as sm\n\nfrom sklearn.decomposition import PCA, KernelPCA\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import QuantileTransformer\nfrom scipy.stats import pearsonr\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport numpy as np\n\nfrom mailib.model.nn.structures import RNN_ribeirao, DNN_ribeirao\nfrom mailib.toolbox.tools import get_df_lags\nfrom mailib.esd.quantile_maping_santander import bias_correction\n\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\nfrom mailib.esd.quantile_mapping import qm\n\nclass StaMod():\n \"\"\"\n Contain function and plot to perform Empirical Orthogonal Function\n \n PARAMETERS\n df: dataframe with a variable for each stations data\n AttSta: att_sta_object with the attribut of the stations\n \"\"\"\n def __init__(self, df, AttSta):\n self.df = df\n \n self.AttSta = AttSta\n \n self.nb_PC = []\n self.eigenvalues = [] # or explained variance\n self.eigenvectors = [] # or loadings\n self.eigenpairs = [] # contain the eigen pairs of the PCA (scores and loadings)\n self.scores = pd.DataFrame([]) # contain the PC scores\n self.params_loadings =[] # contains the fit parameters for the PC loadings\n self.params_scores = [] # contains the fit parameters for the PC scores \n self.topo_index = [] # contain the topographic index at each station point\n self.standard = False # flag to see if the input data has been standardize\n self.scores_model = {} # contain the models for the scores\n self.loadings_model = {} # contain the models for the loadings\n\n def pca_transform(self, nb_PC=4,remove_mean0=False,remove_mean1=False, \n standard = False, sklearn=False,sklearn_kernel=False, cov=True):\n \"\"\"\n Perform the Principal component analysis with SKlearn\n using singular value fft\n The dataframe is standardize\n \n \n \n parameters:\n standard: default = True, standardize the dataframe\n nb_PC: default = 4, number of principal components to be used\n sklearn: if True (default=False) use svd by sklearn\n cov: if true (by default) sue the correlation matrix to perform the PCA analysis\n \n Stock in the object\n Dataframe with:\n eigenvalues\n eigenvectors\n scores\n list of vectors:\n eigenpairs\n \n NOTE:\n By default sklearn remove the mean from the dataset. 
So I can't use it to perform the downscaling\n        \n        References:\n            http://sebastianraschka.com/Articles/2015_pca_in_3_steps.html#projection-onto-the-new-feature-space\n        \"\"\"\n        df = self.df\n        self.nb_PC = nb_PC\n\n        if remove_mean0:\n            print('remove_mean0')\n            df = df.subtract(df.mean(axis=0), axis='columns')\n\n        if remove_mean1:\n            print('remove_mean1')\n            df = df.subtract(df.mean(axis=1), axis='index')\n            print(df)\n        \n        if standard:\n            # standardize\n#             df_std = StandardScaler().fit_transform(df)\n            self.standard = True\n            df = (df - df.mean(axis=0)) / df.std(axis=0) # another way to standardise\n        \n        \n        #=======================================================================\n        # Sklearn\n        #=======================================================================\n        if sklearn:\n            print(\"o\"*80)\n            print(\"SVD sklearn used\")\n            print(\"o\"*80)\n\n            if sklearn_kernel:\n                print('sklearn_kernel')\n                pca = KernelPCA(nb_PC, kernel=\"rbf\", fit_inverse_transform=True, gamma=10)\n            else:\n                # Create a PCA model with nb_PC principal components\n                pca = PCA(nb_PC)\n            # fit data\n            pca.fit(df)\n            \n            # Get the components from transforming the original data.\n            scores = pca.transform(df) # or PCs\n            eigenvalues = pca.explained_variance_\n            eigenvectors = pca.components_ # or loadings\n            \n            # Make a list of (eigenvalue, eigenvector) tuples from the arrays computed just above\n            # (self.eigenvalues is only assigned further down, so the local arrays must be used here)\n            self.eigenpairs = [(np.abs(eigenvalues[i]), eigenvectors[i, :]) for i in range(len(eigenvalues))]\n\n        #=======================================================================\n        # Covariance Matrix\n        #=======================================================================\n        if cov:\n            print(\"o\"*80)\n            print(\"Covariance used\")\n            print(\"o\"*80)\n            \n            X = df.values\n            cov_mat = np.cov(X.T)\n            eigenvalues, eigenvectors = np.linalg.eig(cov_mat)\n\n            scores = X.dot(eigenvectors)\n            scores = pd.DataFrame(scores, columns = np.arange(1,len(df.columns)+1), index=df.index)\n            eigenvalues = pd.Series(eigenvalues, index= np.arange(1,len(df.columns)+1))\n            eigenvectors = pd.DataFrame(eigenvectors.T, columns=df.columns, index=np.arange(1,len(df.columns)+1))\n\n\n        self.scores = scores.iloc[:, 0:nb_PC]\n        self.eigenvalues = eigenvalues#[0:nb_PC]\n        self.eigenvectors = eigenvectors[0:nb_PC]\n\n\n        tot = sum(eigenvalues)\n        self.var_exp = [(i / tot)*100 for i in sorted(eigenvalues, reverse=True)]\n\n\n    def pca_reconstruct(self, pcs = None):\n        \"\"\"\n        Reconstruct the original dataset with the \"nb_PC\" principal components\n        Note:\n            The idea is to reconstruct by hand to check that the downscaling is done correctly\n        \n        pcs: PCs to use to reconstruct the data\n        \"\"\"\n        eigenvectors = self.eigenvectors\n        scores = self.scores\n        \n#         df = pd.DataFrame(columns=eigenvectors.columns, index=scores.index)\n\n        if pcs is None:\n            pcs = scores.columns\n\n        dfs_pcs = {}\n\n        for pc in pcs:\n            print(pc)\n            loads = pd.concat([eigenvectors.loc[pc,:] for i in range(len(scores.loc[:,pc]))], axis=1).T # loading matrix\n            loads.index = scores.loc[:,pc].index\n            \n            sc = pd.concat([scores.loc[:,pc] for i in range(len(eigenvectors.loc[pc,:]))], axis=1) # scores matrix\n            sc.columns = loads.columns\n            df = sc.multiply(loads)\n            dfs_pcs[pc] = df\n#             for sta in eigenvectors.columns:\n#                 for i, PC in enumerate(pcs):\n#                     scores[PC]*eigenvectors[sta][PC]\n#                 else:\n#                     df[sta] = df[sta] + scores[PC]*eigenvectors[sta][PC]\n#         print df\n        return dfs_pcs\n\n#     def curvfit_loadings(self, predictors=None, fit=None):\n#         \"\"\"\n#         DESCRIPTION\n#             Fit the loadings of the principal components with an input independent variable\n#             (In the application of the LCB it is most 
probably the altitude or its derivative)\n# RETURN\n# The parameters of the linear regression\n# predictors: topographic parameters to fit the loadings\n# namerasters = [\n# \"8x8\",\n# \"topex\",\n# \"xx8_10000_8000____\",\n# \"xx8_20000_18000____\"\n# ]\n# params_sep: list of parameters value to perfom 2 linear regression\n# curvfit: type of fit that you want to use, linear or poly\n# \"\"\"\n#\n#\n# if not predictors:\n# params = ['Alt']* self.nb_PC\n#\n# if not fit:\n# fit = [lin]* self.nb_PC\n#\n# fit_parameters = []\n#\n# for PC, row in self.eigenvectors.iterrows():\n# X = np.array(self.AttSta.getatt(self.df.keys(), predictors[PC - 1]))\n# self.topo_index.append(X)\n#\n#\n# popt, pcov = curve_fit(fit[PC-1], X, row)\n# fit_parameters.append([x for x in popt])\n#\n# # print fit_parameters\n# # fit_parameters = np.vstack(fit_parameters)\n# # print\n# self.predictors = [pd.DataFrame(fit_parameters, index =range(1,self.nb_PC+1), columns = range(4))]\n# self.curvfit_loadings = fit\n# return self.params_loadings\n#\n# def curvfit_scores(self, predictors, fit=None):\n# \"\"\"\n# DESCRIPTION\n# Fit the Scores of the principal components with a variables\n# Input\n# A serie\n# Return\n# The parameters of the linear regression\n#\n# \"\"\"\n#\n# if not fit:\n# fit = [lin]* self.nb_PC\n#\n# scores = self.scores\n#\n# fit_parameters = []\n# for i, predictor in enumerate(predictors):\n# predictor = predictor.dropna(axis=0, how='any')\n# score = scores.iloc[:,i]\n#\n# df = pd.concat([predictor, score], axis=1, join='inner')\n#\n# popt, pcov = curve_fit(fit[i-1], df.iloc[:,0], df.iloc[:,1])\n#\n# fit_parameters.append([x for x in popt])\n#\n# fit_parameters = np.vstack(fit_parameters)\n# self.params_scores = [pd.DataFrame(fit_parameters, index =range(1,self.nb_PC+1), columns = range(len(popt)))]\n# self.curvfit_scores = fit\n# return self.params_scores\n#\n# def curvfit_predict(self, predictors_loadings, predictors_scores, params_loadings=None, params_scores=None):\n# \"\"\"\n# DESCRIPTION\n# Return an 1/2d array estimated with the previously fit's parameters and predictos\n# RETURN\n# a dictionnary of 3D numpy array containing the estimated \"loadings\", \"scores\" and \"reconstructed variable\"\n# INPUT\n#\n# predictors_loadings: 1/2d array, to be used with the loading\n# parameters to create a loading 2d array\n# predictors_scores: Serie, to be used with the scores params to reconstruct the scores\n#\n# params_loadings: (Optional) A dataframe of the parameters of the linear regression from\n# curvfit_loadings. By default, the methods will look for params_loadings in the object.\n# If it does not exist you should run curvfit_loadings\n# parms_scores: (Optional) Parameters dataframe from curvfit_scores. By default, the methods will look for params_loadings in the object.\n# If it does not exist you should run curvfit_scores\n#\n# TODO\n# I should implement a way to use a pandas serie for the input \"predictor instead of a number\n# \"\"\"\n#\n# loadings = []\n# scores = []\n# predicted = []\n#\n# curvfit_loadings = self.curvfit_loadings\n# curvfit_scores = self.curvfit_scores\n#\n#\n# if (not params_loadings or not params_scores):\n# print 'Getting parameters'\n# params_loadings, params_scores = self.get_params()\n#\n#\n# # NEED TO IMPLEMENT MATRIX MULTIPLICATION!!!!!!!!!!!!!! 
I use too many loops\n#         for PC_nb, fit_loading, fit_score, predictor_loadings, predictor_scores in zip(range(1,self.nb_PC+1),curvfit_loadings, curvfit_scores,predictors_loadings,predictors_scores ):\n#             loading_est = fit_loading(predictor_loadings, *params_loadings[0].loc[PC_nb,:])\n#             score_est =fit_score(predictor_scores, *params_scores[0].loc[PC_nb,:])\n#\n#             score = pd.concat([score_est]*len(loading_est), axis=1)\n#             predict= score.multiply(loading_est)\n#\n#             loadings.append(loading_est)\n#             scores.append( score_est)\n#             predicted.append( predict)\n#\n#         loadings = np.array(np.dstack(loadings))\n#         scores = np.array(np.dstack(scores))\n#         predicted = np.array(np.dstack(predicted))\n#\n#\n#         res = {'loadings':loadings, 'scores':scores, 'predicted': predicted}\n#         return res\n    \n    def fit_curvfit(self, X, Y, fits=None):\n        \"\"\"\n        Fit using scipy's curve_fit\n        \n        Parameters: \n            X, a list of dataframes\n            Y, a dataframe (scores or loadings to fit)\n            fits, a list with the function to fit for each component\n        \n        \"\"\"\n\n        res_predictors_name = []\n        res_fits = []\n        res_fit_parameters = []\n        for i in range(len(Y.keys())):\n\n            x = X[i]\n            y = Y.iloc[:,i]\n            fit = fits[i]\n\n            popt, pcov = curve_fit(fit, x.values, y.values)\n\n            try:\n                res_predictors_name.append([x.columns])\n            except AttributeError:\n                res_predictors_name.append([x.name])\n            res_fit_parameters.append([popt])\n            res_fits.append(fit)\n\n#         fit_parameters = np.vstack(fit_parameters)\n        res_predictors_name = pd.DataFrame(res_predictors_name, index =Y.keys())\n        # res_predictors_name.index = Y.keys()\n        res_fit_parameters = pd.DataFrame(res_fit_parameters, index =Y.keys())\n        res_fits = pd.DataFrame(res_fits, index =Y.keys())\n\n        df_models = pd.concat([res_predictors_name, res_fits, res_fit_parameters],axis=1)\n        df_models.columns = ['predictor', 'model','params' ]\n        df_models.index = Y.keys() #range(1,len(Y.keys())+1 )\n\n        return df_models \n    \n#     def curvfit_skill(self, df_verif, predictors_scores, metrics, params_loadings=None, params_scores=None):\n#         \"\"\"\n#         DESCRIPTION\n#             Compute bias and RMSE to assess the model performance\n#         INPUT\n#             df_verif: dataframe with the observed values\n#             predictors: a list of pandas series which contains the predictors for the scores SHOULD NOT BE A LIST\n#             metrics: sklearn metric function to be used\n#                 see: http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics\n#                 example:\n#                     metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function\n#                     metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss\n#                     metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss\n#                     metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss\n#                     metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.\n#         \"\"\"\n\n#         if (not params_loadings or not params_scores):\n#             params_loadings, params_scores = self.get_params()\n#\n#         topo_index = self.topo_index\n#         data = np.array([])\n#         res = self.predict(topo_index, predictors_scores)\n#         data = res['predicted'].sum(axis=2)\n#         df_rec = pd.DataFrame(data, columns = df_verif.columns, index = predictors_scores[0].index) # should improve this\n#\n#         score = pd.Series()\n#\n#         for sta in df_rec:\n#             df = pd.concat([df_verif[sta],df_rec[sta]], axis=1, join='inner')\n#             df = df.dropna(axis=0)\n#             # df.columns=['True', 'Pred']\n#             df.plot()\n#             plt.show()\n#             score[sta] = metrics(df.iloc[:,0], df.iloc[:,1])\n#         return score\n    def get_curvfit_params(self):\n        \"\"\"\n        
DESCRIPTION\n            Return the loadings and scores parameters, and raise an error if the model has not been fitted\n        \"\"\"\n        \n        try:\n            params_loadings = self.params_loadings\n            params_scores = self.params_scores\n        except AttributeError:\n            raise AttributeError( \"The model has not been fitted, run curvfit_loadings or curvfit_scores\")\n        \n        return params_loadings, params_scores\n\n    def plot_exp_var(self, output=None):\n        \"\"\"\n        DESCRIPTION\n            Make a plot of the variance explained by the principal components\n        \"\"\"\n        print(\"Plot explained variance\")\n        var_exp = self.var_exp\n        cum_var_exp = np.cumsum(var_exp)\n\n        var_exp = var_exp[:self.nb_PC]\n        idxs = range(1,self.nb_PC+1)\n\n\n        ax = plt.figure(figsize=(6, 4)).gca()\n        \n        plt.bar(idxs, var_exp, alpha=0.5, align='center',\n                label='individual explained variance')\n        plt.step(idxs, cum_var_exp[:self.nb_PC], where='mid',\n                 label='cumulative explained variance')\n        ax.set_xticks(idxs)\n        ax.set_xticklabels(idxs)\n        plt.ylabel('Explained variance (%)')\n        plt.xlabel('Principal components')\n        plt.legend(loc='best')\n        plt.tight_layout()\n        plt.grid(True, color='0.5')\n        \n        if output:\n            plt.savefig(output)\n            plt.close()\n        else:\n            plt.show()\n\n    def plot_loading(self, params_topo=None, params_fit=None, output=False, fit=None):\n        \"\"\"\n        DESCRIPTION\n            Plot the loadings as a function of some parameters\n        Parameters\n            output: if given, save the plot at the path indicated\n            params: parameters of the linear regression between\n            loadings and independent variables \n        \"\"\"\n\n        if not params_topo:\n            params_topo = ['Alt']* self.nb_PC\n        \n        if not fit:\n            fit = [None]* self.nb_PC\n        \n        \n        for pc_nb, param_topo, func in zip(range(1,self.nb_PC+1), params_topo, fit):\n            elev_real = self.AttSta.getatt(self.df.keys(),param_topo) \n            fig, ax = plt.subplots()\n            plt.scatter(self.eigenvectors.loc[pc_nb], elev_real, s=20)\n\n            if isinstance(params_fit, pd.DataFrame):\n                x = np.linspace(min(elev_real), max(elev_real),100)\n                p = params_fit.loc[pc_nb,:].dropna()\n                \n                y = func(x, *p)\n\n                plt.plot(y,x)\n            plt.xlabel('PC'+str(pc_nb)+' loadings')\n            plt.ylabel(\"Altitude (m)\")\n            plt.grid(True, color='0.5')\n\n\n            for i, txt in enumerate(self.df.columns):\n                ax.annotate(txt, (self.eigenvectors.loc[pc_nb][i], elev_real[i]))\n            if output:\n                plt.savefig(output+str(pc_nb)+'.pdf', transparent=True)\n\n            plt.show()\n        \n    def plot_scores(self, predictors, params_fit =None,fit=None, output=False):\n        \"\"\"\n        DESCRIPTION\n            Make a scatter plot of the Principal component scores as a function of other variables\n        INPUT\n            var: time series with the same index as the dataframe used in the PCA\n        \"\"\"\n        \n        \n        scores = self.scores\n        \n        for i, predictor, func in zip(range(0,self.nb_PC), predictors, fit):\n            predictor = predictor.dropna(axis=0, how='any')\n            score = scores.iloc[:,i]\n            df = pd.concat([predictor, score], axis=1, join='inner')\n\n            plt.scatter(df.iloc[:,0],df.iloc[:,1] )\n            \n            x = np.linspace(min(predictor), max(predictor),100)\n            p = params_fit.loc[i+1,:]\n            y = func(x, *p)\n            \n            plt.figure()\n            plt.plot(x,y)\n            plt.grid(True)\n            plt.show()\n\n        if output:\n            plt.savefig(output)\n        else:\n            plt.show()\n        \n    def plot_scores_ts(self, output=False):\n        \"\"\"\n        DESCRIPTION\n            Plot the scores time series\n        Parameters\n            output: if given, save the plot at the path indicated\n        \"\"\"\n        scores = self.scores\n        scores.plot(subplots=True)\n        plt.xlabel('Time')\n        plt.ylabel(\"PCs time series\")\n        plt.grid(True, color='0.5')\n        if output:\n            plt.savefig(output)\n        else:\n            plt.show()\n\n    def predict_model(self, predictors_loadings, 
predictors_scores=None,\n model_loadings=None, model_scores=None, model_loading_curvfit=None, model_scores_curvfit=None,\n observed_scores = False, dnn_model_scores =None, rnn_model_scores =None, use_predict_func_loading = None,\n nb_PC = None, qm_maps=None,obs_scores=False, idx_test=None, idx_train=None, eqm=False):\n\n \"\"\"\n\n\n :param predictors_loadings:\n :param predictors_scores:\n :param model_loadings:\n :param model_scores:\n :param model_loading_curvfit:\n :param model_scores_curvfit:\n :param observed_scores: Use the observed scores instead of the modeled scores for the reconstruction\n :param dnn_model_scores:\n :param use_predict_func_loading:\n :param nb_PC:\n :return:\n \"\"\"\n\n\n# for PC_nb, fit_loading, predictor_loadings in zip(range(1,self.nb_PC+1),fit_loadings,predictors_loadings):\n# -\n# + print PC_nb\n# p = params_loadings[0].loc[PC_nb,:].dropna()\n# p = params_loadings[0].loc[PC_nb,:].dropna()\n# loading_est = fit_loading(predictor_loadings, *p)\n# loading_est = fit_loading(predictor_loadings, *p)\n\n loadings = []\n scores = []\n predicted = []\n\n for PC_nb in range(1,nb_PC+1):\n\n if not isinstance(obs_scores, type(False)):\n index = obs_scores.index\n y_scores = obs_scores.values\n y_scores = y_scores[:, PC_nb-1]\n else:\n\n # get the predictors\n x_scores = predictors_scores.loc[:, model_scores['predictor'][PC_nb]] # TOdo warning a bug on the selection of predicotors\n model_scores_pc = model_scores['model'][PC_nb]\n\n # get the models\n x_loadings = predictors_loadings.loc[:, model_loadings['predictor'][PC_nb]]\n model_loading_pc = model_loadings['model'][PC_nb]\n\n if model_scores_curvfit:\n pass\n elif dnn_model_scores:\n\n x_scores = pd.DataFrame(model_scores_pc.scaler_x.transform(x_scores),index=x_scores.index, columns=x_scores.columns) # scale as for training\n\n y_scores = model_scores_pc.predict(x_scores)\n y_scores = model_scores_pc.scaler_y.inverse_transform(y_scores).flatten()\n\n index = x_scores.index\n\n elif rnn_model_scores:\n\n x_scores = pd.DataFrame(model_scores_pc.scaler_x.transform(x_scores), index=x_scores.index,\n columns=x_scores.columns) # scale as for training\n # preprocessing for RNN\n X_lag = get_df_lags(x_scores, lags=range(model_scores_pc.sequence_length))\n index = X_lag.index\n # Y = Y.loc[X_lag.index, :] # get same index\n X_lag = X_lag.values.reshape(X_lag.shape[0], model_scores_pc.sequence_length, int(X_lag.shape[1] / model_scores_pc.sequence_length))\n\n y_scores = model_scores_pc.predict(X_lag)\n y_scores = model_scores_pc.scaler_y.inverse_transform(y_scores).flatten()\n\n elif not isinstance(obs_scores, pd.DataFrame):\n\n if model_scores_pc.k_constant == 1 :\n x_scores = sm.add_constant(x_scores)\n\n if isinstance(observed_scores, pd.DataFrame): # use observed score instead of predicted scores\n y_scores = observed_scores.loc[:,PC_nb].values\n else:\n y_scores = model_scores_pc.predict(x_scores)\n index = x_scores.index\n\n # Model loadings\n if model_loading_curvfit:\n y_loading = model_loadings['model'][PC_nb](x_loadings.values, *model_loadings['params'][PC_nb])\n elif use_predict_func_loading:\n pass\n else:\n if model_loading_pc.k_constant == 1 :\n x_loadings = sm.add_constant(x_loadings)\n y_loading = model_loading_pc.predict(x_loadings)\n\n if eqm:\n obs_sc= self.scores\n sim_sc = y_scores\n y_scores = bias_correction(obs_sc.loc[idx_train,PC_nb], sim_sc.loc[idx_train],\n sim_sc.loc[idx_test], method='eqm' ,extrapolate='constant', nbins=50)\n index = idx_test\n print('eqm')\n\n\n if qm_maps != None:\n\n qmap = 
qm_maps[PC_nb]\n old_y_scores = y_scores\n y_scores = qmap.map(old_y_scores)\n\n y_loading_test = np.array([y_loading for l in range(len(y_scores))])\n\n\n predict= y_loading_test * y_scores[:,None]\n loadings.append(y_loading)\n scores.append( y_scores)\n predicted.append( predict)\n \n loadings = np.array(np.dstack(loadings))\n scores = np.array(np.dstack(scores))\n predicted = np.array(np.dstack(predicted))\n\n # K.clear_session()\n # gc.collect()\n\n res = {'loadings':loadings, 'scores':scores, 'predicted': predicted,'index':index}\n return res\n\n def stepwise_model(self, df_PCs, predictors, lim_nb_predictors=None, constant=True, log=False, manual_selection=None):\n \"\"\"\n Find best linear predictors to fit the scores\n Linear model designed by forward selection.\n \n Parameters:\n -----------\n data : pandas DataFrame with all possible predictors and response\n \n response: string, name of response column in data\n \n log: True, print a log of the score and associated candidate\n \n Returns:\n --------\n model: an \"optimal\" fitted statsmodels linear model\n with an intercept\n selected by forward selection\n evaluated by adjusted R-squared\n \"\"\"\n models = []\n predictors_name = []\n params = []\n dic_log ={} # contain the log\n\n # remove columns not float\n predictors = predictors.select_dtypes(exclude=['object'])\n\n print(\"O\"*10)\n if lim_nb_predictors:\n print(\"Number of predictors limited to \" + str(lim_nb_predictors))\n print(\"O\"*10)\n \n for column in df_PCs:\n dic_log[column] = {}\n\n pc = df_PCs.loc[:,column]\n remaining = set(predictors.columns)\n\n selected = []\n best_score, new_best_score = 0.0, 0.0\n\n print(\"==\"*20)\n print(\"Pc\"+str(column))\n print(\"==\"*20)\n\n if not manual_selection:\n for i in range(lim_nb_predictors):\n scores_with_candidates = []\n log_pearson_with_candidates = []\n for candidate in remaining:\n x = predictors[selected + [candidate]]\n if constant:\n x = sm.add_constant(x)\n\n y = pc\n\n score = sm.OLS(y.values,x.values).fit().rsquared_adj\n log_pearson_with_candidates.append([pearsonr(x.values[:,1],y.values),candidate])\n # score = sm.OLS(y.values,x.values).fit().rsquared\n scores_with_candidates.append([score, candidate])\n\n scores_with_candidates = pd.DataFrame(scores_with_candidates, columns = ['score', \"candidate\"])\n scores_with_candidates.sort_values('score', ascending=False, inplace=True)\n\n log_pearson_with_candidates= pd.DataFrame(log_pearson_with_candidates, columns = ['score', \"candidate\"])\n log_pearson_with_candidates.sort_values('score', ascending=False, inplace=True)\n\n new_best_score, best_candidate = scores_with_candidates.iloc[0,:]\n\n if log:\n dic_log[column][str(i)] = log_pearson_with_candidates\n\n if best_score < new_best_score:\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n print(selected)\n print(new_best_score)\n best_score = new_best_score\n else:\n selected = manual_selection[column]\n\n if selected == []: # if the stepwise regression was not able to select variables to create a model\n model = np.nan\n else:\n if constant:\n model = sm.OLS(pc.values, sm.add_constant(predictors[selected].values)).fit()\n print(model.rsquared_adj)\n else:\n model = sm.OLS(pc.values, predictors[selected].values).fit()\n params.append(model.params)\n models.append(model)\n predictors_name.append(selected)\n \n \n predictors_name = pd.Series(predictors_name)\n models = pd.Series(models)\n params = pd.Series(params)\n\n\n\n print(\"Saving\")\n df_models = pd.concat([predictors_name,models, 
params],axis=1)\n df_models.columns = ['predictor', 'model', 'params']\n df_models.index = range(1,len(df_PCs.columns)+1 )\n print(\"Done\")\n if log:\n return df_models, dic_log\n else:\n return df_models\n\n def skill_model(self, df_obs, pred, metrics, use_bias=None, hours=False, plot_summary=False, summary=None, mean_data=False, type='predicted'):\n \"\"\"\n DESCRIPTION\n Compute bias and RMSE to assess the model performance \n INPUT\n df_verif: dataframe with the observed values\n predictors: a list of pandas series which contains the predictors for the scores SHOULD NOT BE A LIST\n metrics: sklearn metric function to be used\n see: http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics\n example:\n metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function\n metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss\n metrics.mean_squared_error(y_true, y_pred[, ...]) Mean squared error regression loss\n metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss\n metrics.r2_score(y_true, y_pred[, ...]) R^2 (coefficient of determination) regression score function.\n summary: True, print the mean statistics\n \"\"\"\n # if (not params_loadings or not params_scores):\n # params_loadings, params_scores = self.get_curvfit_params()\n\n if type == 'predicted':\n data = pred['predicted'].sum(axis=2)\n else:\n data = pred\n \n if isinstance(mean_data, pd.core.series.Series):\n df_rec = pd.DataFrame(data, columns=df_obs.columns, index=pred['index']) # should improve this\n df_rec = df_rec.add(mean_data,axis=0)\n df_obs = df_obs.add(mean_data,axis=0)\n else:\n df_rec = pd.DataFrame(data, columns = df_obs.columns, index =pred['index']) # should improve this\n \n if not hours:\n hours = df_rec.index.hour\n hours = sorted(hours)\n hours = list(set(hours))\n hours = [str(str(hour)+':00').rjust(5, '0') for hour in hours]\n\n score = pd.DataFrame(columns= df_rec.columns)\n\n # for hour in hours:\n for sta in df_rec:\n df = pd.concat([df_obs[sta], df_rec[sta]], axis=1, join='inner')\n # df = df.between_time(hour,hour)\n df = df.dropna(axis=0)\n\n if metrics == pearsonr:\n score.loc[0,sta] = metrics(df.iloc[:,0], df.iloc[:,1])[0]\n elif use_bias == True:\n bias = df.iloc[:,0] - df.iloc[:,1]\n score.loc[0, sta] = bias.mean(axis=0)\n else:\n score.loc[0, sta] = metrics(df.iloc[:, 0], df.iloc[:, 1])\n\n if summary:\n score.loc['Total_hours',:] = score.mean(axis=0)\n score.loc[:,'Total_stations'] = score.mean(axis=1)\n if plot_summary:\n plt.figure()\n c = plt.pcolor(score, cmap=\"viridis\")\n cbar = plt.colorbar()\n cbar.ax.tick_params(labelsize=14) \n# show_values(c)\n plt.title(\"Validation summary\")\n# print type(score)\n# sns.heatmap(score)\n plt.yticks(np.arange(0.5, len(score.index), 1), score.index, fontsize=14)\n plt.xticks(np.arange(0.5, len(score.columns), 1), score.columns, fontsize=14,rotation='vertical')\n plt.show()\n print(score)\n return score\n\n def hovermoller_combined(self, dfs_pcs_combined, stanames_LCB=None):\n \n for pc in dfs_pcs_combined.keys():\n \n var = dfs_pcs_combined[pc].loc[:,'T'].groupby(lambda x: x.hour).mean()\n U = dfs_pcs_combined[pc].loc[:,'U'].groupby(lambda x: x.hour).mean()\n V = dfs_pcs_combined[pc].loc[:,'V'].groupby(lambda x: x.hour).mean()\n \n AttSta = self.AttSta\n if not stanames_LCB:\n stanames_LCB = AttSta.get_sta_id_in_metadata(values = ['Head'])\n \n print(stanames_LCB)\n var.columns = stanames_LCB\n U.columns = stanames_LCB\n V.columns = stanames_LCB\n 
\n sort = AttSta.sortsta(stanames_LCB, 'Lon')\n sorted_sta = sort['stanames']\n sorted_lon = sort['metadata']\n position, time = np.meshgrid(sorted_lon, var.index)\n \n levels_contour=np.linspace(var.min().min(),var.max().max(),100)\n levels_colorbar=np.linspace(var.min().min(),var.max().max(),10)\n \n cmap = plt.cm.get_cmap(\"RdBu_r\")\n fig = plt.figure()\n \n cnt = plt.contourf(position, time, var.loc[:,sorted_sta].values, levels = levels_contour)\n for c in cnt.collections:\n c.set_edgecolor(\"face\")\n \n cbar = plt.colorbar(ticks=levels_colorbar)\n plt.quiver(position,time,U.values,V.values)\n # cbar.ax.tick_params()\n # qk = plt.quiverkey(a, 0.9, 1.05, 1, r'$1 \\frac{m}{s}$',\n # labelpos='E',\n # fontproperties={'weight': 'bold'})\n cbar.set_label('Some Units')\n plt.ylabel(r\"Hours\")\n plt.xlabel( r\"Longitude (Degree)\")\n plt.grid(True, color=\"0.5\")\n plt.tick_params(axis='both', which='major')\n # plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n \n plt.title(\"Principal component \" + str(pc))\n plt.show()\n\n def skill_scores(self, scores_true, scores_pred, metrics):\n\n metric_scores = {}\n for i, pc_nb in enumerate(scores_true.columns):\n metric_scores[pc_nb]= metrics( scores_true.iloc[:, i], scores_pred[:, i])\n\n return metric_scores\n\n def dnn_model(self, X, Y, epochs=None, model_path=None):\n models =[]\n\n # Scaling\n scaler_x = MinMaxScaler(feature_range=(0, 1))\n X = pd.DataFrame(scaler_x.fit_transform(X.values), columns = X.columns, index=X.index)\n\n predictor_names = []\n\n for pc_nb in Y.columns:\n scaler_y = MinMaxScaler(feature_range=(0, 1))\n\n # qt = QuantileTransformer()\n # y = qt.fit_transform(Y.loc[:, pc_nb].values.reshape(-1, 1))\n # Y_scaled = scaler_y.fit_transform(y)\n\n\n Y_scaled = scaler_y.fit_transform(Y.loc[:, pc_nb].values.reshape(-1, 1))\n\n\n print('='*100)\n model, history = DNN_ribeirao(X, Y_scaled, epochs=epochs, model_path=model_path)\n\n # save scalers\n model.scaler_x = scaler_x\n model.scaler_y = scaler_y\n\n predictor_names.append(X.columns.tolist())\n models.append(model)\n\n df_models = pd.DataFrame([predictor_names,models]).T\n df_models.columns = ['predictor', 'model']\n df_models.index = range(1,len(Y.columns)+1 )\n print(\"Done\")\n print('2')\n\n # K.clear_session()\n # gc.collect()\n return df_models\n\n def rnn_model(self, X, Y, SEQUENCE_LENGTH=8, epochs=2, model_path=None):\n models =[]\n predictor_names = []\n # Scaling\n scaler_x = MinMaxScaler(feature_range=(0, 1))\n X = pd.DataFrame(scaler_x.fit_transform(X.values), columns = X.columns, index=X.index)\n\n # preprocessing for RNN\n X_lag = get_df_lags(X,lags=range(SEQUENCE_LENGTH))\n Y = Y.loc[X_lag.index,:] # get same index\n X_lag = X_lag.values.reshape(X_lag.shape[0], SEQUENCE_LENGTH, int(X_lag.shape[1] / SEQUENCE_LENGTH))\n\n for pc_nb in Y.columns:\n scaler_y = MinMaxScaler(feature_range=(0, 1))\n Y_scaled = scaler_y.fit_transform(Y.loc[:,pc_nb].values.reshape(-1,1))\n\n model, history = RNN_ribeirao(X_lag, Y_scaled, epochs=epochs, model_path=model_path)\n\n # save scalers\n model.scaler_x = scaler_x\n model.scaler_y = scaler_y\n\n model.sequence_length = SEQUENCE_LENGTH\n predictor_names.append(X.columns.tolist())\n models.append(model)\n df_models = pd.DataFrame([predictor_names,models]).T\n df_models.columns = ['predictor', 'model']\n df_models.index = range(1,len(Y.columns)+1 )\n print(\"Done\")\n # K.clear_session()\n # gc.collect()\n return 
df_models\n","sub_path":"mailib/model/statmod/statmod_lib.py","file_name":"statmod_lib.py","file_ext":"py","file_size_in_byte":38200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"435111010","text":"class Producto:\n def __init__(self,id_producto,nombre,descripcion,color,material,cantidad,ancho,alto,espesor,peso,precio,divisiones,accesorios,tipo_producto_id_tipo_producto):\n self.id_producto = id_producto\n self.nombre = nombre\n self.descripcion = descripcion\n self.color = color \n self.material = material\n self.cantidad = cantidad \n self.ancho = ancho\n self.alto = alto \n self.espesor = espesor \n self.peso = peso\n self.precio = precio\n self.divisiones = divisiones\n self.accesorios = accesorios\n self.tipo_producto_id_tipo_producto = tipo_producto_id_tipo_producto \n#prueba de clase producto\n#auxProducto = Producto(0,\"puerta\",\"puerta de madera nativa\",\"rojo\",\"rauli\",2,90,210,3.5,50,80000,2,\"españoleta\",\"puerta\")\n#print(auxProducto.nombre + \" \" +str(auxProducto.espesor))","sub_path":"claseProducto.py","file_name":"claseProducto.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"165013087","text":"import sys\ndef happyNumber(input_num):\n\tsequence = []\n\toriginal_num = input_num\n\tif original_num == \"1\":\n\t\treturn (\"happy 0\")\n\t\n\tsequence.append(original_num)\n\ttemp = square_digits(original_num)\n\twhile (sequence.count(temp) == 0):\n\t\tsequence.append(temp)\n\t\ttemp = square_digits(temp)\n\t\n\ttemp = sequence.pop()\n\tif temp == 1:\n\t\treturn \"happy \"+ str(len(sequence))\n\telse:\n\t\treturn \"unhappy \" + str(len(sequence)+1)\n\ndef square_digits(number):\n\tnumber_string = str(number)\n\tresult = 0\n\tfor ch in number_string:\n\t\tresult = result + int(ch)*int(ch)\n\treturn result\n\ndef main():\n\tc = input()\n\tinput_numbers = []\n\tresult = []\n\twhile c != \"\":\n\t\tinput_numbers.append(c)\n\t\tc = input()\n\t\n\tinput_numbers.reverse()\n\twhile len(input_numbers) != 0:\n\t\ttemp1 = input_numbers.pop()\n\t\ttemp2 = happyNumber(temp1)\n\t\tresult.append(temp2)\n\t\n\tresult.reverse()\n\twhile len(result) != 0:\n\t\ttemp = result.pop()\n\t\tprint(temp)\n\t\n\tprint()\nmain()\t\n","sub_path":"algorithm/happyNumber.py","file_name":"happyNumber.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"359381533","text":"from ..database import mongo\nfrom flask_login import login_required, current_user\nfrom bson import ObjectId\nfrom pymongo import UpdateOne\nimport datetime\n\nclass AppTemplate(object):\n def __init__(self, name):\n self.name = name\n self.user = current_user.get_id()\n\n def add(self):\n return mongo.db.apptemplates.insert({\n 'name': self.name,\n 'active': 'true',\n 'user': self.user,\n 'create_date': datetime.datetime.utcnow(),\n 'update_date': datetime.datetime.utcnow()\n })\n\n @staticmethod\n def get(id):\n return mongo.db.apptemplates.find_one({\n '_id': ObjectId(id)\n })\n\n @staticmethod\n def all():\n return mongo.db.apptemplates.find({\n 'active': 'true'\n }).sort('name',1)\n\n @staticmethod\n def update(id, name):\n return mongo.db.apptemplates.update_one(\n {'_id': ObjectId(id)},\n {'$set': {\n 'name': name,\n 'update_date': datetime.datetime.utcnow()\n }\n }, upsert=False)\n\n @staticmethod\n def delete(id):\n return mongo.db.apptemplates.update_one(\n {'_id': 
ObjectId(id)},\n {'$set': {'active': 'false'}\n }, upsert=False)\n\n\nclass AppTemplateStep(object):\n def __init__(self, template_id, name, notes, days_before_close):\n self.template_id = template_id\n self.name = name\n self.notes = notes\n self.days_before_close = days_before_close\n\n def add(self):\n template = AppTemplate.get(self.template_id)\n next_order = template['order'] + 1 if 'order' in template else 1\n\n return mongo.db.apptemplates.update_one({\n '_id': ObjectId(self.template_id)\n },{\n '$set': { 'update_date': datetime.datetime.utcnow() },\n '$inc': {'order': 1}, #increment the project order count to keep track of # of project steps\n '$push': {\n 'steps':\n {\n '_id': ObjectId(),\n 'name': self.name,\n 'notes': self.notes,\n 'days_before_close': self.days_before_close,\n 'active': True,\n 'order': next_order, #set the new project step to the next number\n 'create_date': datetime.datetime.utcnow(),\n 'update_date': datetime.datetime.utcnow()\n }\n }\n }, upsert=False)\n\n @staticmethod\n def get(id, step_id):\n return mongo.db.apptemplates.find_one({\n '_id': ObjectId(id),\n 'steps._id': ObjectId(step_id)\n },\n {'steps.$':1})\n\n @staticmethod\n def all(id):\n return mongo.db.apptemplates.aggregate([\n { '$unwind' : '$steps' },\n { '$match' : {\n '_id' : ObjectId(id),\n 'steps.active': True,\n }\n },\n { '$sort' : { 'steps.order' : 1 } }\n ])\n\n @staticmethod\n def update(id, step_id, name, notes, days_before_close):\n return mongo.db.apptemplates.update_one({\n '_id': ObjectId(id),\n 'steps._id': ObjectId(step_id)\n },{\n '$set': {\n 'steps.$.name': name,\n 'steps.$.notes': notes,\n 'steps.$.days_before_close': days_before_close,\n 'steps.$.update_date': datetime.datetime.utcnow()\n }\n }, upsert=False)\n\n @staticmethod\n def delete(id, step_id):\n return mongo.db.apptemplates.update_one({\n '_id': ObjectId(id),\n 'steps._id': ObjectId(step_id)\n },{\n '$set': {\n 'steps.$.active': False,\n 'steps.$.update_date': datetime.datetime.utcnow()\n }\n }, upsert=False)\n\n @staticmethod\n def sort(id, step_ids):\n steps = step_ids.split(',')\n operations = []\n order = 1\n\n for step_id in steps:\n operations.append(UpdateOne({\n '_id': ObjectId(id),\n 'steps._id': ObjectId(step_id)\n },{\n '$set': {\n 'steps.$.order': order\n }\n }, upsert=False))\n\n order += 1\n\n return mongo.db.apptemplates.bulk_write(operations)\n","sub_path":"app/configuration/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"586369217","text":"import paho.mqtt.client as paho #mqtt library\nimport os\nimport json\nimport time\nfrom datetime import datetime\n\n#host name is localhost because both broker and python are Running on same \n#machine/Computer.\nbroker=\"192.168.0.38\"#\"earthron.vacustech.in\" #host name , Replace with your IP address.\nport=1883 #MQTT data listening port\nACCESS_TOKEN='M7OFDCmemyKoi461BJ4j' #not manditory\n\ndef on_publish(client,userdata,result): #create function for callback\n print(\"published data is : \")\n pass\n\nclient1= paho.Client(\"GatewayController\") #create client object\nclient1.on_publish = on_publish #assign function to callback\nclient1.username_pw_set(ACCESS_TOKEN) #access token from thingsboard device\n#client1.connect(broker,port,keepalive=60) #establishing connection\n\ndef systemcon():\n st=0\n try :\n print(\"try exception\") \n st=client1.connect(broker,port,keepalive=60) #establishing connection\n \n except:\n 
print(\"exception\")\n st=1;\n \n finally:\n print(\"finalyy exception\") \n if(st!=0):\n time.sleep(5)\n systemcon();\n\nsystemcon();\n\n#publishing after every 5 secs\nwhile True:\n\n payload=\"{\"\n payload+=\"\\\"status\\\":1\";\n payload+=\"}\"\n ret= client1.publish(\"Gatewaystatus\",payload) #topic name is test\n if(ret[0]!=0):\n systemcon();\n #print(payload);\n #print(\"Please check data on your Subscriber Code \\n\")\n time.sleep(30)\n \n","sub_path":"SocialDistancingGateway/general/GatewayPostman.py","file_name":"GatewayPostman.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"232911160","text":"import pickle\n\nfrom keras.models import load_model\n\nif __name__ == \"__main__\":\n with open(\"../data/netdata.pkl\", \"rb\") as f:\n tmp = pickle.load(f)\n model = load_model(\"../data/model\")\n trainx = tmp[0]\n trainy = tmp[1]\n testx = tmp[2]\n testy = tmp[3]\n\n y = model.predict(testx)\n tt = []\n for i in range(len(y)):\n s = 0\n for j in range(48):\n s += abs(y[i][j][0] - testy[i][j][0]) * 2 / (y[i][j][0] + testy[i][j][0])\n tt.append(s / 48)\n print(sum(tt) / len(tt))\n","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"235857528","text":"from threading import Thread\nfrom collections import deque\nimport logging\nfrom WarehouseServer import com, servo\nimport time\n\nimport os\n\nlogger = logging.getLogger(__name__)\n\n\nclass SinglePoint:\n def __init__(self, x, y, z, move_type):\n self.x = x\n self.y = y\n self.z = z\n self.move_type = move_type\n\n\nclass WarehousePathfinder(Thread):\n movesQueue = deque()\n\n def __init__(self, level=logging.INFO):\n Thread.__init__(self)\n logging.basicConfig()\n logger.setLevel(level)\n formatter = logging.Formatter(\"%(asctime)s %(threadName)-11s %(levelname)-10s %(message)s\")\n\n def run(self):\n while True:\n if len(self.movesQueue) > 0:\n self.go_to_point()\n time.sleep(0.1)\n\n def add_absolute_point(self, x, y, z):\n point = SinglePoint(x, y, z, \"absolute\")\n self.add_point_to_move(point)\n\n def add_relative_point(self, x, y, z):\n point = SinglePoint(x, y, z, \"relative\")\n self.add_point_to_move(point)\n\n def add_point_to_move(self, single_point):\n self.movesQueue.append(single_point)\n\n def go_to_point(self):\n point = self.movesQueue.popleft()\n\n if point.move_type == \"relative\":\n self.move_relative(point.x, point.y, point.z)\n\n if point.move_type == \"absolute\":\n self.move_absolute(point.x, point.y, point.z)\n\n @staticmethod\n def move_x_absolute(value, blocking=False):\n com.send(0, value)\n if blocking:\n while (com.x - 5 > value) or (com.x + 5 < value):\n time.sleep(0.1)\n\n @staticmethod\n def move_y_absolute(value, blocking=False):\n com.send(1, value)\n if blocking:\n while (com.y - 5 > value) or (com.y + 5 < value):\n time.sleep(0.1)\n\n @staticmethod\n def move_z_absolute(value, blocking=False):\n com.send(2, value)\n if blocking:\n while (com.z - 5 > value) or (com.z + 5 < value):\n time.sleep(0.1)\n\n @staticmethod\n def move_x_relative(value, blocking=False):\n com.send(0, com.x + value)\n value += com.x\n if blocking:\n while (com.x - 5 > value) or (com.x + 5 < value):\n time.sleep(0.1)\n\n @staticmethod\n def move_y_relative(value, blocking=False):\n com.send(1, com.y + value)\n value += com.y\n if blocking:\n while (com.y - 5 > value) or (com.y + 5 < 
value):\n time.sleep(0.1)\n\n @staticmethod\n def move_z_relative(value, blocking=False):\n com.send(2, com.z + value)\n value += com.z\n if blocking:\n while (com.z - 5 > value) or (com.z + 5 < value):\n time.sleep(0.1)\n\n @staticmethod\n def move_servo(value):\n servo.move_percent(value)\n\n def move_relative(self, x, y, z):\n self.move_x_relative(x)\n self.move_y_relative(y)\n self.move_z_relative(z)\n wx = x + com.x\n wy = y + com.y\n wz = z + com.z\n while (abs(com.x - wx) > 2) or (abs(com.y - wy) > 2) or (abs(com.z - wz) > 2):\n time.sleep(0.1)\n print (\"relative move done!\")\n\n def move_absolute(self, x, y, z):\n self.move_x_absolute(x)\n self.move_y_absolute(y)\n self.move_z_absolute(z)\n while (abs(com.x - x) > 2) or (abs(com.y - y) > 2) or (abs(com.z - z) > 2):\n time.sleep(0.1)\n print (\"absolute move done!\")\n\n def go_to_transition_a(self):\n self.move_absolute(1350, 8900, 3500)\n\n def sequence(self):\n self.move_absolute(2280, 2975, 5000)\n self.move_relative(0, 0, 400)\n self.move_relative(0, 370, 0)\n self.move_relative(0, 0, -2500)\n self.move_absolute(1370, 8900, 2900)\n os.system(\"echo 4=%d%% > /dev/servoblaster\" % 18)\n self.move_absolute(1972, 6500, 400)\n self.move_relative(0, -1350, 0)\n self.move_relative(0, 0, -250)\n self.move_relative(0, 2500, 0)\n self.move_relative(0, 0, 3000)\n os.system(\"echo 4=%d%% > /dev/servoblaster\" % 85.6)\n self.move_absolute(1970, 5150, 3300)\n self.move_absolute(1970, 5150, 2912)\n self.move_relative(0, 2500, 0)\n self.move_absolute(1350, 8700, 2000)\n self.move_absolute(1350, 7000, 50)\n time.sleep(5)\n self.move_absolute(1350, 8900, 3500)\n\n def sequence_backward(self):\n self.move_absolute(1350, 7000, 50)\n time.sleep(5)\n self.move_relative(0, 0, -1000)\n self.move_absolute(1978, 7000, 3000)\n\n self.move_relative(0, -1500, 0)\n self.move_relative(0, 0, -250)\n self.move_relative(0, 1500, 0)\n os.system(\"echo 4=%d%% > /dev/servoblaster\" % 18)\n\n self.move_absolute(1975, 7000, 200)\n\n\n self.move_relative(0, -2500, 0)\n # self.move_absolute(1950, 5150, 2912)\n # self.move_absolute(1950, 5150, 3300)\n # self.move_relative(0, 0, -3000)\n # self.move_relative(0, -2500, 0)\n\n\n # os.system(\"echo 4=%d%% > /dev/servoblaster\" % 85.6)\n\n\n\n # self.move_absolute(1350, 8900, 3500)\n\n\n\n @staticmethod\n def reset_stm():\n com.send(99)\n\n def home_x(self):\n com.send(4)\n\n def home_y(self):\n com.send(5)\n\n def home_z(self):\n com.send(6)\n\n def home_all(self):\n self.home_y()\n while True:\n time.sleep(0.5)\n if com.y == 0:\n time.sleep(0.5)\n if com.y == 0:\n break\n\n self.move_absolute(0, 8900, 0)\n\n self.home_z()\n while True:\n time.sleep(0.5)\n if com.z == 0:\n time.sleep(0.5)\n if com.z == 0:\n break\n\n self.home_x()\n while True:\n time.sleep(0.5)\n if com.x == 0:\n time.sleep(0.5)\n if com.x == 0:\n break\n\n time.sleep(0.5)\n\n self.go_to_transition_a()\n","sub_path":"WarehouseServer/path_thread.py","file_name":"path_thread.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42535817","text":"# Import required modules\nfrom datetime import date, time, datetime\nimport time\nimport RPi.GPIO as GPIO\n\n# Initialize GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(8, GPIO.OUT, initial=GPIO.LOW)\n\n\n# Define current, ON, and OFF times\nnow = datetime.now()\non_time = now.replace(hour=8, minute=00, second=00)\noff_time = now.replace(hour=18, minute=00, second=00)\n\n# Loop to control GPIO 
output\nwhile True:\n if (now > on_time and now < off_time):\n GPIO.output(8, GPIO.HIGH)\n sleep(30)\n else:\n GPIO.output(8, GPIO.LOW)\n\n\n","sub_path":"Hydroponics.py","file_name":"Hydroponics.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107807979","text":"\nfrom epicduels.content import game_state as CT\nfrom epicduels.content import decks\n\n# Light is_main Characters\nANAKIN_SKYWALKER = {'name': 'Anakin Skywalker',\n 'hp': 18,\n 'is_main': True,\n 'max_hp': 18,\n 'type': CT.ANAKIN_SKYWALKER,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.ANAKIN_SKYWALKER_DECK}\n\nHAN_SOLO = {'name': 'Han Solo',\n 'hp': 13,\n 'is_main': True,\n 'max_hp': 13,\n 'type': CT.HAN_SOLO,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.HAN_SOLO_DECK}\n\nLUKE_SKYWALKER = {'name': 'Luke Skywalker',\n 'hp': 17, 'is_main': True,\n 'max_hp': 17,\n 'type': CT.LUKE_SKYWALKER,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.LUKE_SKYWALKER_DECK}\n\nMACE_WINDU = {'name': 'Mace Windu',\n 'hp': 19,\n 'is_main': True,\n 'max_hp': 19,\n 'type': CT.MACE_WINDU,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.MACE_WINDU_DECK}\n\nOBI_WAN = {'name': 'Obi-Wan Kenobi',\n 'hp': 18,\n 'is_main': True,\n 'max_hp': 18,\n 'type': CT.OBI_WAN,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.OBI_WAN_KENOBI_DECK}\n\nYODA = {'name': 'Yoda',\n 'hp': 15,\n 'is_main': True,\n 'max_hp': 15,\n 'type': CT.YODA,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.YODA_DECK}\n\n# minor Characters\nCHEWBACCA = {'name': 'Chewbacca',\n 'hp': 15,\n 'is_main': False,\n 'max_hp': 15,\n 'type': CT.CHEWBACCA,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.CHEWBACCA_DECK}\n\nLEIA_SKYWALKER = {'name': 'Leia Skywalker',\n 'hp': 10,\n 'is_main': False,\n 'max_hp': 10,\n 'type': CT.LEIA_SKYWALKER,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.LEIA_SKYWALKER_DECK}\n\nPADME_AMIDALA = {'name': 'Padme Amidala',\n 'hp': 10,\n 'is_main': False,\n 'max_hp': 10,\n 'type': CT.PADME_AMIDALA,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.PADME_AMIDALA_DECK}\n\nTROOPER = {'name': 'Trooper',\n 'hp': 4,\n 'is_main': False,\n 'max_hp': 4,\n 'type': CT.TROOPER,\n 'state': CT.LIGHT,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.TROOPER_DECK}\n\n# Dark is_main Characters\nBOBA_FETT = {'name': 'Boba Fett',\n 'hp': 14,\n 'is_main': True,\n 'max_hp': 14,\n 'type': CT.BOBA_FETT,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.BOBA_FETT_DECK}\n\nCOUNT_DOOKU = {'name': 'Count Dooku',\n 'hp': 18,\n 'is_main': True,\n 'max_hp': 18,\n 'type': CT.COUNT_DOOKU,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.COUNT_DOOKU_DECK}\n\nDARTH_MAUL = {'name': 'Darth Maul',\n 'hp': 18,\n 'is_main': True,\n 'max_hp': 18,\n 'type': CT.DARTH_MAUL,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.DARTH_MAUL_DECK}\n\nDARTH_VADER = {'name': 'Darth Vader',\n 'hp': 20,\n 'is_main': True,\n 'max_hp': 20,\n 'type': CT.DARTH_VADER,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': False,\n 'deck': decks.DARTH_VADER_DECK}\n\nEMPEROR_PALPATINE = {'name': 'Emperor Palpatine',\n 'hp': 13,\n 'is_main': True,\n 'max_hp': 13,\n 'type': CT.EMPEROR_PALPATINE,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': False,\n 'deck': 
decks.EMPEROR_PALPATINE_DECK}\n\nJANGO_FETT = {'name': 'Jango Fett',\n 'hp': 15,\n 'is_main': True,\n 'max_hp': 15,\n 'type': CT.JANGO_FETT,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.JANGO_FETT_DECK}\n\n# minor Characters\nGREEDO = {'name': 'Greedo',\n 'hp': 7,\n 'is_main': False,\n 'max_hp': 7,\n 'type': CT.GREEDO,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.GREEDO_DECK}\n\nZAM_WESELL = {'name': 'Zam Wesell',\n 'hp': 10,\n 'is_main': False,\n 'max_hp': 10,\n 'type': CT.ZAM_WESELL,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.ZAM_WESELL_DECK}\n\nBATTLE_DROID = {'name': 'Battle Droid',\n 'hp': 3,\n 'is_main': False,\n 'max_hp': 3,\n 'type': CT.BATTLE_DROID,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.TROOPER_DECK}\n\nCRIMSON_GUARD = {'name': 'Crimson Guard',\n 'hp': 5,\n 'is_main': False,\n 'max_hp': 5,\n 'type': CT.CRIMSON_GUARD,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.CRIMSON_GUARD_DECK}\n\nSUPER_BATTLE_DROID = {'name': 'Super Battle Droid',\n 'hp': 5,\n 'is_main': False,\n 'max_hp': 5,\n 'type': CT.SUPER_BATTLE_DROID,\n 'state': CT.DARK,\n 'pos': None,\n 'is_range': True,\n 'deck': decks.SUPER_BATTLE_DROID_DECK}\n\nCHARACTERS = {\n CT.ANAKIN_SKYWALKER: ANAKIN_SKYWALKER,\n CT.HAN_SOLO: HAN_SOLO,\n CT.LUKE_SKYWALKER: LUKE_SKYWALKER,\n CT.MACE_WINDU: MACE_WINDU,\n CT.OBI_WAN: OBI_WAN,\n CT.YODA: YODA,\n CT.BOBA_FETT: BOBA_FETT,\n CT.COUNT_DOOKU: COUNT_DOOKU,\n CT.DARTH_MAUL: DARTH_MAUL,\n CT.DARTH_VADER: DARTH_VADER,\n CT.EMPEROR_PALPATINE: EMPEROR_PALPATINE,\n CT.JANGO_FETT: JANGO_FETT,\n CT.CHEWBACCA: CHEWBACCA,\n CT.TROOPER: TROOPER,\n CT.LEIA_SKYWALKER: LEIA_SKYWALKER,\n CT.PADME_AMIDALA: PADME_AMIDALA,\n CT.BATTLE_DROID: BATTLE_DROID,\n CT.CRIMSON_GUARD: CRIMSON_GUARD,\n CT.GREEDO: GREEDO,\n CT.SUPER_BATTLE_DROID: SUPER_BATTLE_DROID,\n CT.ZAM_WESELL: ZAM_WESELL\n}\n\nSQUADS = {\n CT.ANAKIN_SKYWALKER: [ANAKIN_SKYWALKER, PADME_AMIDALA],\n CT.HAN_SOLO: [HAN_SOLO, CHEWBACCA],\n CT.LUKE_SKYWALKER: [LUKE_SKYWALKER, LEIA_SKYWALKER],\n CT.MACE_WINDU: [MACE_WINDU, TROOPER, TROOPER],\n CT.OBI_WAN: [OBI_WAN, TROOPER, TROOPER],\n CT.YODA: [YODA, TROOPER, TROOPER],\n CT.BOBA_FETT: [BOBA_FETT, GREEDO],\n CT.COUNT_DOOKU: [COUNT_DOOKU, SUPER_BATTLE_DROID, SUPER_BATTLE_DROID],\n CT.DARTH_MAUL: [DARTH_MAUL, BATTLE_DROID, BATTLE_DROID],\n CT.DARTH_VADER: [DARTH_VADER, TROOPER, TROOPER],\n CT.EMPEROR_PALPATINE: [EMPEROR_PALPATINE, CRIMSON_GUARD, CRIMSON_GUARD],\n CT.JANGO_FETT: [JANGO_FETT, ZAM_WESELL]\n}\n","sub_path":"epicduels/content/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"490989413","text":"from collections import defaultdict\nfrom .fields import Field, Id\nfrom .db import DatabaseInterface, mongo_client, DATABASE_NAME\n\nimport json\nimport logging\nimport pymongo\n\n\nlogger = logging.getLogger(__name__)\n\n\nMAX_LIST_LENGTH = 3\n\n\nclass ModelRegistry(object):\n\n def __init__(self):\n self.models = {}\n\n def load(self, app_label, model_name):\n app_label = app_label.lower()\n model_name = model_name.lower()\n return self.models.get(app_label, {}).get(model_name)\n\n def register(self, app_label, model_name, model):\n app_label = app_label.lower()\n model_name = model_name.lower()\n if app_label in self.models:\n app_models = self.models.get(app_label)\n if model_name in app_models:\n return\n else:\n 
self.models[app_label][model_name] = model\n else:\n self.models[app_label] = {model_name: model}\n\n\ncache = ModelRegistry()\nload = cache.load\nregister = cache.register\n\n\ndef indexField(collection, name, field):\n if field.index or field.unique:\n kwargs = {}\n index = field.index if field.index else pymongo.ASCENDING\n kwargs['unique'] = field.unique\n kwargs['background'] = field.background\n kwargs['sparse'] = field.sparse\n if index == pymongo.GEO2D:\n kwargs['min'] = field.geo_min\n kwargs['max'] = field.geo_max\n if field.unique:\n kwargs['dropDups'] = field.drop_dups\n\n collection.create_index([(name, index), ], **kwargs)\n\n\ndef parseCompoundIndex(index):\n idx = []\n for elm in index:\n if isinstance(elm, (list, tuple)):\n idx += elm\n else:\n idx += [(elm, pymongo.ASCENDING), ]\n return idx\n\n\nclass Meta(object):\n\n def __init__(self, field_list):\n self.fields = {}\n self.fieldmap = defaultdict(list)\n for name, field in field_list:\n setattr(self, name, field)\n self.fields[name] = field\n self.fieldmap[field.__class__].append(name)\n self.fieldmap = dict(self.fieldmap)\n\n\nclass ModelBaseMeta(type):\n\n def __new__(cls, classname, bases, classDict):\n\n app_label = bases[0].__module__.split('.')[-2]\n\n # check the registry and exit if found\n model = load(app_label, classname)\n if model:\n return model\n\n # find fields from all base classes of this model\n collection_name = classDict['collection_name'] if 'collection_name' in classDict else classname.lower()\n _collection = mongo_client[DATABASE_NAME][collection_name]\n field_list = []\n for base in bases:\n # create the unique_together index\n if hasattr(base, 'unique_together'):\n idx = parseCompoundIndex(base.unique_together)\n _collection.create_index(idx, unique=True)\n if hasattr(base, 'compound_index'):\n idx = parseCompoundIndex(base.compound_index)\n _collection.create_index(idx)\n if hasattr(base, 'meta'):\n items = base.meta.fields.items()\n for name, field in items:\n # single field index creation\n indexField(_collection, name, field)\n # add the item to the list of fields to be stored in the meta\n # model\n field_list += items\n\n # clean the classDict by removing the fields from the desired\n # attributes i.e. 
the attributes we want to actively use on the model\n if 'unique_together' in classDict:\n idx = parseCompoundIndex(classDict['unique_together'])\n _collection.create_index(idx, unique=True)\n if 'compound_index' in classDict:\n idx = parseCompoundIndex(classDict['compound_index'])\n _collection.create_index(idx)\n for key, val in classDict.items():\n if issubclass(val.__class__, Field):\n field_list.append((key, val))\n indexField(_collection, key, val)\n del classDict[key]\n\n meta = Meta(field_list)\n\n # make the class\n new_class = type.__new__(cls, classname, bases, classDict)\n new_class.meta = meta\n\n # register the model in the ModelRegistry\n register(app_label, classname, new_class)\n\n return load(app_label, classname)\n\n\nclass Model(DatabaseInterface):\n\n __metaclass__ = ModelBaseMeta\n\n _id = Id()\n\n def __init__(self, *args, **kwargs):\n self._from_db = kwargs.get('_from_db', False)\n self._conditions = kwargs.get('_conditions')\n for fieldname, field in self.meta.fields.items():\n value = kwargs.get(fieldname, field.getDefault())\n setattr(self, fieldname, value)\n field.model = self\n field.name = fieldname\n field.value = value\n\n def __repr__(self):\n return u'<%s: %s>' % (self.__class__.__name__, self._id)\n\n def filterFields(self, field_type):\n \"\"\"\n Filters the instances within _fields for the class type specified.\n \"\"\"\n # get fieldnames\n fieldmap = self.meta.fieldmap\n if field_type in fieldmap:\n fieldnames = fieldmap[field_type]\n return [getattr(self, name) for name in fieldnames]\n return []\n\n def serialize(self, to_json=False):\n \"\"\"\n For use in model serialization. It uses the dictionary returned from\n _fields property method (below).\n \"\"\"\n data = {\n name: field.serialize(getattr(self, name), to_json=to_json)\n for name, field in self.meta.fields.items()\n if getattr(self, name) is not None\n }\n if to_json:\n return json.dumps(data)\n return data\n","sub_path":"dat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123485035","text":"import gi\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nimport Clientes\nimport Tintas\n\n\nclass Principal(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self, title=\"Menu Principal\")\n self.set_default_size(400, 200)\n self.set_resizable(False)\n\n self.tintas=None\n\n caja = Gtk.Box(spacing=0, orientation=Gtk.Orientation.VERTICAL)\n\n caja.override_background_color(0, Gdk.RGBA(0.937, 0.914, 0.898, 0.5))\n\n self.add(caja)\n\n #declaro los botones\n self.botonTintas = Gtk.Button(label=\"Tintas\", margin_top=30, margin_left=50, margin_right=50)\n self.botonClientes = Gtk.Button(label=\"Clientes\", margin_top=30, margin_left=50, margin_right=50)\n self.botonSalir = Gtk.Button(label=\"Salir\", margin_top=30, margin_left=50, margin_right=50)\n\n #añado los botones a la caja\n caja.add(self.botonTintas)\n caja.add(self.botonClientes)\n caja.add(self.botonSalir)\n\n #asocio los botones a los listener del click\n self.botonTintas.connect(\"clicked\", self.on_open_clicked)\n self.botonClientes.connect(\"clicked\",self.on_open_clientes_clicked)\n self.botonSalir.connect(\"clicked\",self.on_open_salir_clicked)\n\n self.connect(\"destroy\", Gtk.main_quit)\n self.show_all()\n\n\n def on_open_clicked(self,button):\n \"\"\"Listener boton tintas.\n\n :param button:\n :return:\n \"\"\"\n self.tintas = Tintas.Tintas()\n\n\n 
def on_open_clientes_clicked(self,button):\n \"\"\"Listener boton clientes.\n\n :param button:\n :return:\n \"\"\"\n\n self.clientes= Clientes.Clientes()\n\n def on_open_salir_clicked(self,evt):\n \"\"\"Listener boton salir.\n\n :param evt:\n :return:\n \"\"\"\n\n Principal.destroy(self)\n\n\n\n\n","sub_path":"Principal.py","file_name":"Principal.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541503120","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mayan.apps.common.apps import MayanAppConfig\nfrom mayan.apps.common.classes import MissingItem\nfrom mayan.apps.common.html_widgets import TwoStateWidget\nfrom mayan.apps.common.menus import (\n menu_list_facet, menu_object, menu_secondary, menu_setup\n)\nfrom mayan.apps.common.signals import post_initial_setup, post_upgrade\nfrom mayan.apps.converter.links import link_transformation_list\nfrom mayan.apps.documents.menus import menu_documents\nfrom mayan.apps.documents.signals import post_version_upload\nfrom mayan.apps.navigation.classes import SourceColumn\n\nfrom .classes import StagingFile\nfrom .dependencies import * # NOQA\nfrom .handlers import (\n handler_copy_transformations_to_version, handler_create_default_document_source,\n handler_initialize_periodic_tasks\n)\nfrom .links import (\n link_document_create_multiple, link_setup_sources,\n link_setup_source_check_now, link_setup_source_create_imap_email,\n link_setup_source_create_pop3_email, link_setup_source_create_sane_scanner,\n link_setup_source_create_watch_folder, link_setup_source_create_webform,\n link_setup_source_create_staging_folder, link_setup_source_delete,\n link_setup_source_edit, link_setup_source_logs, link_staging_file_delete,\n link_document_version_upload\n)\nfrom .widgets import StagingFileThumbnailWidget\n\n\nclass SourcesApp(MayanAppConfig):\n app_namespace = 'sources'\n app_url = 'sources'\n has_rest_api = True\n has_tests = True\n name = 'mayan.apps.sources'\n verbose_name = _('Sources')\n\n def ready(self):\n super(SourcesApp, self).ready()\n\n POP3Email = self.get_model(model_name='POP3Email')\n IMAPEmail = self.get_model(model_name='IMAPEmail')\n Source = self.get_model(model_name='Source')\n SourceLog = self.get_model(model_name='SourceLog')\n SaneScanner = self.get_model(model_name='SaneScanner')\n StagingFolderSource = self.get_model(model_name='StagingFolderSource')\n WatchFolderSource = self.get_model(model_name='WatchFolderSource')\n WebFormSource = self.get_model(model_name='WebFormSource')\n\n MissingItem(\n label=_('Create a document source'),\n description=_(\n 'Document sources are the way in which new documents are '\n 'feed to Mayan EDMS, create at least a web form source to '\n 'be able to upload documents from a browser.'\n ),\n condition=lambda: not Source.objects.exists(),\n view='sources:setup_source_list'\n )\n\n SourceColumn(\n attribute='label', is_identifier=True, is_sortable=True,\n source=Source\n )\n SourceColumn(\n attribute='class_fullname', label=_('Type'), source=Source\n )\n SourceColumn(\n attribute='enabled', is_sortable=True, source=Source,\n widget=TwoStateWidget\n )\n\n SourceColumn(\n source=StagingFile,\n label=_('Created'),\n func=lambda context: context['object'].get_date_time_created()\n )\n\n html_widget = StagingFileThumbnailWidget()\n SourceColumn(\n source=StagingFile,\n label=_('Thumbnail'),\n func=lambda context: 
html_widget.render(\n instance=context['object'],\n )\n )\n\n SourceColumn(\n source=SourceLog,\n label=_('Date time'),\n func=lambda context: context['object'].datetime\n )\n SourceColumn(\n source=SourceLog,\n label=_('Message'),\n func=lambda context: context['object'].message\n )\n\n menu_documents.bind_links(links=(link_document_create_multiple,))\n\n menu_list_facet.bind_links(\n links=(\n link_setup_source_logs, link_transformation_list,\n ), sources=(\n POP3Email, IMAPEmail, SaneScanner, StagingFolderSource,\n WatchFolderSource, WebFormSource\n )\n )\n\n menu_object.bind_links(\n links=(\n link_setup_source_delete, link_setup_source_edit\n ), sources=(\n POP3Email, IMAPEmail, SaneScanner, StagingFolderSource,\n WatchFolderSource, WebFormSource\n )\n )\n menu_object.bind_links(\n links=(link_staging_file_delete,), sources=(StagingFile,)\n )\n menu_object.bind_links(\n links=(link_setup_source_check_now,),\n sources=(IMAPEmail, POP3Email, WatchFolderSource,)\n )\n menu_secondary.bind_links(\n links=(\n link_setup_sources, link_setup_source_create_webform,\n link_setup_source_create_sane_scanner,\n link_setup_source_create_staging_folder,\n link_setup_source_create_pop3_email,\n link_setup_source_create_imap_email,\n link_setup_source_create_watch_folder\n ), sources=(\n POP3Email, IMAPEmail, StagingFolderSource, WatchFolderSource,\n WebFormSource, 'sources:setup_source_list',\n 'sources:setup_source_create'\n )\n )\n menu_setup.bind_links(links=(link_setup_sources,))\n menu_secondary.bind_links(\n links=(link_document_version_upload,),\n sources=(\n 'documents:document_version_list', 'documents:upload_version',\n 'documents:document_version_revert'\n )\n )\n\n post_upgrade.connect(\n receiver=handler_initialize_periodic_tasks,\n dispatch_uid='sources_handler_initialize_periodic_tasks'\n )\n post_initial_setup.connect(\n receiver=handler_create_default_document_source,\n dispatch_uid='sources_handler_create_default_document_source'\n )\n post_version_upload.connect(\n receiver=handler_copy_transformations_to_version,\n dispatch_uid='sources_handler_copy_transformations_to_version'\n )\n","sub_path":"mayan/apps/sources/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638840148","text":"\"\"\"\n[agent, opponent, ESTIMATE , COMINGOUT, VOTE, DIVINED , IDENTIFIED , AGREE,\n DISAGREE , REQUEST , INQUIRE , BECAUSE , DEAD , execute , divined , DAY]\nagent: 1 - 5,6ANY\nopponent: 1 - 5,6ANY\nestimate: 0, 1, 2, 3(werewolf, villager, seer, possessed)\ncomingout: 0, 1, 2, 3\nvote: 0, 1\ndivined: 0, 1(werewolf,villager)\nidentified: 0, 1(werewolf,villager)\nagree: 0, 1\ndisagree: 0, 1\nrequest: 0, 1\ninquire: 0, 1\nbecause: 0, 1\ndead: 0, 1\nexecute: 0, 1\ndivined(結果):0,1\nday:0,1,2\n\"\"\"\nimport numpy as np\nimport os\nfrom . 
import splitText\n\n# lstm\n\n\nclass preprocess2(object):\n def __init__(self):\n self.agentNum = 5\n self.content_map = {\n \"agent\": 0, \"opponent\": 1,\n \"ESTIMATE\": 2, \"COMINGOUT\": 3, \"VOTE\": 4, \"DIVINED\": 5, \"IDENTIFIED\": 6, \"AGREE\": 7, \"DISAGREE\": 8, \"REQUEST\": 9, \"INQUIRE\": 10, \"BECAUSE\": 11, \"attack\": 12, \"execute\": 13, \"divined\": 14, \"DAY\": 15\n }\n self.f_maps = []\n self.y_map = np.zeros(25)\n self.is_divine = False\n self.is_finish = False\n\n def update_result(self):\n self.y_map1 = self.y_map[:5]\n self.y_map2 = self.y_map[5:10]\n self.y_map3 = self.y_map[10:15]\n self.y_map4 = self.y_map[15:20]\n self.y_map5 = self.y_map[20:25]\n self.is_finish = True\n\n def update_status(self, contents):\n if \"Sample\" in contents[5]:\n agent = int(contents[2]) - 1\n self.y_map[agent*5+0] = 1\n\n elif \"CALM\" in contents[5]:\n agent = int(contents[2]) - 1\n self.y_map[agent*5+1] = 1\n\n elif \"Liar\" in contents[5]:\n agent = int(contents[2]) - 1\n self.y_map[agent*5+2] = 1\n\n elif \"REPEL\" in contents[5]:\n agent = int(contents[2]) - 1\n self.y_map[agent*5+3] = 1\n\n elif \"Follow\" in contents[5]:\n agent = int(contents[2]) - 1\n self.y_map[agent * 5 + 4] = 1\n\n def update_talk_content(self, agent, content):\n if len(content) == 0:\n return\n if content[0] == \"AGREE\" or content[0] == \"DISAGREE\":\n self.f_map[self.content_map[content[0]]] = 1\n self.f_map[self.content_map[\"agent\"]] = agent\n return\n if type(content[1]) == int:\n op = int(content[1])\n self.f_map[self.content_map[\"agent\"]] = agent\n self.f_map[self.content_map[\"opponent\"]] = op\n self.f_map[self.content_map[content[0]]] = 1\n elif content[1] == \"ANY\":\n self.f_map[self.content_map[\"agent\"]] = agent\n self.f_map[self.content_map[\"opponent\"]] = 5\n self.f_map[self.content_map[content[0]]] = 1\n else:\n self.update_talk_content(\n agent, splitText.splitText(content[1].replace(\"(\", \"\").replace(\")\", \"\")))\n self.update_talk_content(\n agent, splitText.splitText(content[2].replace(\"(\", \"\").replace(\")\", \"\")))\n\n def update_talk(self, contents):\n agent = int(contents[4])-1\n text = contents[5]\n texts = splitText.parse_text(text)\n for text in texts:\n content = splitText.splitText(text)\n if len(content) != 0:\n self.update_talk_content(agent, content)\n\n def update_divine(self, content):\n agent = int(content[2]) - 1\n op = int(content[3]) - 1\n self.f_map[self.content_map[\"agent\"]] = agent\n self.f_map[self.content_map[\"opponent\"]] = op\n self.f_map[self.content_map[content[1]]] = 1\n\n def update_attack(self, content):\n agent = int(content[0]) - 1\n op = int(content[2]) - 1\n self.f_map[self.content_map[\"agent\"]] = agent\n self.f_map[self.content_map[\"opponent\"]] = op\n self.f_map[self.content_map[content[1]]] = 1\n\n def update_dead(self, content):\n agent = int(content[2]) - 1\n self.f_map[self.content_map[\"agent\"]] = agent\n self.f_map[self.content_map[content[1]]] = 1\n\n def update(self, file_name):\n f = open(file_name, mode=\"r\")\n line = f.readline()\n if line[3] == \"SEER\":\n self.is_divine = True\n while line:\n self.f_map = [0] * len(self.content_map)\n # np.zeros(len(self.content_map))\n line = f.readline().rstrip(os.linesep)\n contents = line.split(\",\")\n if len(contents) == 1:\n continue\n elif contents[1] == \"talk\":\n self.update_talk(contents)\n elif contents[1] == \"execute\":\n self.update_dead(contents)\n elif contents[1] == \"attack\" and contents[3] == \"true\":\n self.update_attack(contents)\n elif contents[1] == 
\"divine\" and self.is_divine:\n self.update_divine(contents)\n elif contents[1] == \"status\":\n self.update_status(contents)\n elif contents[1] == \"result\":\n self.update_result()\n if any(self.f_map):\n self.f_maps.append(self.f_map)\n","sub_path":"wolf-strategy/train/utils/preprocess2.py","file_name":"preprocess2.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557644313","text":"import time\n\nresidence_limit = 90 # 45, 60\nschengen_constraint = 180\n\ndef visit_duration(*arguments):\n return(max(arguments) - min(arguments) + 1)\n\ndef check_input_data(visits_list, new_visit):\n days_of_visits = set()\n new_visit_set = set()\n for each in visits_list: days_of_visits.update(range(each[0], each[1] + 1))\n new_visit_set.update(range(new_visit[0], new_visit[1] + 1))\n if new_visit_set.intersection(days_of_visits):\n raise Exception('Вы уже были в это время в Европе')\n elif max(days_of_visits) >= min(new_visit_set):\n raise Exception('Ой-ей, что-то пошло не так...Вы хотите въехать в Шенген раньше последней даты выезда')\n\ndef check_date_consistency(date_start, date_end):\n if date_start >= date_end:\n print(\"Дата отъезда должна быть позднее даты начала поездки. Попробуйте еще раз\", end='\\n \\n')\n add_visit()\n\ndef visits_left(visits_list, new_visit_enter, residence_limit, schengen_constraint):\n period_start = (new_visit_enter) - (schengen_constraint)\n days_left = residence_limit\n for each_visit in visits_list:\n if each_visit[0] < period_start <= each_visit[1]:\n days_left -= visit_duration(each_visit[1], period_start)\n pass\n elif period_start <= each_visit[0]: days_left -= visit_duration(each_visit[1], each_visit[0])\n return (days_left)\n\ndef visit_add(visits_left, start, end):\n if visits_left >= (end - start + 1):\n visits.append([start, end])\n print('Ваша поездка добавлена')\n else:\n print('У вас не хватает дней для данной поездки, к этой дате въезда остаток составит всего лишь {} д'.format(visits_left))\n\ndef visit_left_print(visits_left):\n if visits_left > 0: print('Вы можете провести в путешествии {}д'.format(visits_left))\n else: print('К сожалению, Вы израсходовали все дни. Остаток: {}'.format(visits_left))\n\ndef preliminary_check():\n x = visits_left(visits, visits[-1][0], residence_limit, schengen_constraint)\n if x < 0:\n print('Что-то не в порядке с данными о поездках - вы пробыли в Европе больше дней, чем положено. Пожалуйста, проверьте информацию', visits, 'Дней израсходовано: {}'.format(residence_limit - x), sep='\\n')\n delete_visit()\n #!!!DELETE\n time.sleep(0.75)\n\ndef welcoming_msg():\n print('v - добавить визит, p - расчитать допустимую продолжительность следующей поездки,\\n r - удалить визит, e - выйти')\n time.sleep(0.25)\n print('Какое действие Вы хотите совершить?')\n\ndef delete_visit():\n print('Список поездок: {0}'.format(visits))\n print('Какую поездку Вы хотите удалить?')\n start = int(input('Дата начала поездки?'))\n end = int(input('Дата окончания поездки?'))\n time.sleep(0.75)\n counter=0\n for each in visits:\n if (start == each[0]) & (end == each[1]):\n visits.remove(each)\n counter += 1\n print('Поездка {0} была уд��лена. 
\\n Список поездок: {1}'.format(each, visits))\n if counter == 0: print('Мы не смогли найти Вашу поездку...')\n\ndef add_visit():\n start = int(input ('Дата начала поездки?'))\n end = int(input ('Дата окончания поездки?'))\n check_date_consistency(start, end)\n time.sleep(0.75)\n check_input_data (visits, [start, end])\n days_for_new_travel = visits_left(visits, start, residence_limit, schengen_constraint)\n visit_add(days_for_new_travel, start,end)\n print('Список поездок: {0}'.format(visits))\n\ndef exit_msg():\n print('До свидания!')\n working = False\n\ndef calculate_next_visit():\n start = int(input('Дата начала поездки?'))\n check_input_data(visits, [start,start])\n visit_left_print(\n visits_left(visits, start, residence_limit, schengen_constraint))\n\n#number of days used till 174 included - (10+15+44+5)\nvisits=[[1, 10], [25, 39], [87, 130], [150, 155]]\n#[87, 130]\n\nworking = True\nwhile working:\n preliminary_check()\n welcoming_msg()\n user_input = input()\n if user_input == 'v': add_visit()\n elif user_input == 'r': delete_visit()\n elif user_input == 'p': calculate_next_visit()\n elif user_input == 'e':\n exit_msg()\n working = False\n","sub_path":"Schengen_calc/Schengen_calculator.py","file_name":"Schengen_calculator.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"289700919","text":"import numpy as np\nimport cv2\nimport math\n\nratio = np.load('./calibration_data/pixel2mm.npy')\n\ncamera_index = 2\n\n\ndef distance (x, y, a, b):\n return np.sqrt((x - a) ** 2 + (y-b)**2)\n\n\ndef take_pictures():\n cap = cv2.VideoCapture(camera_index)\n\n while 1:\n # Capture frame-by-frame\n ret, image = cap.read()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n cv2.imshow('raw', gray)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY, gray)\n done = cv2.morphologyEx(gray, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)))\n edges = cv2.Canny(gray, 100, 200)\n\n # Display the resulting frame\n contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours_filtered = []\n\n # Filter out large contours\n for i in contours:\n if len(i) > 20:\n contours_filtered.append(i)\n\n mu = [None]*len(contours_filtered)\n mc = [None]*len(contours_filtered)\n\n # Moments\n for i in range(len(contours_filtered)):\n mu[i] = cv2.moments(contours_filtered[i])\n\n # Get the mass centers\n for i in range(len(contours_filtered)):\n if cv2.arcLength(contours_filtered[i], True) < 20:\n continue\n # add 1e-5 to avoid division by zero\n mc[i] = (mu[i]['m10'] / (mu[i]['m00'] + 1e-5), mu[i]['m01'] / (mu[i]['m00'] + 1e-5))\n\n\n # Check duplicates\n # Filter moments and centroids\n mu_filtered = []\n mc_filtered = []\n contours_filterered = []\n\n for i in range(len(contours_filtered)):\n pushable = True\n for j in range(len(contours_filterered)):\n if distance(mc[i][0], mc[i][1], mc_filtered[j][0], mc_filtered[j][1]) < 30:\n pushable = False\n break\n if pushable:\n contours_filterered.append(contours_filtered[i])\n mu_filtered.append(mu[i])\n mc_filtered.append(mc[i])\n\n\n drawing = np.zeros((edges.shape[0],edges.shape[1], 3), dtype=np.uint8)\n principal_angle = []\n bounding_boxes = []\n actual_legnth_of_boxes = []\n for i in range(len(contours_filterered)):\n # Draw contours\n color = (np.random.randint(0,256), np.random.randint(0,256), np.random.randint(0,256))\n cv2.drawContours(drawing, contours_filterered, i, color, 2)\n 
bound_rect = cv2.minAreaRect(contours_filterered[i])\n \n bound_4 = cv2.boxPoints(bound_rect)\n bound_4_len = np.linalg.norm(bound_4[0] - bound_4[1]), np.linalg.norm(bound_4[1] - bound_4[2])\n bounding_boxes.append(bound_4)\n print(bound_4)\n actual_bound = bound_4_len * ratio\n actual_legnth_of_boxes.append(actual_bound)\n print(\"length of bounding box: \", bound_4_len)\n print(\"actual length: \", actual_bound)\n\n cv2.circle(drawing, (int(mc_filtered[i][0]), int(mc_filtered[i][1])), 4, color, -1)\n # Principal angle\n num = 2 * (mu_filtered[i]['m00'] * mu_filtered[i]['m11'] -mu_filtered[i]['m10'] *mu_filtered[i]['m01'])\n denom = ((mu_filtered[i]['m00'] *mu_filtered[i]['m20'] -mu_filtered[i]['m10'] *mu_filtered[i]['m10']) - (mu_filtered[i]['m00'] *mu_filtered[i]['m02'] -mu_filtered[i]['m01'] *mu_filtered[i]['m01']))\n P_angle = 0.5 * math.atan2( num, denom)\n\n if P_angle > math.pi/ 2:\n P_angle -= math.pi\n m = math.tan(P_angle)\n\n x1 = mc_filtered[i][0] + 100\n x2 = mc_filtered[i][0] - 100\n y1 = m * (x1 - mc_filtered[i][0]) + mc_filtered[i][1]\n y2 = m * (x2 - mc_filtered[i][0]) + mc_filtered[i][1]\n x1 = int(x1)\n x2 = int(x2)\n y1 = int(y1)\n y2 = int(y2)\n principal_angle.append(P_angle * 180 / math.pi )\n cv2.line(drawing, (x1, y1), (x2, y2), color)\n print(i, P_angle * 180 / math.pi , mc_filtered[i])\n \n \n cv2.imshow('Contours', drawing)\n if cv2.waitKey() == ord('q'):\n cv2.destroyWindow('raw')\n return mc_filtered, principal_angle\n \n return\n\n\ndef mapping(act, image):\n act = np.matrix(act)\n ones = np.matrix(np.ones([1, 3]))\n act = np.concatenate((act, ones.T), axis=1)\n image = np.concatenate((image, ones.T), axis=1)\n act = act.T\n image = image.T\n A = np.matmul(act, image.I)\n return A\n \n# Yellow (469.51339957488125, 131.64767848311172) top-left \n# 187.51265\n# 367.98181\n# -182.7314\n# Green (551.8404734749532, 335.5730447400397) bottom-right\n# -185.7127\n# 576.04547\n# -184.7553\n# (481.7600934857015, 97.13918453055045) top-right\n# -140.7747\n# 382.30029\n# -177.4168\n\ndef find_grabbing(p, e1, e2):\n v1 = [e1[0][0] - e1[1][0], e1[0][1] - e1[1][1]]\n v2 = [e2[0][0] - e2[1][0], e2[0][1] - e2[1][1]]\n x1, y1 = (0, 0)\n x2, y2 = (1, math.tan(p))\n p_vec = [x2 - x1, y2 - y1]\n if abs(np.dot(p_vec, v1)) > abs(np.dot(p_vec, v2)):\n return np.linalg.norm(v1)\n else:\n return np.linalg.norm(v2)\n # [(x1, y1) , (x2, y2)]\n # [(x2, y2) , (x3, y3)]","sub_path":"calib/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"649917024","text":"from ontobio.io.gpadparser import GpadParser\nfrom ontobio.io import assocparser\n\nimport yaml\n\nPOMBASE = \"tests/resources/truncated-pombase.gpad\"\n\ndef test_skim():\n p = GpadParser()\n results = p.skim(open(POMBASE,\"r\"))\n print(str(results))\n\n\ndef test_parse():\n p = GpadParser(config=assocparser.AssocParserConfig(group_metadata=yaml.load(open(\"tests/resources/mgi.dataset.yaml\"), Loader=yaml.FullLoader)))\n test_gpad_file = \"tests/resources/mgi.test.gpad\"\n results = p.parse(open(test_gpad_file, \"r\"))\n print(p.report.to_markdown())\n","sub_path":"tests/test_gpad_parser.py","file_name":"test_gpad_parser.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"157922857","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom copy import deepcopy\n\nx = np.r_[np.random.randn(100, 2) + [2, 
2],\n np.random.randn(100, 2) + [-2, -2],\n np.random.randn(100, 2) + [2, -4]]\n\n[plt.scatter(e[0], e[1], c='black', s=7) for e in x]\n\nk = 3\nC_x = np.random.uniform(np.min(x[:, 0]), np.max(x[:, 0]), size=k)\nC_y = np.random.uniform(np.min(x[:, 1]), np.max(x[:, 1]), size=k)\nprint(type(C_x), C_x)\nprint(type(C_y), C_y)\nC = np.array(list(zip(C_x, C_y)), dtype=np.float32)\nprint(type(C), f'print C :{C}')\nplt.scatter(C_x, C_y, marker=\"*\", s=200, c='#005599')\nplt.show()\n\n\ndef dist(a, b, ax=1):\n return np.linalg.norm(a - b, axis=ax)\n\n\n# print(dist(np.array([[0, 0]]), np.array([[2, 2]])))\n\nC_old = np.zeros(C.shape)\nclusters = np.zeros(len(x))\ndelta = dist(C, C_old, None)\nprint(f\"delta={delta}\")\n\n\ndef plot_kmean(current_cluster, delta):\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']\n fig, ax = plt.subplots()\n for i in range(k):\n points = np.array([x[j] for j in range(len(x)) if current_cluster[j] == i])\n ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])\n ax.scatter(C[:, 0], C[:, 1], marker=\"*\", s=200, c=\"#005599\")\n plt.title(f\"delta for C will be:{delta:.4f} \")\n plt.plot()\n plt.show()\n\n\nwhile delta != 0:\n print(\"start a new iteration\")\n for i in range(len(x)):\n distances = dist(x[i], C)\n cluster = np.argmin(distances)\n clusters[i] = cluster\n C_old = deepcopy(C)\n for i in range(k):\n points = [x[j] for j in range(len(x)) if clusters[j] == i]\n C[i] = np.mean(points, axis=0)\n delta = dist(C, C_old, None)\n print(f\"delta={delta}\")\n plot_kmean(clusters, delta)\n","sub_path":"demo30_kmeans2.py","file_name":"demo30_kmeans2.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"193193752","text":"def citefind(file=None, outfile='citationsfound.xml', ns='crossref'):\n root = parse(file)\n if ns == 'crossref':\n urls = root.xpath('//foo:resource/text()',\n namespaces={\"foo\": \"http://www.crossref.org/schema/4.3.7\"})\n elif ns == 'bepress':\n urls = root.xpath('//fulltext-url/text()')\n else:\n return 'Currently only support bepress schema or crossref'\n\n count = 0\n allcitations = et.Element('root')\n for url in urls:\n count += 1\n r = requests.get(url, stream=True)\n with open('metadata.pdf', 'wb') as f:\n f.write(r.content)\n f.close()\n\n raw = parser.from_file('metadata.pdf')\n f = raw['content'].encode()\n zfind = re.findall(r'(?i)doi:.+?(?= |$)', str(f))\n print(len(zfind), url)\n if len(zfind) > 0:\n dois = [y.replace('\\\\n', '').replace(' ', '').replace(\n 'doi:', '').strip('.') for y in zfind]\n dfli = []\n works = Works()\n for item in dois:\n d = works.doi(item)\n df = pd.Series(d).to_frame()\n dfli.append(df.transpose())\n\n cr = pd.concat(dfli)\n cr = cr.reset_index(drop=True)\n cr['year'] = cr.created.apply(lambda x: getyear(x))\n cr['journaltitle'] = cr['container-title'].apply(\n lambda x: gettitle(x))\n cr['surname'] = cr['author'].apply(lambda x: getsurname(x))\n try:\n cr['fpage'] = cr['page'].apply(lambda x: firstpage(x))\n except KeyError:\n cr['fpage'] = ''\n cr['issues'] = cr['journal-issue'].apply(lambda x: issuefunc(x))\n\n xml = cr[['DOI', 'ISSN', 'journaltitle', 'surname',\n 'volume', 'issues', 'fpage', 'year']]\n tree = et.SubElement(allcitations, 'citationlist')\n tree.set('file', url)\n for row in xml.iterrows():\n citation = et.SubElement(tree, 'citation')\n citation.set('key', 'key-{}'.format(str(row[1]['DOI'])))\n\n issn = et.SubElement(citation, 'issn')\n try:\n issn.text = str(row[1]['ISSN'][0])\n try:\n issn = 
et.SubElement(citation, 'issn')\n issn.text = str(row[1]['ISSN'][1])\n except IndexError:\n pass\n except TypeError:\n pass\n\n journal_titlex = et.SubElement(citation, 'journal_title')\n journal_titlex.text = str(row[1]['journaltitle'])\n\n authorx = et.SubElement(citation, 'author')\n authorx.text = str(row[1]['surname'])\n\n volumex = et.SubElement(citation, 'volume')\n volumex.text = str(row[1]['volume'])\n\n issuex = et.SubElement(citation, 'issue')\n issuex.text = str(row[1]['issues'])\n\n first_pagex = et.SubElement(citation, 'first_page')\n first_pagex.text = str(row[1]['fpage'])\n\n cYearx = et.SubElement(citation, 'cYear', )\n cYearx.text = str(row[1]['year'])\n\n else:\n continue\n\n with open(outfile, 'wb') as fi:\n fi.write(et.tostring(allcitations,\n pretty_print=True, xml_declaration=True))\n fi.close()\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"596671838","text":"\"\"\"Factories for creating new databse objects.\"\"\"\nfrom pollbot.models import User\n\n\ndef user_factory(session, user_id, name, admin=False):\n \"\"\"Create a user.\"\"\"\n user = User(user_id, name)\n session.add(user)\n session.commit()\n\n return user\n","sub_path":"tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"490076460","text":"import client\r\nimport pygame\r\nimport os\r\n\r\nos.environ[\"SDL_VIDEO_CENTERED\"] = \"1\"\r\npygame.init()\r\n\r\nclient.setup()\r\n\r\npygame.mixer.music.load('sample_audio/crab_rave.mp3')\r\npygame.mixer.music.play(-1)\r\n\r\npygame.display.set_caption(\"Mind Music\")\r\nscreen = pygame.display.set_mode((360, 480))\r\n\r\nfont = pygame.font.SysFont(\"Ariel Black\", 24)\r\ntitle_font = pygame.font.SysFont(\"Ariel Black\", 48)\r\n\r\n\r\nimg1 = pygame.transform.scale(pygame.image.load(\"cover_imgs/crab_rave.jpg\"), (180, 180)).convert_alpha()\r\nimg2 = pygame.transform.scale(pygame.image.load(\"cover_imgs/megalovania.jpg\"), (180, 180)).convert_alpha()\r\n\r\ncover = img1\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nVIOLET = (190, 80, 190)\r\nGREY = (120, 120, 120)\r\n\r\nback1 = BLACK\r\nback2 = WHITE\r\n\r\nselection = 1\r\nchange = False\r\n\r\nvol = 0\r\n\r\ndef display():\r\n screen.fill(WHITE)\r\n pygame.draw.rect(screen, VIOLET, (0, 0, 360, 80))\r\n pygame.draw.rect(screen, VIOLET, (0, 420, 360, 60))\r\n screen.blit(title_font.render(\"Mind Music\", 5, WHITE), (95, 30))\r\n screen.blit(font.render(\"Crab Rave\", 5, back2, back1), (70, 120))\r\n screen.blit(font.render(\"Megalovania\", 5, back1, back2), (190, 120))\r\n screen.blit(cover, (90, 170))\r\n screen.blit(font.render(\"Keep volume low, as sudden\", 5, BLACK), (70, 370))\r\n screen.blit(font.render(\"increases in volume may occur\", 5, BLACK), (60, 385))\r\n for v in range(1, 100, 20):\r\n if v<=vol:\r\n pygame.draw.rect(screen, WHITE, (120+v, 450-v//5, 10, (80+v)//5))\r\n else:\r\n pygame.draw.rect(screen, GREY, (120+v, 450-v//5, 10, (80+v)//5)) \r\n screen.blit(font.render(str(vol), 5, WHITE), (230, 445))\r\n \r\n\r\nexit_flag = False\r\n\r\nwhile not exit_flag:\r\n res = client.get_value(pygame.mixer.music.get_volume())\r\n pygame.mixer.music.set_volume(res[0])\r\n\r\n vol = round(pygame.mixer.music.get_volume()*100)\r\n \r\n if res[1] == 'surprised':\r\n selection *= -1\r\n change = True\r\n \r\n 
print('Volume:', vol)\r\n\r\n\r\n if selection == -1 and change:\r\n pygame.mixer.music.load('sample_audio/crab_rave.mp3')\r\n pygame.mixer.music.play(-1)\r\n change = False\r\n back1 = BLACK\r\n back2 = WHITE\r\n cover = img1\r\n if selection == 1 and change:\r\n pygame.mixer.music.load('sample_audio/megalovania.mp3')\r\n pygame.mixer.music.play(-1)\r\n change = False\r\n back1 = WHITE\r\n back2 = BLACK\r\n cover = img2\r\n \r\n for e in pygame.event.get():\r\n if e.type == pygame.QUIT:\r\n exit_flag = True\r\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\r\n exit_flag = True\r\n \r\n display() \r\n pygame.display.flip()\r\n pygame.time.delay(4)\r\n\r\npygame.quit()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"313305680","text":"import json\nimport os\n\nimport pyseto\n\nfrom dataclasses import dataclass\nfrom glob import glob\nfrom typing import Callable, List, Any\n\nfrom eth_typing import HexStr\nfrom eth_utils import to_checksum_address\n\nfrom eth_challenge_base.config import Config\nfrom eth_challenge_base.utils import Account, Contract\n\n\n@dataclass\nclass Action:\n description: str\n handler: Callable[[], int]\n\n\nclass ActionHandler:\n def __init__(self, build_path: str, config: Config) -> None:\n with open(os.path.join(build_path, f\"{config.contract}.json\")) as fp:\n build_json = json.load(fp)\n self._contract: Contract = Contract(build_json)\n self._token_key = pyseto.Key.new(version=4, purpose=\"local\", key=os.getenv(\"TOKEN_SECRET\", \"\"))\n\n self._actions: List[Action] = [self._create_account_action(config.constructor_args),\n self._deploy_contract_action(config.constructor_value, config.constructor_args),\n self._get_flag_action(config.flag, config.solved_event)]\n if config.show_source:\n self._actions.append(self._show_source_action(build_path))\n\n def __getitem__(self, index: int) -> Action:\n return self._actions[index]\n\n def __len__(self):\n return len(self._actions)\n\n def _create_account_action(self, constructor_args: Any) -> Action:\n def action() -> int:\n account: Account = Account()\n print(f\"[+]deployer account: {account.address}\")\n token: str = pyseto.encode(self._token_key, payload=account.private_key).decode(\"utf-8\")\n print(f\"[+]token: {token}\")\n estimate_gas: int = self._contract.deploy.estimate_gas(constructor_args)\n print(f\"[+]it will cost {estimate_gas} gas to deploy, make sure that deployer account has enough ether!\")\n return 0\n\n return Action(description=\"Create an account which will be used to deploy the challenge contract\", handler=action)\n\n def _deploy_contract_action(self, constructor_value: int, constructor_args: Any) -> Action:\n def action() -> int:\n try:\n private_key: str = pyseto.decode(self._token_key, input(\"[-]input your token: \").strip()).payload.decode(\"utf-8\")\n except ValueError as e:\n print(e)\n return 1\n\n account: Account = Account(private_key)\n if account.balance() == 0:\n print(f\"[+]Don't forget to get some test ether for {account.address} first\")\n return 1\n\n contract_addr: str = account.get_deployment_address()\n try:\n tx_hash: str = self._contract.deploy(account, constructor_value, constructor_args)\n except ValueError as e:\n print(e)\n return 1\n print(f\"[+]contract address: {contract_addr}\")\n print(f\"[+]transaction hash: {tx_hash}\")\n return 0\n\n return Action(description=\"Deploy the challenge contract using your 
generated account\", handler=action)\n\n def _get_flag_action(self, flag: str, solved_event: str) -> Action:\n def action() -> int:\n try:\n private_key: str = pyseto.decode(self._token_key, input(\"[-]input your token: \").strip()).payload.decode(\"utf-8\")\n except ValueError as e:\n print(e)\n return 1\n\n account: Account = Account(private_key)\n nonce: int = account.nonce\n if nonce == 0:\n print(\"[+]challenge contract has not yet been deployed\")\n return 1\n\n contract_addr: str = account.get_deployment_address(nonce - 1)\n is_solved = False\n if solved_event:\n tx_hash = input(f\"[-]input tx hash that emitted {solved_event} event: \").strip()\n logs = self._contract.get_events(solved_event, HexStr(tx_hash))\n for item in logs:\n if item['address'] == contract_addr:\n is_solved = True\n else:\n is_solved = self._contract.at(to_checksum_address(contract_addr)).isSolved().call()\n\n if is_solved:\n print(f\"[+]flag: {flag}\")\n return 0\n else:\n print(\"[+]it seems that you have not solved the challenge~~~~\")\n return 1\n\n return Action(description=\"Get your flag once you meet the requirement\", handler=action)\n\n def _show_source_action(self, build_path: str) -> Action:\n def action() -> int:\n for path in glob(os.path.join(build_path, \"*.json\")):\n try:\n with open(path) as fp:\n build_json = json.load(fp)\n except json.JSONDecodeError:\n continue\n else:\n print()\n print(build_json[\"sourcePath\"])\n print(build_json[\"source\"])\n\n return 0\n\n return Action(description=\"Show the contract source code\", handler=action)\n","sub_path":"eth_challenge_base/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583696949","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport keras\nimport multiprocessing\nimport math\nimport statistics\n\nimport sklearn\nimport sklearn.tree\nimport sklearn.ensemble\nimport sklearn.naive_bayes\nimport sklearn.svm\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nfrom keras.layers import Dense, Dropout, GaussianNoise, Activation, Lambda, concatenate\nfrom keras.models import Sequential, load_model, Model\nfrom keras.regularizers import l2\nfrom keras.optimizers import Adam\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report, confusion_matrix, f1_score, roc_auc_score, accuracy_score, mean_squared_error\n\n#from cleverhans.attacks import SaliencyMapMethod\nfrom cleverhans.utils_keras import KerasModelWrapper\nfrom cleverhans.attacks import FastGradientMethod, DeepFool, CarliniWagnerL2\nfrom saliency_map_method import SaliencyMapMethod\n\nconfig = tf.ConfigProto(device_count={\"CPU\": multiprocessing.cpu_count()})\nkeras.backend.tensorflow_backend.set_session(tf.Session(config=config))\n\n'''\nDecision trees\n'''\ndef dtree_new(train_x, train_y, params):\n classifier = sklearn.tree.DecisionTreeClassifier(**params)\n classifier.fit(train_x, train_y)\n return classifier\n\ndef dtree_new_grid(train_x, train_y):\n classifier = sklearn.tree.DecisionTreeClassifier()\n params = {'max_depth': [3,5,7,9,11,13], 'criterion': ['gini', 'entropy']}\n search = GridSearchCV(classifier, params, scoring='roc_auc', n_jobs=8, cv=5, verbose=True)\n search.fit(train_x, train_y)\n classifier = search.best_estimator_\n print(search.best_params_)\n return search.best_params_, classifier\n\ndef dtree_test(classifier, test_x):\n pred_y = 
classifier.predict(test_x)\n return pred_y\n'''\nRandom Forest\n'''\ndef rforest_new(train_x, train_y, params):\n classifier = sklearn.ensemble.RandomForestClassifier(**params)\n classifier.fit(train_x, train_y)\n return classifier\n\ndef rforest_new_grid(train_x, train_y):\n classifier = sklearn.ensemble.RandomForestClassifier()\n params = {'max_depth': [5,7,9], 'criterion': ['gini', 'entropy'], 'n_estimators':[100,200,400]}\n search = GridSearchCV(classifier, params, scoring='roc_auc', n_jobs=8, cv=3, verbose=True)\n search.fit(train_x, train_y)\n classifier = search.best_estimator_\n print(search.best_params_)\n return search.best_params_, classifier\n\ndef rforest_test(classifier, test_x):\n pred_y = classifier.predict(test_x)\n return pred_y\n'''\nSVM\n'''\ndef svm_new(train_x, train_y, params):\n classifier = sklearn.svm.SVC(**params)\n classifier.fit(train_x, train_y)\n return classifier\n\ndef svm_new_grid(train_x, train_y):\n classifier = sklearn.svm.SVC()\n params = {'kernel':['rbf'], 'C':[10,100,1000], 'gamma':['auto']}\n search = GridSearchCV(classifier, params, scoring='roc_auc', n_jobs=-1, cv=3, verbose=True)\n search.fit(train_x, train_y)\n classifier = search.best_estimator_\n print(search.best_params_)\n return search.best_params_, classifier\n\ndef svm_test(classifier, test_x):\n pred_y = classifier.predict(test_x)\n return pred_y\n'''\nNaive Bayes\n'''\ndef nb_new(train_x, train_y, params):\n classifier = sklearn.naive_bayes.GaussianNB(**params)\n classifier.fit(train_x, train_y)\n return classifier\n\ndef nb_new_grid(train_x, train_y):\n classifier = sklearn.naive_bayes.GaussianNB()\n params = {'var_smoothing':[10,1e0,1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]}\n search = GridSearchCV(classifier, params, scoring='roc_auc', n_jobs=8, cv=5, verbose=True)\n search.fit(train_x, train_y)\n classifier = search.best_estimator_\n print(search.best_params_)\n return search.best_params_, classifier\n\ndef nb_test(classifier, test_x):\n pred_y = classifier.predict(test_x)\n return pred_y\n'''\nNeural Network (Keras)\n'''\ndef nn_new(train_x, train_y):\n model = Sequential()\n\n model.add(Dense(30, activation='relu', input_dim=train_x.shape[1]))\n model.add(Dense(30, activation='relu'))\n model.add(Dropout(.3, noise_shape=None, seed=None))\n\n model.add(Dense(train_y.shape[1], activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(train_x, train_y, epochs=5, verbose=1)\n\n return model\n\n\ndef dae_new(train_x, train_y):\n # Denoising autoencoder\n model = Sequential()\n \n model.add(GaussianNoise(1))\n model.add(Dense(train_x.shape[1]+7, activation='relu', input_dim=train_x.shape[1]))\n model.add(Dense(train_x.shape[1]+14, activation='relu'))\n model.add(Dense(train_x.shape[1]+7, activation='relu'))\n model.add(Dense(train_x.shape[1], activation='sigmoid'))\n\n #adam = Adam(lr=0.01)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(train_x, train_x, epochs=5, verbose=1, shuffle=False)\n \n #Classifier\n\n #model.pop()\n #model.pop()\n\n model.layers[0].trainable = False\n model.layers[1].trainable = False\n model.layers[2].trainable = False\n model.layers[3].trainable = False\n\n model.add(Dense(30, activation='relu'))\n model.add(Dense(30, activation='relu'))\n model.add(Dropout(.3, noise_shape=None, seed=None))\n model.add(Dense(train_y.shape[1], activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='Adam', 
metrics=['accuracy'])\n\n model.fit(train_x, train_y, epochs=5, verbose=1)\n\n return model\n\ndef distillation_new(train_x, train_y):\n model = Sequential()\n\n model.add(Dense(30, activation='relu', input_dim=train_x.shape[1]))\n model.add(Dense(30, activation='relu'))\n model.add(Dropout(.3, noise_shape=None, seed=None))\n\n model.add(Dense(train_y.shape[1], activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(train_x, train_y, epochs=5, verbose=1)\n\n new_y = []\n\n pred = model.predict(train_x)\n temperature = 100\n pred = np.array(pred)**(1/temperature)\n p_sum = pred.sum()\n sample_temp = pred/p_sum\n model2 = Sequential()\n\n model2.add(Dense(30, activation='relu', input_dim=train_x.shape[1]))\n model2.add(Dense(30, activation='relu'))\n model2.add(Dropout(.3, noise_shape=None, seed=None))\n\n model2.add(Dense(train_y.shape[1], activation='softmax'))\n\n model2.compile(optimizer=\"adam\", loss='categorical_crossentropy', metrics=['accuracy'])\n\n model2.fit(train_x, sample_temp, epochs=5, verbose=1)\n\n return model2\n\ndef binclass_new(train_x, train_y, advs, type=0, params=None):\n if type<= 3:\n lab = np.zeros((train_x.shape[0],1))\n lab[:] = 0\n\n train1 = np.concatenate((train_x, advs[0]))\n\n lab1 = np.concatenate((lab, advs[1]))\n else:\n lab = np.zeros((train_x.shape[0],2))\n lab[:,1] = 1\n\n train1 = np.concatenate((train_x, advs[0]))\n lab1 = np.concatenate((lab, advs[1]))\n\n if type==0:\n model1 = dtree_new(train_x, train_y, params)\n model2 = dtree_new(train1, lab1, params)\n elif type==1:\n model1 = rforest_new(train_x, train_y, params)\n model2 = rforest_new(train1, lab1, params)\n elif type==2:\n model1 = svm_new(train_x, train_y, params)\n model2 = svm_new(train1, lab1, params)\n elif type==3:\n model1 = nb_new(train_x, train_y, params)\n model2 = nb_new(train1, lab1, params)\n elif type==4:\n model1 = nn_new(train_x, train_y)\n model2 = nn_new(train1, lab1)\n elif type==5:\n model1 = dae_new(train_x, train_y)\n model2 = dae_new(train1, lab1)\n \n return model1, model2\n\ndef binclass_test(model1, model2, x, type=0):\n if type == 1:\n pred1 = nn_test(model1, x)\n pred2 = nn_test(model2, x)\n pred = pred1 + pred2\n pred[pred == 2] = 1\n print(pred1,pred2)\n return pred\n else:\n pred1 = dtree_test(model1, x)\n pred2 = dtree_test(model2, x)\n pred = pred1 + pred2\n pred[pred == 2] = 1\n return pred\n\ndef magnet_new(train_x, train_y, train_y2, params=None):\n model1 = Sequential()\n\n model1.add(Dense(train_x.shape[1]-7, activation='relu', input_dim=train_x.shape[1]))\n model1.add(Dense(train_x.shape[1]-14, activation='relu'))\n model1.add(Dense(train_x.shape[1]-7, activation='relu'))\n model1.add(Dense(train_x.shape[1], activation='sigmoid'))\n\n model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model1.fit(train_x, train_x, epochs=10, verbose=1, shuffle=False)\n\n #mask = extra[0] == 1 # dos\n #mask2 = extra[0] == 0 # normal\n\n all_losses = []\n for i in range(len(train_x)):\n res = model1.evaluate(train_x[i:i+1], train_x[i:i+1], batch_size=1, verbose=0)[0]\n all_losses.append(res)\n stdev = statistics.stdev(all_losses)\n mean = statistics.mean(all_losses)\n\n stddev_multiplier = 4\n limit = mean + stddev_multiplier * stdev\n \n\n model = rforest_new(train_x, train_y2, params)\n\n return model1, model, limit\n\n\ndef nn_predicted(preds):\n argmaxed = np.argmax(preds, axis=1)\n return np.logical_not(argmaxed)\n\n\ndef nn_test(classifier, test_x):\n pred_y 
= classifier.predict(test_x)\n pred_y = np.argmax(pred_y, axis=1)\n pred_y[pred_y == 1] = 2\n pred_y[pred_y == 0] = 1\n pred_y[pred_y == 2] = 0\n\n return pred_y\n\ndef magnet_test(model1, model2, limit, pred):\n all_predicts = []\n for i in range(len(pred)):\n res = model1.evaluate(pred[i:i+1], pred[i:i+1], batch_size=1, verbose=0)[0]\n if res > limit:\n all_predicts.append(1)\n else:\n all_predicts.append(0)\n \n fixed_x = model1.predict(pred)\n\n predicts = rforest_test(model2, fixed_x)\n\n final_predicts = all_predicts + predicts\n final_predicts[final_predicts==2] = 1\n\n return final_predicts\n\n'''\nPerformance\n'''\ndef auc(test_y, pred_y):\n return roc_auc_score(test_y, pred_y)\n\n#dont use this, debugging only\ndef auc_safe(test_y, pred_y):\n return roc_auc_score(np.concatenate((test_y,[0,1])), np.concatenate((pred_y,[0,1])))\n\ndef f1(test_y, pred_y):\n return f1_score(test_y, pred_y)\n\ndef cmat(test_y, pred_y):\n return confusion_matrix(test_y, pred_y)\n\ndef performance(test_y, pred_y):\n print('Performance:')\n print('Auc:',auc(test_y, pred_y))\n print('F1:',f1(test_y, pred_y))\n print('Conf:\\n',cmat(test_y, pred_y))\n'''\nJSMA\n'''\ndef jsma(model, x, y, theta, gamma):\n sess = keras.backend.get_session()\n wrap = KerasModelWrapper(model)\n\n jsma = SaliencyMapMethod(wrap, sess=sess)\n\n onehottarget = np.zeros((1, y.shape[1]), dtype=np.float32)\n onehottarget[0,1] = 1\n\n jsma_params = {'theta': theta, 'gamma': gamma,\n 'clip_min': 0., 'clip_max': 1.,\n 'y_target': onehottarget}\n\n adv_x = jsma.generate_np(x, **jsma_params)\n #jsma bug fix?\n for row in range(len(adv_x)):\n for feat in range(len(adv_x[row])):\n if abs(adv_x[row][feat] - x.values[row][feat]) > abs(theta) + 0.01:\n adv_x[row][feat] = x.values[row][feat] + theta\n\n adv_x = pd.DataFrame(adv_x, columns=x.columns.values)\n \n #print(x.loc[:,'count'])\n #print(adv_x.loc[:,'count'])\n\n return adv_x\n\ndef fgsm(model, x, y, eps):\n sess = keras.backend.get_session()\n wrap = KerasModelWrapper(model)\n\n fgsm = FastGradientMethod(wrap, sess=sess)\n\n fgsm_params = {'eps': eps, 'clip_min': 0., 'clip_max': 1.}\n\n adv_x = fgsm.generate_np(x, **fgsm_params)\n\n adv_x = pd.DataFrame(adv_x, columns=x.columns.values)\n\n return adv_x\n\ndef deepfool(model, x, y, os):\n sess = keras.backend.get_session()\n wrap = KerasModelWrapper(model)\n\n deepfool = DeepFool(wrap, sess=sess)\n\n df_params = {'nb_candidate': 2, 'overshoot': os, 'max_iter': 3,'clip_min': 0., 'clip_max': 1.}\n\n adv_x = deepfool.generate_np(x, **df_params)\n\n adv_x = pd.DataFrame(adv_x, columns=x.columns.values)\n\n return adv_x\n\ndef carliniwagner(model, x, y):\n sess = keras.backend.get_session()\n wrap = KerasModelWrapper(model)\n\n carwag = CarliniWagnerL2(wrap, sess=sess)\n\n onehottarget = np.zeros((1, y.shape[1]), dtype=np.float32)\n onehottarget[0,1] = 1\n #35 for cicids\n #47 for nslkdd\n cw_params = {'confidence':0.5,\n 'batch_size':47,\n 'learning_rate': 0.2,\n 'binary_search_steps':5,\n 'max_iterations':100,\n 'abort_early':True,\n 'initial_const':0.1,\n 'clip_min':0.,\n 'clip_max':1.}\n\n adv_x = carwag.generate_np(x, **cw_params)\n\n adv_x = pd.DataFrame(adv_x, columns=x.columns.values)\n\n return adv_x\n\n\n\n\ndef attack_stats(orig, adv):\n changed_cols = {}\n changes_cols = {}\n for col in orig.columns.values:\n changed_cols[col] = 0\n changes_cols[col] = 0\n for i in range(orig.shape[0]):\n for j in range(len(orig.columns.values)):\n change = orig.values[i][j] - adv.values[i][j]\n if abs(change) >= 0.001:\n col = 
orig.columns.values[j]\n changed_cols[col] += 1\n changes_cols[col] = (changes_cols[col] * (changed_cols[col] - 1) + change) / changed_cols[col]\n for col in orig.columns.values:\n if changed_cols[col] > 0:\n print(col + ':')\n print(changed_cols[col])\n print(changes_cols[col])\n\ndef feature_select(data, labels, fields, ncols):\n selector = SelectKBest(chi2,k=ncols)\n selector.fit(data, labels)\n cols = selector.get_support(indices=True)\n return cols\n","sub_path":"src/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":13579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394553110","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a TrabajaenMexico spider created on top of the ATSSpider\nscrapy crawl trabajaenmexico -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.trabajaenmexico.com/\"\n\nsample url:\n http://www.trabajaenmexico.com/\n\"\"\"\n\nfrom urlparse import urljoin\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString, HtmlFormatter\n\n\nclass TrabajaenMexico(ATSSpider):\n\n name = 'trabajaenmexico'\n download_delay = 0.3\n page = 0\n url_anchor = \"/todobuenophp.php?pagina=%s&tipo=Ver Todos&nom=&fec=Cualquier Fecha\"\n ref_re = r'iden=(\\d+)'\n details_xpath = '//td[div//*[contains(text(), \"%s\")]]/following-sibling::td//text()'\n\n def start_requests(self):\n yield self.make_request()\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\n '//table[@class=\"Estilo7\"]//tr/td/span/a/@href'\n ).extract()\n for job_url in jobs:\n job_url = urljoin(response.url, job_url)\n yield Request(job_url, callback=self.parse_job_callback())\n\n if jobs:\n self.page += 1\n yield self.make_request()\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n loader.add_xpath(\n 'date', self.details_xpath % \"Fecha:\", ConvertDateString('%d-%m-%Y')\n )\n loader.add_xpath('title', self.details_xpath % \"Vacante:\")\n loader.add_xpath('location', self.details_xpath % \"Ciudad:\")\n loader.add_xpath('company', self.details_xpath % \"Empresa:\")\n loader.add_xpath('baseSalary', self.details_xpath % \"Salario:\")\n loader.add_xpath(\n 'description',\n '//td[div//*[contains(text(), \"%s\")]]|//td[div//*[contains(text(), \"%s\")]]/following-sibling::td' % (unicode('Descripción:', 'utf-8'), unicode('Descripción:', 'utf-8')),\n HtmlFormatter()\n )\n yield loader.load_item()\n\n def make_request(self):\n return Request(\n urljoin(self.start_urls[0], self.url_anchor % self.page),\n callback=self.parse\n )\n","sub_path":"brightcorp/brightcorp/spiders/trabajaenmexico.py","file_name":"trabajaenmexico.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243763680","text":"import requests\n#import time\n\ndef lat_lon(*args):\n \"\"\"Funkcja jako argumenty przyjmuje poszczegolne elementy adresu\"\"\"\n adr = \"\"\n for arg in args:\n adr = adr+\"+\"+arg.replace(' ','+').replace(',','')\n adres = adr[1:]\n\n #time.sleep(1.25)\n response = requests.get(\"https://nominatim.openstreetmap.org\" \\\n 
\"/?format=json&addressdetails=1&q={}&format=json&limit=1\".format(adres))\n\n data = response.json()[0]\n # Zwraca szerokosc i dlugosc geograficzna\n # latwo mozna ja przerobic aby zwracala inne elementy\n return {'lat':float(data['lat']), 'lon':float(data['lon'])}\n\nif __name__ == '__main__':\n resp = lat_lon('83-323', 'Gdynia Morska', '1/3')\n print(resp['lat'])\n print(resp['lon'])\n","sub_path":"response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255507467","text":"\"\"\"\n Framework Django D01 - 42\n Created on:\n Author\n\"\"\"\n\n\"\"\" \n on importe le module sys\n\"\"\"\nimport sys\n\n\"\"\" \n global variables\n\"\"\"\n\ndef my_sort(el1, el2):\n \"\"\" \n This function is my_sort() and it is doing ...\n \"\"\"\n if el1 > el2:\n return -1\n if el2 < el2:\n return 1\n return 0\n\ndef convert_list_of_tuples_into_a_dictionary(l):\n \"\"\" \n This function is convert_list_of_tuples_into_a_dictionary() and it is doing ...\n \"\"\"\n d = {}\n for key, value in l:\n d.setdefault(value, []).append(key)\n return d\n\ndef is_key_present(x, d):\n \"\"\" \n This function is is_key_present() and it is doing ...\n \"\"\"\n if x.lower() in (name.lower() for name in d):\n return True\n else:\n return False\n\ndef is_value_present(x, d):\n \"\"\" \n This function is is_value_present() and it is doing ...\n \"\"\"\n if x.lower() in (name.lower() for name in d.values()):\n return True\n else:\n return False\n\ndef get_key_from_dictionnary_using_value_search(search_value, d):\n \"\"\" \n This function is get_capital_city() and it is doing ...\n \"\"\"\n for key, value in d.items():\n #print('key:', key, \"value:\", value)\n if value.lower() == search_value.lower():\n return key\n \n return None\n\ndef get_key_from_dictionnary_using_key_search(search_key, d):\n \"\"\" \n This function is get_() and it is doing ...\n \"\"\"\n for key, value in d.items():\n #print('key:', key, \"value:\", value)\n if key.lower() == search_key.lower():\n return key\n \n return None\n\ndef main(arguments):\n \"\"\" \n This function is main() and it is doing ...\n \"\"\"\n states = {\n \"Oregon\" : \"OR\",\n \"Alabama\" : \"AL\",\n \"New Jersey\": \"NJ\",\n \"Colorado\" : \"CO\"\n }\n\n capital_cities = {\n \"OR\": \"Salem\",\n \"AL\": \"Montgomery\",\n \"NJ\": \"Trenton\",\n \"CO\": \"Denver\"\n }\n\n if len(arguments) != 2:\n sys.exit(-1)\n\n capitals_or_states = [x.strip() for x in arguments[1].split(',')]\n capitals_or_states = [x for x in capitals_or_states if len(x) > 0]\n #capitals_or_states = [x.lower() for x in capitals_or_states]\n\n for x in capitals_or_states:\n if is_value_present(x, capital_cities) == True:\n short_name_state = get_key_from_dictionnary_using_value_search(x, capital_cities)\n if short_name_state in states.values():\n full_name_state = get_key_from_dictionnary_using_value_search(short_name_state, states)\n print(capital_cities[short_name_state], 'is the capital of', full_name_state)\n else:\n print(capital_cities[short_name_state], 'is a capital ', 'can not find corresponding state')\n elif is_key_present(x, states) == True:\n full_name_state = get_key_from_dictionnary_using_key_search(x, states)\n short_name_state = states[full_name_state]\n if short_name_state in capital_cities:\n full_name_capital_city = capital_cities[short_name_state]\n print(full_name_capital_city, 'is the capital of', full_name_state)\n else:\n print(fullt_name_state, 'is a state ', 'can 
not find corresponding capital city')\n else:\n print(x, 'is neither a capital city nor a state')\n\n sys.exit(0)\n\nif __name__ == '__main__' :\n \"\"\" \n \"\"\"\n arguments = sys.argv\n main(arguments)\n\n","sub_path":"01-piscine_python_django.a/day01/ex05/all_in.py","file_name":"all_in.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624196174","text":"import os\nimport subprocess\nimport fileinput\nimport sys\nfrom random import shuffle\nimport math\n\ndef transform_ajax(v):\n for line in fileinput.input(\"ajax.obj\", inplace=True):\n if line.startswith('v '):\n newLine = line.split()\n x = float(newLine[1])\n y = float(newLine[2])\n z = float(newLine[3])\n x = x+v[0]\n y = y+v[1]\n z = z+v[2]\n line = \"v \" + str(x) + \" \" + str(y) + \" \" + str(z) + \"\\n\"\n sys.stdout.write(line)\n\nxml_file = \"cboxArea.xml\"\n# ajax center of mass = <-1.36395 17.8657 -17.3952>\n#in this transform i just took a random point on the sphere from sphere2.object\n# and a random point from ajax and created the vector from the subtraction.\n# this way ajax should have showed up in the position of the sphere (and i wanted to work from there)\n# i really didn't try a lot of things here. i decided that it's better to focus on sphere first + area light first\ntransform_ajax([-6.23+(0.44+6.32),27.31+(0.36-27.31),-21.52+(0.051+21.52)])\nsubprocess.run([\"nori.exe\", xml_file])","sub_path":"imageGeneration/ajaxCboxDraft.py","file_name":"ajaxCboxDraft.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350607207","text":"import time\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport glob\nimport os\nfrom pathlib import Path\n\n\n'''Report DB03: Dataframe with followers and theirs bios'''\n\n\nclass DB03:\n def __init__(self, username, driver, cookies):\n self.username = username\n self.driver = driver\n self.cookies = cookies\n\n def create_report(self):\n print('Creating folder')\n root_folder = Path(__file__).parents[0]\n os.chdir(root_folder)\n folder = str(self.username)\n if not os.path.exists(folder):\n os.mkdir(folder)\n print('Folder created successfully')\n\n try:\n pd.read_csv(str(self.username) + '/report_03_list_' + str(self.username) + '.csv')\n except:\n report_03 = {\n 'index': [],\n 'followers_links': [],\n 'followers_names': []\n }\n report_03 = pd.DataFrame(data=report_03)\n report_03.to_csv(str(self.username) + '/report_03_list_' + str(self.username) + '.csv', index=False)\n\n def followers_link(self):\n username = self.username\n driver = self.driver\n cookies = self.cookies\n\n driver.get('https://www.instagram.com/accounts/onetap/?next=%2F')\n for cookie in cookies:\n driver.add_cookie(cookie)\n\n print(\"Accessing instagram page to be searched\") # accessing page to be searched\n driver.get('https://www.instagram.com/' + username)\n wait = WebDriverWait(driver, 10)\n try:\n wait.until(EC.presence_of_element_located(((By.PARTIAL_LINK_TEXT, 'follower'))))\n except:\n wait.until(EC.presence_of_element_located(((By.PARTIAL_LINK_TEXT, 'seguidor'))))\n\n print(\"Accessing followers page\")\n try:\n driver.find_element_by_partial_link_text(\"follower\").click()\n except:\n 
driver.find_element_by_partial_link_text(\"seguidor\").click()\n print(\"Followers page accessed successfully\")\n\n try: # waiting page to be loaded\n wait.until(\n EC.presence_of_element_located(((By.XPATH, \"//*[@id='react-root']/section/main/div/ul/li[2]/a/span\"))))\n except:\n wait.until(EC.presence_of_element_located(\n ((By.XPATH, '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a/span'))))\n\n try: # getting total amount of followers\n total_followers = driver.find_element_by_xpath(\n \"//*[@id='react-root']/section/main/div/ul/li[2]/a/span\").text\n total_followers = total_followers.replace('.', '')\n total_followers = total_followers.replace(',', '')\n except:\n total_followers = driver.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a/span').text\n total_followers = total_followers.replace('.', '')\n total_followers = total_followers.replace(',', '')\n\n if total_followers.find('k') == True:\n total_followers = total_followers.replace('k', '')\n total_followers = int(float(total_followers)) * 100\n print(\"Total number of followers approximately: \", total_followers)\n else:\n total_followers = int(float(total_followers))\n print(\"Total number of followers: \", total_followers)\n\n print(\"---------------------------\")\n print(\"Amount of followers: \", total_followers)\n\n fbody = driver.find_element_by_xpath(\"//div[@class='isgrP']\") # Getting screen info to scroll down\n scroll = 1\n followers_loaded = 0\n items = []\n items_names = []\n index = 0\n index_now = 0\n lista = []\n while index_now < int(float(total_followers)): # while the amount of followers scrapped is lesser than the total of followers.\n time.sleep(1)\n followers_on_screen = driver.find_elements_by_xpath('//a[contains(@class, \"FPmhX notranslate _0imsa \")]')\n for follower_on_screen in followers_on_screen:\n items.append(follower_on_screen.get_attribute('href')) # getting followers_link\n index += 1\n\n lista = items # removing duplicates links found from list\n lista = pd.Series(lista).drop_duplicates()\n index_now = len(lista)\n print(\"Total amount of followers now: \", index_now)\n\n if index_now < int(total_followers): # if the amount of scrapped followers is lesser than the total of followers.\n index_now = 0 # resetting the amount of scrapped followers\n driver.execute_script('arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',\n fbody) # does scroll down action\n time.sleep(1)\n fList = driver.find_elements_by_xpath(\"//div[@class='isgrP']//li\") # Amount of followers found\n scroll += 1\n\n print(\"---- Process Concluded ----\")\n print(\"---------------------------\")\n print(\"----- Showing results -----\")\n print(\"Amount of followers found: \", len(fList))\n print(\"Amount of ScrollDown done: \", scroll)\n print(\"---------------------------\")\n print(\"----- Processing data -----\")\n\n # scrape followers links and names from data using BS and CSV (without Selenium and Pandas)\n print(\"Scraping followers links and names from data\")\n source = driver.page_source\n data = bs(source, 'html.parser')\n index_now = 0\n index = []\n followers_links = []\n followers_names = []\n for followers in data.find_all('a', class_='FPmhX notranslate _0imsa'):\n index.append(index_now + 1)\n followers_links.append(followers['href']) # extraction of followers_links\n followers_names.append(followers.text) # extraction of followers_names\n\n report_03_list = {\n 'index': index,\n 'followers_links': followers_links,\n 
'followers_names': followers_names\n }\n report_03_list = pd.DataFrame(data=report_03_list)\n report_03_list = report_03_list.drop_duplicates()\n report_03_list = report_03_list.drop_duplicates()\n report_03_list.to_csv(str(username) + '/report_03_list_' + str(username) + '.csv', index=False)\n\n print(\"---------------------------\")\n print(\"Report:\")\n print(\"Amount of followers: \", int(total_followers))\n print(\"Amount of followers loaded: \", index_now)\n print(\"---------------------------\")\n\n def followers_link_list(self):\n followers_link_list = pd.read_csv(str(self.username) + '/report_03_list_' + str(self.username) + '.csv')\n followers_link_list = followers_link_list['followers_links'].tolist()\n return followers_link_list\n\n def followers_bios(self, followers_links_list):\n username = self.username\n driver = self.driver\n \n followers_link_completed = []\n followers_name_completed = []\n followers_bio_completed = []\n followers_amount_of_posts_completed = []\n followers_amount_of_followers_completed = []\n followers_amount_of_following_completed = []\n followers_private_completed = []\n\n report_03_bio = {\n 'followers_links': followers_link_completed,\n 'followers_names': followers_name_completed,\n 'amount_of_posts': followers_amount_of_posts_completed,\n 'amount_of_followers': followers_amount_of_followers_completed,\n 'amount_of_following': followers_amount_of_following_completed,\n 'private': followers_private_completed,\n 'bio': followers_bio_completed\n }\n\n report_03_bio = pd.DataFrame(data=report_03_bio)\n report_03_bio = report_03_bio.drop_duplicates()\n\n try:\n report_03_created = pd.read_csv(str(username) + '/report_03_bio_' + str(username) + '.csv')\n report_03_bio = report_03_created.append([report_03_bio], ignore_index=True)\n report_03_bio = report_03_bio.drop_duplicates()\n report_03_bio.to_csv(str(username) + '/report_03_bio_' + str(username) + '.csv', index=False)\n except:\n report_03_bio.to_csv(str(username) + '/report_03_bio_' + str(username) + '.csv', index=False)\n\n try:\n for insta_followers_links in followers_links_list: # accessing page to be searched\n try:\n insta_followers_links = str(insta_followers_links)\n\n print(\"Accessing instagram page to be searched\")\n driver.get('https://www.instagram.com' + insta_followers_links)\n time.sleep(3)\n\n print(\"Starting scrapping\")\n source = driver.page_source\n data = bs(source, 'html.parser')\n\n # followers_link\n followers_link_completed.append(insta_followers_links)\n\n # followers_name\n try:\n name = data.find(class_='-vDIg').h1.text\n followers_name_completed.append(name)\n except:\n name = str(\"n/a\")\n followers_name_completed.append(name)\n print('name:', name)\n\n # bio\n try:\n bio = data.find(class_='-vDIg').span.text\n if bio.find(\"Followed by\") == 0:\n bio = str(\"n/a\")\n followers_bio_completed.append(bio)\n else:\n bio = data.find(class_='-vDIg').text\n followers_bio_completed.append(bio)\n except:\n bio = str(\"n/a\")\n followers_bio_completed.append(bio)\n\n # amount_of_posts\n amount_of_posts = data.findAll(class_='g47SY')[0].text\n try:\n if (amount_of_posts.find('.') or amount_of_posts.find(',')):\n amount_of_posts = amount_of_posts.replace(',', '')\n amount_of_posts = amount_of_posts.replace('.', '')\n amount_of_posts = int(amount_of_posts)\n else:\n amount_of_posts = int(amount_of_posts)\n except:\n amount_of_posts = amount_of_posts\n followers_amount_of_posts_completed.append(amount_of_posts)\n\n # amount_of_followers\n amount_of_followers = 
data.findAll(class_='g47SY')[1].text\n try:\n if (amount_of_followers.find('.') or amount_of_followers.find(',')):\n amount_of_followers = amount_of_followers.replace(',', '')\n amount_of_followers = amount_of_followers.replace('.', '')\n amount_of_followers = int(amount_of_followers)\n else:\n amount_of_followers = int(amount_of_followers)\n except:\n amount_of_followers = amount_of_followers\n followers_amount_of_followers_completed.append(amount_of_followers)\n\n # amount_of_following\n amount_of_following = data.findAll(class_='g47SY')[2].text\n if amount_of_following.find('.') or amount_of_following.find(','):\n amount_of_following = amount_of_following.replace('.', '')\n amount_of_following = amount_of_following.replace(',', '')\n amount_of_following = int(amount_of_following)\n else:\n amount_of_following = int(amount_of_following)\n followers_amount_of_following_completed.append(amount_of_following)\n\n # is_private\n try:\n is_private = data.find(class_='rkEop').text == 'This Account is Private'\n followers_private_completed.append(\"1\") # This Account is Private\n except:\n followers_private_completed.append(\"0\")\n\n report_03_bio = {\n 'followers_links': followers_link_completed,\n 'followers_names': followers_name_completed,\n 'amount_of_posts': followers_amount_of_posts_completed,\n 'amount_of_followers': followers_amount_of_followers_completed,\n 'amount_of_following': followers_amount_of_following_completed,\n 'private': followers_private_completed,\n 'bio': followers_bio_completed\n }\n\n report_03_bio = pd.DataFrame(data=report_03_bio)\n report_03_bio = report_03_bio.drop_duplicates()\n try:\n report_03_created = pd.read_csv(str(username) + '/report_03_bio_' + str(username) + '.csv')\n report_03_bio = report_03_created.append([report_03_bio], ignore_index=True)\n report_03_bio = report_03_bio.drop_duplicates()\n report_03_bio.to_csv(str(username) + '/report_03_bio_' + str(username) + '.csv', index=False)\n except:\n report_03_bio.to_csv(str(username) + '/report_03_bio_' + str(username) + '.csv', index=False)\n\n # deleting source of data if the post_link was processed\n report_03_link_list = pd.read_csv(str(username) + '/report_03_bio_' + str(username) + '.csv')\n report_03_link_list = report_03_link_list['followers_links'].tolist()\n\n item_processed = insta_followers_links\n try:\n if report_03_link_list.index(item_processed) >= 0:\n doc_source = pd.read_csv(str(username) + '/report_03_list_' + str(username) + '.csv') # deleting source of data\n doc_source.drop(doc_source.index[doc_source['followers_links'] == str(insta_followers_links)], inplace=True)\n doc_source.to_csv(str(username) + '/report_03_list_' + str(username) + '.csv', index=False)\n except:\n pass\n except:\n pass\n except:\n pass\n\n @staticmethod\n def delete_file_by_key_name(username, key_word_for_file):\n current_dir = os.path.dirname(os.path.realpath(__file__)) # accessing report_01_IMG (to delete processed rows)\n path = current_dir + '\\\\' + username + \"\\\\\" + '*' + key_word_for_file + '*'\n for file_to_delete in glob.glob(path):\n os.remove(file_to_delete)\n\n report_03_bio = pd.read_csv(str(username) + '/report_03_bio_' + str(username) + '.csv')\n report_03_bio.to_csv(str(username) + '/report_03_' + str(username) + '.csv', index=False) # save report_03_bio as report_03\n os.remove(str(username) + '/report_03_bio_' + str(username) + '.csv') # delete report_03_bio\n\n def run(self):\n print('Starting to create report 03')\n username = self.username\n driver = self.driver\n cookies = 
self.cookies\n\n start = DB03(username, driver, cookies)\n start.create_report()\n start.followers_link()\n followers_link_list = start.followers_link_list()\n start.followers_bios(followers_link_list)\n\n # deleting report 01 without after whole process:\n followers_link_list = start.followers_link_list() # create: a dataframe db02img; returns a list of img_links\n if len(followers_link_list) == 0:\n key_word_for_file = 'report_03_list'\n start.delete_file_by_key_name(username, key_word_for_file)\n\n print('Process \"Report 03\" concluded.')\n\n","sub_path":"codes/DB03.py","file_name":"DB03.py","file_ext":"py","file_size_in_byte":16362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"145145215","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sys\nfrom PyQt4.QtGui import QApplication\nfrom PyQt4.QtCore import QUrl\nfrom PyQt4.QtWebKit import QWebPage\nimport bs4 as bs\nimport urllib.request\nimport os\nimport datetime\n\n\n###############################################################\npath_of_brandwise = 'C:\\\\LavaWebScraper\\\\BrandWiseFiles\\\\'\n###############################################################\n\nbase_url = 'http://www.allcall.hk/phones.html'\nur='http://www.allcall.hk'\ncountry = 'China'\ncompany = 'AllCall'\nmodel_list = []\nusp = []\ndisplay_list = []\nmemory_list = []\nprocessor_list = []\ncamera_list = []\nbattery_list = []\nthickness_list = []\nextras_links = []\nrecords = []\nhref = []\nst_list_heads=[]\nst_list_dets=[]\nhr=[]\nr=requests.get(base_url)\nsoup=BeautifulSoup(r.text,'html.parser')\nresults=soup.find_all('li',attrs={'class':'col-xs-6 col-sm-3 col-md-3 col-lg-3 padding-left-right-def'})\nfor i in range(len(results)):\n model_list.append(results[i].find('h4').text)\n href.append(results[i].find('a')['href'])\nHREF=[]\nfor i in range(len(href)):\n href[i]=ur + href[i]\nfor i in range(len(href)):\n r=requests.get(href[i])\n soup=BeautifulSoup(r.text,'html.parser')\n results=soup.find_all('ul',attrs={'class':'col-xs-12 col-sm-12 col-md-12 col-lg-12 padding-margin-no padding-margin-def subnavul'})\n if len(results)==1:\n sa=results[0].find_all('li')\n for a in range(len(sa)):\n sb=sa[a].text\n if sb == 'Specification':\n hr.append(ur + sa[a].find('a')['href']) \n else:\n hr.append(href[i])\n \nfor i in range(len(hr)):\n s=''\n heads=[]\n dets=[]\n r=requests.get(hr[i])\n soup=BeautifulSoup(r.text,'html.parser')\n results=soup.find_all('div',attrs={'class':'container overflow-hidden padding-tottom-5'})\n for a in range(len(results)):\n sa=results[a].find_all('ul',attrs={'class':'col-xs-12 col-sm-8 col-md-8 col-lg-8 padding-top-5 padding-margin-def top-info'})\n for b in range(len(sa)):\n sb=sa[b].find_all('li')\n for c in range(len(sb)):\n tt = sb[c].text.strip('\\n')\n tt = tt.strip('\\t')\n s=s+ tt + ' || '\n s=s.strip('\\n')\n s=s.strip('\\t').replace('\\xa0',' ')\n usp.append(s)\n for a in range(len(results)):\n sa=results[a].find_all('ul',attrs={'class':'col-xs-12 col-sm-12 col-md-12 col-lg-12 padding-margin-def content'})\n for b in range(len(sa)-1):\n sb=sa[b].find_all('li')\n for c in range(len(sb)):\n if c%2 ==0:\n heads.append(sb[c].text.strip('\\n\\t'))\n else:\n dets.append(sb[c].text.strip('\\n\\t').replace('\\xa0',' ').replace('\\n',' '))\n st_list_heads.append(heads)\n st_list_dets.append(dets)\nfor i in range(len(st_list_heads)):\n q1 = ''\n q2 = ''\n r1 = ''\n r2 = ''\n s1 = ''\n s2 = ''\n for j in range(len(st_list_heads[i])):\n if 'cpu' 
in st_list_heads[i][j].lower():\n processor_list.append(st_list_dets[i][j].replace(' (','(').replace(')',')'))\n if 'memory' in st_list_heads[i][j].lower():\n memory_list.append(st_list_dets[i][j])\n if 'battery' in st_list_heads[i][j].lower():\n battery_list.append(st_list_dets[i][j])\n if 'ram' in st_list_heads[i][j].lower():\n q1='RAM:- '+st_list_dets[i][j]+' || '\n if 'rom' in st_list_heads[i][j].lower():\n q2='ROM:- '+st_list_dets[i][j]+' || '\n if 'screen size' in st_list_heads[i][j].lower() or'display size' in st_list_heads[i][j].lower():\n r1 ='Size:- ' +st_list_dets[i][j]+' || '\n if 'type' in st_list_heads[i][j].lower():\n r2 ='Type:- '+ st_list_dets[i][j]+' || '\n if 'rear' in st_list_heads[i][j].lower() and 'camera' in st_list_heads[i][j].lower():\n s1='Rear camera:- '+st_list_dets[i][j]+' || '\n if 'front' in st_list_heads[i][j].lower() and 'camera' in st_list_heads[i][j].lower():\n s2='Front camera:- '+st_list_dets[i][j]+' || '\n \n \n if q1!='' or q2!='':\n memory_list.append(q1+' '+q2)\n if r1!='' or r2!='':\n display_list.append(r1+' '+r2)\n if s1!='' or s2!='':\n camera_list.append(s1 +' '+ s2)\n if len(battery_list)==i:\n battery_list.append('Not Available')\n if len(camera_list)==i:\n camera_list.append('Not Available')\n if len(display_list)==i:\n display_list.append('Not Available')\n if len(memory_list)==i:\n memory_list.append('Not Available')\n if len(processor_list)==i:\n processor_list.append('Not Available')\n if len(thickness_list)==i:\n thickness_list.append('Not Available')\n if len(usp)==i:\n usp.append('Not Available')\nprint(len(model_list))\nprint(len(usp))\nprint(len(thickness_list))\nprint(len(processor_list))\nprint(len(memory_list))\nprint(len(battery_list))\nprint(len(display_list))\nprint(len(camera_list))\nextras_links = hr\nfor i in range(len(model_list)):\n records.append((country, company, model_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))\n\ndf = pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS'])\ndf.to_csv(os.path.join(path_of_brandwise, str(datetime.date.today())+ '-allcall' +'.csv'), index=False, encoding='utf-8')\n","sub_path":"allcall.py","file_name":"allcall.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"160678129","text":"\"\"\"\nDefines the deviceCommand class\n\"\"\"\nimport threading\n\n__author__ = 'Cesar'\n\nimport time\nimport urllib2\nimport xml.etree.ElementTree as myXML\nimport config\n\n\nclass deviceCommand:\n \"\"\"\n deviceCommand class\n \"\"\"\n\n d_id = ''\n commandId = ''\n command = ''\n threadRunning = False\n\n def __init__(self, d_id, commandId, command, response, threadRunning):\n\n self.d_id = d_id\n self.commandId = commandId\n self.command = command\n self.response = response\n self.threadRunning = threadRunning\n\n # spawn a new thread for every new device\n commandFinderThread = threading.Thread(target=self.commandFinder)\n commandFinderThread.daemon = True\n commandFinderThread.start()\n\n def getCommand_via_httpPOST(self):\n \"\"\"\n Request command pending to Intelectix\n \"\"\"\n req = urllib2.Request(url='http://petrologweb.com/api/command',\n headers={'Content-Type': 'text/xml',\n 'Authorization': 'DeviceNumber={0},ApiKey=UGV0cm9sb2dDbGllbnRl'.format(self.d_id)})\n try:\n r = urllib2.urlopen(req)\n resp_xml 
= myXML.ElementTree(myXML.fromstring(r.read()))\n root = resp_xml.getroot()\n for resp in root.findall('{http://schemas.datacontract.org/2004/07/Ifx.Api.Model}Response'):\n if resp.find('Command') is not None and resp.find('ConsoleCommandId') is not None:\n c = resp.find('Command').text\n c_id = resp.find('ConsoleCommandId').text\n r = [c_id, c]\n return r\n return 'e:No Command found'\n except urllib2.URLError as e:\n config.logging.warning('Error on device {0} = {1}'.format(self.d_id, e.reason))\n return 'e:Connection Error'\n\n def setCommandResponse_via_httpPOST(self):\n \"\"\"\n Set the response of a command to Intelectix\n \"\"\"\n commandResponseData = myXML.parse(config.xml_root+'/setCommandResponse.xml')\n root = commandResponseData.getroot()\n root[0].text = self.commandId\n root[1].text = self.response\n req = urllib2.Request(url='http://petrologweb.com/api/command',\n data=myXML.tostring(root),\n headers={'Content-Type': 'text/xml',\n 'Authorization': 'DeviceNumber={0},ApiKey=UGV0cm9sb2dDbGllbnRl'.format(self.d_id)})\n try:\n r = urllib2.urlopen(req)\n resp_xml = myXML.ElementTree(myXML.fromstring(r.read()))\n root = resp_xml.getroot()\n resp = root.find('{http://schemas.datacontract.org/2004/07/Ifx.Api.Model}Response')\n if resp.text == 'true':\n # Response successful, get ready for next command.\n self.commandId = ''\n self.command = ''\n self.response = ''\n return True\n else:\n config.logging.warning('Error in command response!')\n return False\n\n except urllib2.URLError as e:\n config.logging.warning('Failed to open connection to server! Error = %s', e.reason)\n\n def commandFinder(self):\n \"\"\"\n Continuously looks for commands.\n \"\"\"\n self.threadRunning = True\n while True:\n try:\n if self.d_id != '':\n command = self.getCommand_via_httpPOST()\n c_id = command[0]\n c = command[1]\n if c_id != 'e':\n # If command found and commandId and command are empty (we are NOT in the middle of another\n # command) then: save command and commandId and send command to corresponding Petrolog\n if self.command == '' and self.commandId == '':\n # Process command\n if c == 'Pg38h':\n # Ignore Intelectix internal command\n config.logging.debug('Ignoring Intelectix internal command')\n self.command = c\n self.commandId = c_id\n self.response = ''\n self.setCommandResponse_via_httpPOST()\n else:\n config.logging.debug('DeviceID=[{0}] CommandID=[{1}] Command=[{2}]'\n .format(self.d_id, c_id, c))\n # Change CPU address to 0F\n c = c[2:]\n c = '0F'+c\n self.command = c\n self.commandId = c_id\n from mqttPetrolog import mqttc\n mqttc.publish(self.d_id+'/SC', self.command)\n except Exception as e:\n config.logging.error('Error in Thread! 
DeviceID = %s', self.d_id)\n\n time.sleep(5)\n\n# Dictionary to store Commands\nCommands = {deviceCommand}\n\n","sub_path":"deviceCommand_class.py","file_name":"deviceCommand_class.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"525800719","text":"import importlib\nimport pytest\nfrom datetime import datetime, timedelta\nfrom unittest.mock import patch\n\nfrom snuba.clickhouse.http import JSONRowEncoder\nfrom snuba.clusters.cluster import CLUSTERS, ClickhouseClientSettings, get_cluster\nfrom snuba.clusters.storage_sets import StorageSetKey\nfrom snuba.consumer import KafkaMessageMetadata\nfrom snuba.datasets.schemas.tables import TableSchema\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_storage, get_writable_storage\nfrom snuba.migrations.errors import MigrationError\nfrom snuba.migrations.groups import get_group_loader, MigrationGroup\nfrom snuba.migrations.parse_schema import get_local_schema\nfrom snuba.migrations.runner import MigrationKey, Runner\nfrom snuba.migrations.status import Status\nfrom snuba.utils.metrics.backends.dummy import DummyMetricsBackend\nfrom snuba.writer import BatchWriterEncoderWrapper\nfrom tests.fixtures import get_raw_transaction\n\n\ndef _drop_all_tables() -> None:\n for cluster in CLUSTERS:\n connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)\n database = cluster.get_database()\n\n data = connection.execute(\n f\"SELECT name FROM system.tables WHERE database = '{database}'\"\n )\n for (table,) in data:\n connection.execute(f\"DROP TABLE IF EXISTS {table}\")\n\n\ndef setup_function() -> None:\n _drop_all_tables()\n\n\ndef teardown_function() -> None:\n _drop_all_tables()\n\n\ndef test_get_status() -> None:\n runner = Runner()\n assert runner.get_status(\n MigrationKey(MigrationGroup.EVENTS, \"0001_events_initial\")\n ) == (Status.NOT_STARTED, None)\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n assert runner.get_status(\n MigrationKey(MigrationGroup.EVENTS, \"0001_events_initial\")\n ) == (Status.NOT_STARTED, None)\n runner.run_migration(MigrationKey(MigrationGroup.EVENTS, \"0001_events_initial\"))\n status = runner.get_status(\n MigrationKey(MigrationGroup.EVENTS, \"0001_events_initial\")\n )\n assert status[0] == Status.COMPLETED\n assert isinstance(status[1], datetime)\n assert status[1] > datetime.now() - timedelta(seconds=1)\n\n\ndef test_show_all() -> None:\n runner = Runner()\n assert all(\n [\n migration.status == Status.NOT_STARTED\n for (_, group_migrations) in runner.show_all()\n for migration in group_migrations\n ]\n )\n runner.run_all(force=True)\n assert all(\n [\n migration.status == Status.COMPLETED\n for (_, group_migrations) in runner.show_all()\n for migration in group_migrations\n ]\n )\n\n\ndef test_run_migration() -> None:\n runner = Runner()\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n\n connection = get_cluster(StorageSetKey.MIGRATIONS).get_query_connection(\n ClickhouseClientSettings.MIGRATE\n )\n assert connection.execute(\n \"SELECT group, migration_id, status, version FROM migrations_local;\"\n ) == [(\"system\", \"0001_migrations\", \"completed\", 1)]\n\n # Invalid migration ID\n with pytest.raises(MigrationError):\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"xxx\"))\n\n # Run out of order\n with pytest.raises(MigrationError):\n 
runner.run_migration(MigrationKey(MigrationGroup.EVENTS, \"0003_errors\"))\n\n # Running with --fake\n runner.run_migration(\n MigrationKey(MigrationGroup.EVENTS, \"0001_events_initial\"), fake=True\n )\n assert connection.execute(\"SHOW TABLES LIKE 'sentry_local'\") == []\n\n\ndef test_reverse_migration() -> None:\n runner = Runner()\n runner.run_all(force=True)\n\n connection = get_cluster(StorageSetKey.MIGRATIONS).get_query_connection(\n ClickhouseClientSettings.MIGRATE\n )\n\n # Invalid migration ID\n with pytest.raises(MigrationError):\n runner.reverse_migration(MigrationKey(MigrationGroup.SYSTEM, \"xxx\"))\n\n with pytest.raises(MigrationError):\n runner.reverse_migration(MigrationKey(MigrationGroup.EVENTS, \"0003_errors\"))\n\n # Reverse with --fake\n for migration_id in reversed(\n get_group_loader(MigrationGroup.EVENTS).get_migrations()\n ):\n runner.reverse_migration(\n MigrationKey(MigrationGroup.EVENTS, migration_id), fake=True\n )\n assert (\n len(connection.execute(\"SHOW TABLES LIKE 'sentry_local'\")) == 1\n ), \"Table still exists\"\n\n\ndef test_get_pending_migrations() -> None:\n runner = Runner()\n total_migrations = get_total_migration_count()\n assert len(runner._get_pending_migrations()) == total_migrations\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n assert len(runner._get_pending_migrations()) == total_migrations - 1\n\n\ndef test_run_all() -> None:\n runner = Runner()\n assert len(runner._get_pending_migrations()) == get_total_migration_count()\n\n with pytest.raises(MigrationError):\n runner.run_all(force=False)\n\n runner.run_all(force=True)\n assert runner._get_pending_migrations() == []\n\n\ndef test_reverse_all() -> None:\n runner = Runner()\n all_migrations = runner._get_pending_migrations()\n runner.run_all(force=True)\n for migration in reversed(all_migrations):\n runner.reverse_migration(migration, force=True)\n\n connection = get_cluster(StorageSetKey.MIGRATIONS).get_query_connection(\n ClickhouseClientSettings.MIGRATE\n )\n assert connection.execute(\"SHOW TABLES\") == [], \"All tables should be deleted\"\n\n\ndef get_total_migration_count() -> int:\n count = 0\n for group in MigrationGroup:\n count += len(get_group_loader(group).get_migrations())\n return count\n\n\ndef test_version() -> None:\n runner = Runner()\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n migration_key = MigrationKey(MigrationGroup.EVENTS, \"test\")\n assert runner._get_next_version(migration_key) == 1\n runner._update_migration_status(migration_key, Status.IN_PROGRESS)\n assert runner._get_next_version(migration_key) == 2\n runner._update_migration_status(migration_key, Status.COMPLETED)\n assert runner._get_next_version(migration_key) == 3\n\n\ndef test_no_schema_differences() -> None:\n runner = Runner()\n runner.run_all(force=True)\n\n for storage_key in StorageKey:\n storage = get_storage(storage_key)\n conn = storage.get_cluster().get_query_connection(\n ClickhouseClientSettings.MIGRATE\n )\n\n schema = storage.get_schema()\n\n if not isinstance(schema, TableSchema):\n continue\n\n table_name = schema.get_local_table_name()\n local_schema = get_local_schema(conn, table_name)\n\n assert (\n schema.get_column_differences(local_schema) == []\n ), f\"Schema mismatch: {table_name} does not match schema\"\n\n\ndef test_transactions_compatibility() -> None:\n cluster = get_cluster(StorageSetKey.TRANSACTIONS)\n connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)\n\n def get_sampling_key() 
-> str:\n database = cluster.get_database()\n ((sampling_key,),) = connection.execute(\n f\"SELECT sampling_key FROM system.tables WHERE name = 'transactions_local' AND database = '{database}'\"\n )\n return sampling_key\n\n # Create old style table without sampling expression and insert data\n connection.execute(\n \"\"\"\n CREATE TABLE transactions_local (`project_id` UInt64, `event_id` UUID,\n `trace_id` UUID, `span_id` UInt64, `transaction_name` LowCardinality(String),\n `transaction_hash` UInt64 MATERIALIZED CAST(cityHash64(transaction_name), 'UInt64'),\n `transaction_op` LowCardinality(String), `transaction_status` UInt8 DEFAULT 2,\n `start_ts` DateTime, `start_ms` UInt16, `finish_ts` DateTime, `finish_ms` UInt16,\n `duration` UInt32, `platform` LowCardinality(String), `environment` LowCardinality(Nullable(String)),\n `release` LowCardinality(Nullable(String)), `dist` LowCardinality(Nullable(String)),\n `ip_address_v4` Nullable(IPv4), `ip_address_v6` Nullable(IPv6), `user` String DEFAULT '',\n `user_hash` UInt64 MATERIALIZED cityHash64(user), `user_id` Nullable(String),\n `user_name` Nullable(String), `user_email` Nullable(String),\n `sdk_name` LowCardinality(String) DEFAULT CAST('', 'LowCardinality(String)'),\n `sdk_version` LowCardinality(String) DEFAULT CAST('', 'LowCardinality(String)'),\n `http_method` LowCardinality(Nullable(String)) DEFAULT CAST('', 'LowCardinality(Nullable(String))'),\n `http_referer` Nullable(String),\n `tags.key` Array(String), `tags.value` Array(String), `_tags_flattened` String,\n `contexts.key` Array(String), `contexts.value` Array(String), `_contexts_flattened` String,\n `partition` UInt16, `offset` UInt64, `message_timestamp` DateTime, `retention_days` UInt16,\n `deleted` UInt8) ENGINE = ReplacingMergeTree(deleted) PARTITION BY (retention_days, toMonday(finish_ts))\n ORDER BY (project_id, toStartOfDay(finish_ts), transaction_name, cityHash64(span_id))\n TTL finish_ts + toIntervalDay(retention_days);\n \"\"\"\n )\n\n assert get_sampling_key() == \"\"\n generate_transactions()\n\n runner = Runner()\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n runner._update_migration_status(\n MigrationKey(MigrationGroup.TRANSACTIONS, \"0001_transactions\"), Status.COMPLETED\n )\n runner.run_migration(\n MigrationKey(\n MigrationGroup.TRANSACTIONS,\n \"0002_transactions_onpremise_fix_orderby_and_partitionby\",\n ),\n force=True,\n )\n\n assert get_sampling_key() == \"cityHash64(span_id)\"\n\n assert connection.execute(\"SELECT count(*) FROM transactions_local;\") == [(5,)]\n\n\ndef generate_transactions() -> None:\n from datetime import datetime\n\n table_writer = get_writable_storage(StorageKey.TRANSACTIONS).get_table_writer()\n\n rows = []\n\n for i in range(5):\n raw_transaction = get_raw_transaction()\n # Older versions of this table did not have measurements\n del raw_transaction[\"data\"][\"measurements\"]\n\n processed = (\n table_writer.get_stream_loader()\n .get_processor()\n .process_message(\n (2, \"insert\", raw_transaction),\n KafkaMessageMetadata(0, 0, datetime.utcnow()),\n )\n )\n rows.extend(processed.rows)\n\n BatchWriterEncoderWrapper(\n table_writer.get_batch_writer(metrics=DummyMetricsBackend(strict=True)),\n JSONRowEncoder(),\n ).write(rows)\n\n\ndef test_groupedmessages_compatibility() -> None:\n cluster = get_cluster(StorageSetKey.EVENTS)\n database = cluster.get_database()\n connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)\n\n # Create old style table witihout project ID\n 
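# --- Editor's illustrative sketch (not part of the original test_runner.py) ---
# Both compatibility tests in this file read a single attribute of a table out
# of system.tables (sampling_key above, primary_key below). A hedged
# generalisation of that lookup could look like this; table_attribute is a
# hypothetical helper, not part of snuba.
def table_attribute(connection, database, table, column):
    """Return one system.tables column for the given table."""
    ((value,),) = connection.execute(
        f"SELECT {column} FROM system.tables "
        f"WHERE name = '{table}' AND database = '{database}'"
    )
    return value

# e.g. table_attribute(connection, database, 'transactions_local', 'sampling_key')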
connection.execute(\n \"\"\"\n CREATE TABLE groupedmessage_local (`offset` UInt64, `record_deleted` UInt8,\n `id` UInt64, `status` Nullable(UInt8), `last_seen` Nullable(DateTime),\n `first_seen` Nullable(DateTime), `active_at` Nullable(DateTime),\n `first_release_id` Nullable(UInt64)) ENGINE = ReplacingMergeTree(offset)\n ORDER BY id SAMPLE BY id SETTINGS index_granularity = 8192\n \"\"\"\n )\n\n migration_id = \"0010_groupedmessages_onpremise_compatibility\"\n\n runner = Runner()\n runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, \"0001_migrations\"))\n events_migrations = get_group_loader(MigrationGroup.EVENTS).get_migrations()\n\n # Mark prior migrations complete\n for migration in events_migrations[: (events_migrations.index(migration_id))]:\n runner._update_migration_status(\n MigrationKey(MigrationGroup.EVENTS, migration), Status.COMPLETED\n )\n\n runner.run_migration(\n MigrationKey(MigrationGroup.EVENTS, migration_id), force=True,\n )\n\n assert connection.execute(\n f\"SELECT primary_key FROM system.tables WHERE name = 'groupedmessage_local' AND database = '{database}'\"\n ) == [(\"project_id, id\",)]\n\n\ndef test_settings_skipped_group() -> None:\n from snuba.migrations import groups, runner\n\n with patch(\"snuba.settings.SKIPPED_MIGRATION_GROUPS\", {\"querylog\"}):\n importlib.reload(groups)\n importlib.reload(runner)\n runner.Runner().run_all(force=True)\n\n connection = get_cluster(StorageSetKey.MIGRATIONS).get_query_connection(\n ClickhouseClientSettings.MIGRATE\n )\n assert connection.execute(\"SHOW TABLES LIKE 'querylog_local'\") == []\n","sub_path":"tests/migrations/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":12659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} {"seq_id":"327833071","text":"#coding:utf-8\r\n'''\r\nCreated on 15 Dec 2017\r\n\r\n@author: qiujiahao\r\n\r\n@email:997018209@qq.com\r\n\r\n'''\r\n#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\nsys.path.append('..')\r\n\r\nimport os\r\nimport time\r\nfrom sklearn import metrics\r\nfrom datetime import timedelta\r\nfrom cnn.cnn_module import TextCNN\r\nfrom cnn.data import data\r\nfrom conf import *\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ndef get_time_dif(start_time):\r\n \"\"\"Get the elapsed time\"\"\"\r\n end_time = time.time()\r\n time_dif = end_time - start_time\r\n return timedelta(seconds=int(round(time_dif)))\r\n\r\ndef feed_data(x_batch, y_batch, keep_prob,model):\r\n feed_dict = {\r\n model.input_x: x_batch,\r\n model.input_y: y_batch,\r\n model.keep_prob: keep_prob\r\n }\r\n return feed_dict\r\n\r\ndef evaluate(sess, x_, y_,model):\r\n \"\"\"Evaluate accuracy and loss on a given dataset\"\"\"\r\n feed_dict = feed_data(x_, y_, 1.0,model)\r\n loss, acc,scores = sess.run([model.loss, model.acc,model.logits], feed_dict=feed_dict)\r\n\r\n return loss, acc,scores\r\n\r\ndef train(args,data):\r\n with tf.Graph().as_default() as g:\r\n model=TextCNN(args)\r\n session = tf.Session(graph=g)\r\n with session.as_default():\r\n session.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n if not os.path.exists(args.module_path):\r\n os.makedirs(args.module_path)\r\n print('Training and evaluating...')\r\n start_time = time.time()\r\n total_batch = 0 # total number of batches processed\r\n best_acc_val = 0.0 # best validation-set accuracy\r\n last_improved = 0 # batch at which the last improvement happened\r\n require_improvement = 1000 # stop training early if there is no improvement for more than 1000 rounds\r\n \r\n flag = False\r\n batches=data.get_batch_data()\r\n for index,batch in enumerate(batches):\r\n x_batch, y_batch,x_val,y_val=batch\r\n feed_dict = feed_data(x_batch, y_batch, args.dropout_keep_prob,model)\r\n if len(data.quest_label)<1000:\r\n args.print_per_batch=10\r\n if total_batch % args.print_per_batch == 0:\r\n # report performance on the training and validation sets every print_per_batch rounds\r\n feed_dict[model.keep_prob] = 1.0\r\n loss_train, acc_train= session.run([model.loss, model.acc], feed_dict=feed_dict)\r\n loss_val, acc_val,scores = evaluate(session, x_val, y_val,model) # todo\r\n \r\n if acc_val > best_acc_val:\r\n # save the best result\r\n best_acc_val = acc_val\r\n last_improved = total_batch\r\n saver.save(sess=session, save_path=args.module_path)\r\n improved_str = '*'\r\n else:\r\n improved_str = ''\r\n \r\n time_dif = get_time_dif(start_time)\r\n msg = 'total_batch: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},'\\\r\n + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'\r\n print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))\r\n \r\n session.run(model.optim, feed_dict=feed_dict) # run the optimization step\r\n total_batch += 1\r\n \r\n if total_batch - last_improved > require_improvement:\r\n # validation accuracy has not improved for a long time; stop training early\r\n print(\"No optimization for a long time, auto-stopping...\")\r\n flag = True\r\n break # break out of the loop\r\n if flag: # early stop was triggered above\r\n print('Best accuracy:',best_acc_val)\r\n\r\n\r\nif __name__ == '__main__': \r\n args=get_args()\r\n data=data(args) \r\n train(args,data)","sub_path":"11.人机对话第二次验收版本/对话_第二版/cnn/run_cnn.py","file_name":"run_cnn.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"197207503","text":"#!/usr/bin/env python3\n# This file will automate creation and testing of lxc containers and maas.\n# Long term plan: spin up a maas container in LXC. A few things need to be in\n# place for this to work:\n\n #1:) LXC Container Templates (/etc/lxc/lxc.conf) need to be set up to\n # ... include allowable loop mount perms\n #2:) LXC Container Templates need to already include the correct Bridge\n # device to adhere to\n #3:) This is designed to run through Jenkins for automation\n #4:) Long term plans are to add systems via MaaS CLI and test\n #4a:) Commissioning / Deployment / Images\n #5:) Currently only Trusty & Xenial are supported\n\n\n#USAGE:\n # All user defined variables should be entered in the 'example.yaml'\n\n #1.) To Provision the Container use the '-p' flag (Useful for just building\n # a Container)\n\n #2.) To Provision the Container and Configure the container use the\n # '-p -c' flags\n\n #3.) To Provision & Configure the Container and Configure MAAS\n # use '-p -c -m'\n\n ####3a.) 
Configuring the MAAS Environment will:\n # -- Download Boot Images\n # -- Configure SSH Keys (supplied by you)\n # -- Configure Proxies (for the lab)\n\nimport subprocess\nimport os\nimport sys\nimport argparse\nimport time\nimport yaml\n\ncontainer = \"maas-qa-builder\"\n\n\nclass lxcmanager():\n\n def lxcbuilder(self, config, release):\n '''Build the lxc container, requires the location of your lxc.conf\n Useful in the event you want to have a customer config file for testing\n '''\n\n try:\n subprocess.call([\"sudo\", \"lxc-create\", \"-t\", \"ubuntu\", \"-n\",\n container, \"-f\", config, \"--\", \"-r\", release])\n except Exception as e:\n print(e)\n return False\n\n def lxcstop(self):\n ''' Stops the lxc container'''\n\n try:\n subprocess.call([\"sudo\", \"lxc-stop\", \"-n\", container, \"-f\"])\n return 1\n except Exception as e:\n print(e)\n return 0\n\n def lxcstart(self):\n ''' Start the lxc container'''\n\n try:\n subprocess.call([\"sudo\", \"lxc-start\", \"-n\", container])\n except Exception as e:\n print(e)\n return False\n\n def lxcinfo(self):\n '''Gather lxc container information'''\n\n try:\n subprocess.call([\"sudo\", \"lxc-info\", \"-n\", container])\n except Exception as e:\n print(e)\n return False\n\n def lxccheckconfig(self):\n '''Verify the template is correct, just output to console, If we find\n that the lxc.conf is present adjust the location accordingly'''\n\n loc = \"maas-qa-builder/config\"\n if os.path.isfile(\"/etc/lxc/lxc.conf\"):\n lxcconf = subprocess.check_output([\"sudo\", \"cat\",\n \"/etc/lxc/lxc.conf\"]).split(\"\\n\")\n if any('lxc.lxcpath =' in s for s in lxcconf):\n foo = filter(lambda x: 'lxc.lxcpath =' in x, lxcconf)\n bar = ''.join(foo)\n newconfig = bar.split(\"=\")[1].replace(\" \", \"\")\n subprocess.call([\"sudo\", \"cat\", newconfig + loc])\n else:\n subprocess.call([\"sudo\", \"cat\", \"/var/lib/lxc/\" + loc])\n return True\n\n def getlxcipaddress(self, interface):\n '''Get the lxc ip address '''\n\n try:\n myaddress = subprocess.getoutput([\"sudo lxc-attach -n %s -e -- \\\n ifconfig %s\" % (container, interface)]).split(\"\\n\")[1].split() \\\n [1][5:]\n return myaddress\n except Exception as e:\n print(e)\n return False\n\n def lxcruncmd(self, cmd):\n '''Take a command and run it through lxc attach'''\n p = subprocess.Popen(cmd, stderr=subprocess.PIPE,\n stdout=subprocess.PIPE, universal_newlines=True, shell=True)\n output, errors = p.communicate()\n print(output, errors, p.returncode)\n if p.returncode or errors:\n print (\"Errors Found\")\n return False\n\n\n def lxccheckdistro(self):\n '''Check the distro and do Foo if needed'''\n\n command = \"lsb_release -a\"\n commandxenial0 = \"mknod /dev/loop0 b 7 1\"\n commandxenial1 = \"mknod /dev/loop1 b 7 1\"\n commandxenial2 = \"mknod /dev/loop-control b 10 237\"\n\n try:\n foo = subprocess.getoutput([\"sudo lxc-attach -n %s -e -- %s\" %\n (container, command)]).split(\"\\t\")\n try:\n# Shell=True won't paas exceptions so i need to revisit this and improve'\n if 'xenial' in foo:\n subprocess.call([\"sudo lxc-attach -n %s -e -- %s\" %\n (container, commandxenial0)], shell=True)\n subprocess.call([\"sudo lxc-attach -n %s -e -- %s\" %\n (container, commandxenial1)], shell=True)\n subprocess.call([\"sudo lxc-attach -n %s -e -- %s\" %\n (container, commandxenial2)], shell=True)\n except Exception as e:\n print(e)\n return False\n except Exception as e:\n print(e)\n return False\n\nclass operationsmanager():\n '''Everything in here is written to pull from our yaml, if there is an\n error we will 
display it on the screen but not necessarily pass an\n exception'''\n\n def install_debs(self, debs):\n '''Here we install anything to make anything work'''\n\n try:\n subprocess.call([\"sudo lxc-attach -n %s -e -- apt-get install \\\n -y %s\" % (container, debs)], shell=True)\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n def install_ppa(self, ppa):\n '''Install a PPA, for example, MAAS Devel PPA'''\n\n try:\n if ppa is \"none\":\n print(\"None\")\n subprocess.check_call([\"sudo\", \"lxc-attach\", \"-n\", container, \\\n \"-e\", \"--\", \"add-apt-repository\", \"-y\", ppa])\n self.update_pkg_index()\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n def update_pkg_index(self):\n '''Update ubuntu pkg index'''\n\n try:\n subprocess.check_call([\"sudo\", \"lxc-attach\", \"-n\", container, \\\n \"-e\", \"--\", \"apt-get\",\"update\"])\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n\nclass maasmanager():\n\n\n def configure_maas_admin(self, username, password, email):\n '''Configure the MAAS ADMIN; maas-region-admin to be depreciated for\n maas 2.0'''\n\n try:\n subprocess.check_call([\"sudo\", \"lxc-attach\", \"-n\", container, \\\n \"-e\", \"--\", \"sudo\", \"maas-region\", \"createadmin\", \\\n \"--username\", username, \"--password\", password, \"--email\",\\\n email])\n except subprocess.CalledProcessError as e:\n print (e)\n return False\n\n def get_maas_user_apikey(self, username):\n '''Get the maas API KEY for user; maas-region-admin to be depreciated\n for maas 2.0'''\n\n try:\n api = subprocess.getoutput([\"sudo lxc-attach -n %s -e -- \\\n maas-region apikey --username %s\" % (container, username)])\n return api\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n def login_maas(self, profile, url, api):\n '''Set the maas boot sources'''\n\n try:\n subprocess.check_call([\"maas\", \"login\", profile, url, api])\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n def import_boot_images(self, profile):\n ''' Import the MAAS BOOT IMAGES'''\n\n try:\n subprocess.check_call([\"maas\", profile, \"boot-resources\", \"import\"])\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n#This does not work needs to updated, maas2.0 API and support 1.0 API\n def set_boot_sources(self, profile, url):\n ''' Configure the boot source URL prior to updating'''\n\n try:\n subprocess.call([\"maas %s boot-source update 1 \\\n url=%s\" % (profile, url)], shell=True)\n return 1\n except Exception as e:\n print(e)\n return 0\n\nclass di():\n\n def make_bootfile(self, tftploc, template, release, archive):\n ''' Create a Bootfile that lives in (tftpbootloc)\n For the host we are booting, note that the template needs to be\n present for this to happen'''\n # We will feed this via the yaml file\n\n bootfile = template + \"-\" + release + \"-\" + archive\n\n #Lets create the bootfile\n try:\n foo = subprocess.check_call([\"sudo\", \"cp\", \"-v\", tftploc + \\\n template, bootfile])\n return foo\n\n except subprocess.CalledProcessError as e:\n print(e)\n return False\n\n def configure_bootfile(self, bootfile, archive, preseed, hostname):\n ''' Configure the bootfile with the items required to PXE boot such as:\n - Preseed, Archive, Hostname'''\n\n try:\n infile = open(bootfile, \"r\")\n outfile = open(bootfile + \".mac\", \"a\")\n outfile.write(infile.read().replace(\"interface=auto\",\n preseed + \" \" + archive + \" \" + \"hostname=\" + hostname))\n infile.close()\n 
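# --- Editor's illustrative sketch (not part of the original seqa.py record) ---
# Several methods above shell out with subprocess.call(..., shell=True) and, as
# the inline comment in lxccheckdistro notes, shell=True does not surface
# failures as exceptions. A hedged alternative that keeps an argument list and
# raises CalledProcessError on a non-zero exit could look like this; lxc_exec
# is a hypothetical helper, not part of seqa.py.
import subprocess

def lxc_exec(container_name, *cmd):
    """Run a command inside an LXC container and return its stdout."""
    return subprocess.check_output(
        ["sudo", "lxc-attach", "-n", container_name, "-e", "--"] + list(cmd),
        universal_newlines=True)

# e.g. lxc_exec("maas-qa-builder", "lsb_release", "-cs").strip()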
outfile.close()\n\n except Exception as e:\n print (e)\n return False\n\n\n\n\ndef main():\n '''Make sure that you updated your yaml template prior to running'''\n\n lxc = lxcmanager()\n ops = operationsmanager()\n maas= maasmanager()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--provision',\n required=False,\n action='store_true',\n help=\"Provision the lxc container\")\n parser.add_argument('-c', '--configure',\n required=False,\n action='store_true',\n help=\"Configure Maas in the lxc container\")\n parser.add_argument('-i', '--ipaddress',\n required=False,\n action='store_true',\n help=\"Get Container IP\")\n parser.add_argument('-m', '--maas',\n required=False,\n action='store_true',\n help=\"Prep Maas Environment\")\n parser.add_argument('-d', '--di',\n required=False,\n action='store_true',\n help=\"Prep D-I\")\n parser.add_argument('-y', '--yaml',\n required=True,\n type=str,\n help=\"Location of Yaml File\")\n\n\n args = parser.parse_args()\n\n\n with open(args.yaml) as f:\n doc = yaml.safe_load(f)\n\n##################################################################\n## Check to see if '-p' flag is triggered, if so, Build the container,\n## check and start if flag is passed, Grab Container Info and\n## Start the Container\n## Note: We recommend using a custom lxc.conf called \"lxc.conf.lxcbot\" This file\n## Allows us to specify a hard mac address so that the IP stays the\n## same. Easy for Testing\n##################################################################\n\n if args.provision is True:\n try:\n print(\"Provisioning Container\")\n #This is for Jenkins\n sys.stdout.flush()\n #/\n lxc.lxcbuilder(doc[\"lxc_config_path\"], doc[\"lxc_release\"])\n lxc.lxcinfo()\n lxc.lxccheckconfig()\n lxc.lxcstart()\n time.sleep(10)\n #This is for Jenkins\n sys.stdout.flush()\n #/\n print (\"Container IP ADDRESS IS: \" + lxc.getlxcipaddress(\"eth0\"))\n except Exception:\n pass\n\n #This is for Jenkins\n sys.stdout.flush()\n #/\n\n##################################################################\n## Check to see if the '-c' flag is triggered, if so,\n## Configure the container and Install MAAS\n## Modify these values as you see fit..lxcmanager()\n##################################################################\n\n#We assume you want maas installed\n if args.configure is True:\n try:\n #This is for Jenkins\n sys.stdout.flush()\n #/\n print (\"Configuring the Container, please wait\")\n time.sleep(10)\n ops.update_pkg_index()\n time.sleep(5)\n ops.update_pkg_index()\n ops.install_debs(doc[\"packages_to_install\"])\n ops.install_ppa(doc[\"software_ppa\"])\n if args.maas is True:\n print(\"Going to Configure MAAS PACKAGES\")\n ops.install_debs(\"maas\")\n\n except Exception as e:\n print (e)\n\n##################################################################\n## Check to see if the '-i' flag is triggered, if so, get the\n## ip address of the container, Informational only\n##################################################################\n\n if args.ipaddress is True:\n try:\n #This is for Jenkins\n sys.stdout.flush()\n print(\"Printing Eth0 info for container \")\n print(lxc.getlxcipaddress(\"eth0\"))\n print(maas.get_maas_user_apikey(doc[\"maas-username\"]))\n except Exception as e:\n print (e)\n\n##################################################################\n## Check to see if the '-m' flag is triggered, if so, prep the MAAS\n## environment, Configure USER, APIKEY, SSHKEY, Load the Boot Images,\n## Configure Maas Proxy, 
etc\n##################################################################\n\n if args.maas is True:\n try:\n print(\"Starting to prep maas env\")\n #This is for Jenkins\n sys.stdout.flush()\n #/\n print (\"Configuring MAAS account\")\n maas.configure_maas_admin(doc[\"maas-username\"],\n doc[\"maas-password\"], doc[\"maas-email\"])\n time.sleep(5)\n maas.get_maas_user_apikey(doc[\"maas-username\"])\n time.sleep(2)\n maas.login_maas(doc[\"maas-profile\"], \"http://\" +\n lxc.getlxcipaddress(\"eth0\") + \"/MAAS\",\n maas.get_maas_user_apikey(doc[\"maas-username\"]))\n time.sleep(6)\n maas.import_boot_images(doc[\"maas-profile\"])\n except Exception as e:\n print (e)\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"seqa.py","file_name":"seqa.py","file_ext":"py","file_size_in_byte":15037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310187722","text":"import sys\r\nsys.path.append('C:\\\\Users\\\\Nick\\\\PycharmProjects\\\\1st-itertion\\\\1st iteration')\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\n\r\nfrom dataUtils.dataUtils import match, dBaseAction, emailReport, writeDBv2\r\nimport AlgorithcTrading.config.crypto as cryptoConfig\r\nimport AlgorithcTrading.config.stocks as stockConfig\r\nfrom AlgorithcTrading.dataFetch import DataFetch\r\nfrom AlgorithcTrading.backtesting import Backtesting\r\nfrom AlgorithcTrading.stockUtils import getTickerYahooStats\r\n\r\n\r\nclass Trading(object):\r\n\r\n def __init__(self, asset='crypto'):\r\n\r\n if asset == 'crypto':\r\n self.dBase = cryptoConfig.dBase\r\n self.coinTable = cryptoConfig.coinTable\r\n self.saveLocation = cryptoConfig.plotSaveLocation\r\n self.title = cryptoConfig.emailTitle\r\n self.portfolio = cryptoConfig.portfolio\r\n\r\n elif asset == 'stocks':\r\n self.dBase = stockConfig.dBase\r\n self.yahooStatsTable = stockConfig.yahooStatsTable\r\n self.saveLocation = stockConfig.plotSaveLocation\r\n self.title = stockConfig.emailTitle\r\n self.portfolio = stockConfig.portfolio\r\n\r\n def runCrypto(self, runType='hourly'):\r\n\r\n # Crypto is essentially momentumless, but based purely on sentiment. It will run up and then run straight down.\r\n # Until it is more established and less correlated, momentum trading cannot work. 
Thus simply run very slow\r\n # averages to identify when the market might start trending upwards rather than current down trend.\r\n\r\n downloadStocks = DataFetch()\r\n downloadStocks.get_cryptoData(runList=[runType])\r\n\r\n currentPortfolio = dBaseAction(cryptoConfig.dBase, 'Select * from %s' % cryptoConfig.portfolio)\r\n if len(currentPortfolio) > 0:\r\n currentPortfolio = currentPortfolio[0].ticker.unique()\r\n else:\r\n currentPortfolio=None\r\n\r\n buysAndSells = Backtesting()\r\n buysAndSells = buysAndSells.run(runType='cryptoHourly' if runType == 'hourly' else 'cryptoDaily',\r\n returnType='BAU', method='momentum',\r\n portfolioTickers=currentPortfolio,\r\n seedLength=30,\r\n averageType='ewma',\r\n minTimeseriesVol=0.1,\r\n CTAlookback=500,\r\n minVolume=100e3,\r\n buyWindow=[400],\r\n sellWindow=[18],\r\n sellWindowB=[190],\r\n baseWindow=[500],\r\n lookback=10000,\r\n priceVolatilityPeriod=400,\r\n priceVolatilityThreshold=0.037,\r\n priceCrossMax=0.6, ####### lenient\r\n priceCrossThreshold=0.03,\r\n nPeriodsGradient=5,\r\n priceVolatilityMaxPeriod=50,\r\n priceVolatilityMaxThreshold=0.03,\r\n minObservations=10,\r\n volumeFilter=24,\r\n maxPriceBaseCross=6,\r\n maxPriceBaseCrossPeriod=100,\r\n returnAveragePeriod=50,\r\n minInterpVolatility=0.0002,\r\n minCrossGradient=-10,\r\n minCrossGradientSell=-10,\r\n sellLosers=-1,\r\n minPriceBasePercent=0.04,\r\n minVolatility=0.004,\r\n minAverageReturn=0.002,\r\n maxRollingReturn=1,\r\n maxAverageRatio=10,\r\n maxAverageRatio_returnRatio=5,\r\n minReturnRatio=0.5,\r\n type='crypto')\r\n\r\n report = pd.concat([buysAndSells['buys'], buysAndSells['sells']]).reset_index(drop=True)\r\n if len(report) > 0:\r\n coinInfo = dBaseAction(self.dBase, 'Select * from %s' % self.coinTable)[0]\r\n report = pd.merge(report, coinInfo[['ticker', 'market_cap', 'volume']], on='ticker')\r\n report['CTA'] = report['CTA'].astype(float).map('{:,.3f}'.format)\r\n report['skew'] = report['skew'].astype(float).map('{:,.5f}'.format)\r\n report['market_cap'] = report['market_cap'].astype(float).map('{:,.0f}'.format)\r\n report['volume'] = report['volume'].astype(float).map('{:,.0f}'.format)\r\n\r\n self.email_report(report, buysAndSells)\r\n\r\n report['timestamp'] = str(datetime.now().strftime('%Y-%m-%d %HH:%MM'))\r\n report['ID'] = report['ticker'] + '_' + report['timestamp']\r\n\r\n # log report of buys in historical dataBase\r\n report = report[report['action'] == 'buy'].reset_index(drop=True)\r\n if len(report)>0:\r\n writeDBv2(dBase=cryptoConfig.dBase, name=cryptoConfig.reportHistory, data=report,\r\n createArgs='(\"' + '\",\"'.join(report.columns.tolist()) + '\")',\r\n indexColName='ID',\r\n args='(\"' + '\",\"'.join(report.columns.tolist()) + '\")')\r\n\r\n def runStocks(self, runType='daily', fullUpdate=False):\r\n\r\n downloadStocks = DataFetch()\r\n downloadStocks.get_stockData(fullUpdate=fullUpdate)\r\n\r\n currentPortfolio = dBaseAction(stockConfig.dBase, 'Select * from %s' % stockConfig.portfolio)\r\n if len(currentPortfolio) > 0:\r\n currentPortfolio = currentPortfolio[0].ticker.unique()\r\n else:\r\n currentPortfolio=None\r\n\r\n buysAndSells = Backtesting()\r\n buysAndSells = buysAndSells.run(runType='stocksDaily' if runType == 'daily' else 'stocksHourly',\r\n returnType='BAU', method='momentum',\r\n sellWindow=[12], sellWindowB=[70], buyWindow=[95], baseWindow=[100],\r\n portfolioTickers=currentPortfolio,\r\n minTimeseriesVol=0.1,\r\n CTAlookback=500,\r\n minVolume=100e3,\r\n lookback=1825,\r\n priceVolatilityPeriod=100,\r\n 
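# --- Editor's illustrative sketch (not part of the original trading.py record) ---
# The comment at the top of runCrypto describes the strategy: track a fast and
# a slow moving average and flag a buy when the market starts trending up. A
# minimal pandas sketch of that crossover idea follows; the window lengths and
# the function name are illustrative assumptions, not the Backtesting internals.
import pandas as pd

def ewma_cross_signal(price, fast=18, slow=400):
    """Return +1 on an upward fast/slow EWMA cross, -1 on a downward cross."""
    fast_avg = price.ewm(span=fast, adjust=False).mean()
    slow_avg = price.ewm(span=slow, adjust=False).mean()
    above = (fast_avg > slow_avg).astype(int)
    return above.diff().fillna(0)

# e.g. signal = ewma_cross_signal(prices['BTC']); buys = signal[signal > 0]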
priceVolatilityThreshold=0.037,\r\n priceCrossMax=0.35,\r\n priceCrossThreshold=0.03,\r\n nPeriodsGradient=10,\r\n priceVolatilityMaxPeriod=50,\r\n priceVolatilityMaxThreshold=0.01,\r\n minObservations=10,\r\n maxPriceBaseCross=10,\r\n maxPriceBaseCrossPeriod=50,\r\n returnAveragePeriod=50,\r\n minInterpVolatility=0.0002,\r\n minCrossGradient=-10,\r\n minCrossGradientSell=-10,\r\n sellLosers=-0.5,\r\n minPriceBasePercent=0.04,\r\n minVolatility=0.011,\r\n minAverageReturn=0.007,\r\n maxRollingReturn=1,\r\n returnRatioPeriod=50,\r\n type='stocks')\r\n\r\n report = pd.concat([buysAndSells['buys'], buysAndSells['sells']]).reset_index(drop=True)\r\n if len(report) > 0:\r\n tickerStats = getTickerYahooStats(np.sort(report.ticker.values))\r\n\r\n report = pd.merge(report, tickerStats[['ticker', 'marketCap', 'longName', 'earningsDate',\r\n 'shortRatio', 'recommendationKey', 'industry']], on='ticker')\r\n report['CTA'] = report['CTA'].astype(float).map('{:,.3f}'.format)\r\n report['skew'] = report['skew'].astype(float).map('{:,.5f}'.format)\r\n report['market_cap'] = report['marketCap'].map('{:,}'.format)\r\n report = report.drop('marketCap', axis=1)\r\n\r\n self.email_report(report, buysAndSells)\r\n\r\n report['timestamp'] = str(datetime.now().strftime('%Y-%m-%d %HH:%MM'))\r\n report['ID'] = report['ticker'] + '_' + report['timestamp']\r\n # log report of buys in historical dataBase\r\n report = report[report['action'] == 'buy'].reset_index(drop=True)\r\n if len(report)>0:\r\n writeDBv2(dBase=stockConfig.dBase, name=stockConfig.reportHistory, data=report,\r\n createArgs='(\"' + '\",\"'.join(report.columns.tolist()) + '\")',\r\n indexColName='ID',\r\n args='(\"' + '\",\"'.join(report.columns.tolist()) + '\")')\r\n\r\n def email_report(self, report, buysAndSells, maxTickers=100):\r\n\r\n # input buy into database via input script/webpage?\r\n buys = report[report.action == 'buy'].reset_index(drop=True)\r\n\r\n # emails have a maximum size, plus can only make a certain number of choices, so filter out top 100\r\n # (inclusive of crosses\r\n\r\n crosses = buys[buys.type == 'cross'].reset_index(drop=True)\r\n crosses['MC'] = crosses.market_cap.str.replace(',', '').astype(float)\r\n crosses = crosses.sort_values(by=['gradient', 'CTA', 'MC'], ascending=False)\r\n\r\n continuations = buys[buys.type == 'continuation'].reset_index(drop=True)\r\n continuations['MC'] = continuations.market_cap.str.replace(',','').astype(float)\r\n continuations = continuations.sort_values(by=['gradient', 'CTA', 'MC'], ascending=False)\r\n\r\n nContinuations = maxTickers - len(crosses)\r\n continuations = continuations.iloc[:nContinuations, :].reset_index(drop=True)\r\n buys = pd.concat([crosses, continuations]).reset_index(drop=True)\r\n buys = buys.drop('MC', axis=1)\r\n buyTickerMask = match(buys.ticker, buysAndSells['price'].columns)\r\n\r\n sells = report[report.action == 'sell'].reset_index(drop=True)\r\n # read current portfolio and check if we hold the specified sell, if so then email.\r\n # input sell into database via input script/webpage?\r\n currentPortfolio = dBaseAction(self.dBase, 'Select * from %s' % self.portfolio)\r\n if len(currentPortfolio) > 0:\r\n currentPortfolio = currentPortfolio[0]\r\n sells = sells[sells.ticker.isin(currentPortfolio.ticker.unique())].reset_index(drop=True)\r\n sellTickerMask = match(sells.ticker, buysAndSells['price'].columns)\r\n else:\r\n sells = []\r\n\r\n if len(buys) > 0:\r\n # send email\r\n emailReport(report=buys,\r\n buyCurve=buysAndSells['buyCurve'][:, buyTickerMask],\r\n 
sellCurve=buysAndSells['sellCurve'][:, buyTickerMask],\r\n sellCurveB=buysAndSells['sellCurveB'][:, buyTickerMask],\r\n baseCurve=buysAndSells['baseCurve'][:, buyTickerMask],\r\n price=buysAndSells['price'].iloc[:, buyTickerMask],\r\n buyLocations=buysAndSells['buyLocations'][:, buyTickerMask],\r\n sellLocations=buysAndSells['sellLocations'][:, buyTickerMask],\r\n saveLocation=self.saveLocation,\r\n chartTitleKey='BUY %s - momentum HOURLY ' % self.title)\r\n if len(sells) > 0:\r\n # send email\r\n emailReport(report=sells,\r\n buyCurve=buysAndSells['buyCurve'][:, sellTickerMask],\r\n sellCurve=buysAndSells['sellCurve'][:, sellTickerMask],\r\n sellCurveB=buysAndSells['sellCurveB'][:, sellTickerMask],\r\n baseCurve=buysAndSells['baseCurve'][:, sellTickerMask],\r\n price=buysAndSells['price'].iloc[:, sellTickerMask],\r\n buyLocations=buysAndSells['buyLocations'][:, sellTickerMask],\r\n sellLocations=buysAndSells['sellLocations'][:, sellTickerMask],\r\n saveLocation=self.saveLocation,\r\n chartTitleKey='SELL %s - momentum HOURLY ' % self.title)\r\n\r\n @staticmethod\r\n def updatePortfolio():\r\n \"\"\"\r\n Method to update portfolio and portfolio history in dataBase\r\n\r\n Expects input:\r\n assetType: str\r\n \"crypto\" or \"stocks\"\r\n action:\r\n \"buy\" or \"sell\"\r\n ticker: str\r\n what ticker is bought or sold\r\n price: float\r\n price bought at/ sold at\r\n timestamp: \"YYYY-MM-DD HH:MM\"\r\n timestamp trade was actioned\r\n\r\n\r\n \"\"\"\r\n\r\n assetCorrect = False\r\n actionCorrect=False\r\n timeCorrect=False\r\n\r\n while not assetCorrect:\r\n assetType = input('Enter Asset (crypto/stocks): ')\r\n if assetType in ['crypto', 'stocks']:\r\n assetCorrect = True\r\n\r\n while not actionCorrect:\r\n action = input('Enter Action (buy/sell): ')\r\n if action in ['buy', 'sell']:\r\n actionCorrect = True\r\n\r\n ticker = input('Enter Ticker: ')\r\n price = input('Enter Price (GBP for stocks, USD for crypto): ')\r\n quantity = input('Enter Quantity: ')\r\n\r\n while not timeCorrect:\r\n timestamp = input('Enter TimeStamp (YYYY-MM-DD HH:MM): ')\r\n try:\r\n timestamp = np.datetime64(timestamp)\r\n except Exception as e:\r\n print(str(e))\r\n else:\r\n timeCorrect = True\r\n\r\n if assetType == 'stocks':\r\n dBase = stockConfig.dBase\r\n portfolioTable = stockConfig.portfolio\r\n portfolioHistoryTable = stockConfig.portfolioHistory\r\n base = 'GBP'\r\n elif assetType == 'crypto':\r\n dBase = cryptoConfig.dBase\r\n portfolioTable = cryptoConfig.portfolio\r\n portfolioHistoryTable = cryptoConfig.portfolioHistory\r\n base = 'USD'\r\n\r\n data = pd.DataFrame({'ticker': ticker,\r\n 'price (%s)' % base: price,\r\n 'quantity': quantity,\r\n 'timestamp': timestamp}, index=[0])\r\n\r\n if action == 'buy':\r\n # add buy to portfolio\r\n writeDBv2(dBase=dBase, name=portfolioTable, data=data,\r\n createArgs='(\"' + '\",\"'.join(data.columns.tolist()) + '\")',\r\n indexColName='ticker',\r\n args='(\"' + '\",\"'.join(data.columns.tolist()) + '\")')\r\n\r\n data['ID'] = data.ticker + '_' + str(data.timestamp)\r\n data['action'] = 'buy'\r\n # log into history table\r\n writeDBv2(dBase=dBase, name=portfolioHistoryTable, data=data,\r\n createArgs='(\"' + '\",\"'.join(data.columns.tolist()) + '\", PRIMARY KEY(\"ID\"), UNIQUE(\"ID\"))',\r\n indexColName='ID',\r\n args='(\"' + '\",\"'.join(data.columns.tolist()) + '\")')\r\n print('buy made: ')\r\n print(data)\r\n\r\n elif action == 'sell':\r\n # remove ticker from portfolio\r\n assert ticker in dBaseAction(dBase, 'select distinct ticker from %s ' % 
portfolioTable)[0],\\\r\n '\"%s\" not in portfolio' % ticker\r\n dBaseAction(dBase, 'delete from %s where ticker = \"%s\"' % (portfolioTable, ticker), noReturn=True)\r\n # add trade to history\r\n data['ID'] = data.ticker + '_' + data.timestamp\r\n data['action'] = 'sell'\r\n # log into history table\r\n writeDBv2(dBase=dBase, name=portfolioHistoryTable, data=data,\r\n createArgs='(\"' + '\",\"'.join(data.columns.tolist()) + '\", PRIMARY KEY(\"ID\"), UNIQUE(\"ID\"))',\r\n indexColName='ID',\r\n args='(\"' + '\",\"'.join(data.columns.tolist()) + '\")')\r\n print('sell made: ')\r\n print(data)\r\n\r\n input('press enter to exit...')\r\n\r\n @staticmethod\r\n def portfolioStatus(view='current'):\r\n \"\"\"\r\n Method to view currenct portfolio and portfolio history\r\n\r\n Parameters:\r\n -----------\r\n view: str\r\n \"current\" for current portfolio\r\n \"history\" for full trade history\r\n\r\n\r\n \"\"\"\r\n\r\n assetType = input('Enter Asset (crypto/stocks): ')\r\n ticker = input('Enter Ticker: ')\r\n assert assetType in ['stocks', 'crypto'], 'dataBase must be \"crypto\" or \"stocks'\r\n\r\n if assetType == 'stocks':\r\n dBase = stockConfig.dBase\r\n portfolioTable = stockConfig.portfolio\r\n portfolioHistoryTable = stockConfig.portfolioHistory\r\n base = 'GBP'\r\n elif assetType == 'crypto':\r\n dBase = cryptoConfig.dBase\r\n portfolioTable = cryptoConfig.portfolio\r\n portfolioHistoryTable = cryptoConfig.portfolioHistory\r\n base = 'USD'\r\n\r\n if view == 'current':\r\n currentPortfolio = dBaseAction(dBase, 'Select * from %s' % portfolioTable)[0]\r\n print('###################\\n'\r\n 'Current Portfolio #\\n'\r\n '###################')\r\n\r\n print(currentPortfolio.to_string())\r\n\r\n if view == 'history':\r\n portfolioHistory = dBaseAction(dBase, 'Select * from %s' % portfolioHistoryTable)[0]\r\n print('###################\\n'\r\n 'Portfolio History #\\n'\r\n '###################')\r\n print(ticker)\r\n if ticker is not None and ticker != '':\r\n if isinstance(ticker, str):\r\n ticker = [ticker]\r\n portfolioHistory = portfolioHistory[portfolioHistory.ticker.isin(ticker)].reset_index(drop=True)\r\n print(portfolioHistory.to_string())\r\n\r\n @staticmethod\r\n def reportHistory():\r\n \"\"\"\r\n Method to view currenct portfolio and portfolio history\r\n\r\n Parameters:\r\n -----------\r\n view: str\r\n \"current\" for current portfolio\r\n \"history\" for full trade history\r\n\r\n\r\n \"\"\"\r\n\r\n assetType = input('Enter Asset (crypto/stocks): ')\r\n assert assetType in ['stocks', 'crypto'], 'dataBase must be \"crypto\" or \"stocks'\r\n\r\n if assetType == 'stocks':\r\n dBase = stockConfig.dBase\r\n table = stockConfig.reportHistory\r\n elif assetType == 'crypto':\r\n dBase = cryptoConfig.dBase\r\n table = cryptoConfig.reportHistory\r\n\r\n reportHistory = dBaseAction(dBase, 'Select * from %s' % table)[0]\r\n print('###################\\n'\r\n ' Report History #\\n'\r\n '###################')\r\n\r\n print(reportHistory.to_string())\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n if sys.argv[1] == 'cryptoHourly':\r\n trade = Trading('crypto')\r\n trade.runCrypto('hourly')\r\n elif sys.argv[1] == 'cryptoDaily':\r\n trade = Trading('crypto')\r\n trade.runCrypto('daily')\r\n elif sys.argv[1] == 'stocksDaily':\r\n trade = Trading('stocks')\r\n trade.runStocks(fullUpdate=eval(sys.argv[2]))\r\n elif sys.argv[1] == 'updatePortfolio':\r\n Trading.updatePortfolio()\r\n elif sys.argv[1] == 'portfolioStatus':\r\n Trading.portfolioStatus()\r\n","sub_path":"1st 
iteration/AlgorithcTrading/trading.py","file_name":"trading.py","file_ext":"py","file_size_in_byte":20257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"376217184","text":"# Python Tutorials - Internet Operations\nimport urllib.request\n\nurl = 'http://www.brainjar.com/java/host/test.html'\nstream_open = None\ntry:\n stream = urllib.request.urlopen(url)\n stream_open = True\n contents = stream.read()\n print(contents)\nfinally:\n if stream_open == True:\n stream.close()\n print('URL successfully closed')\n \n","sub_path":"internet_operations.py","file_name":"internet_operations.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477103370","text":"'''\nAuthor: Jai Luthra\n'''\n\nimport pygame\nimport random\nimport time\n\npygame.init()\n\nwidth, height = 800, 600\ncenter = (240, 260)\nscore_loc = (10, 10)\nlife_loc = (720, 10)\nbullet_Speed = 0.4\nscore = 0\nlives = 3\nwindow = pygame.display.set_mode((width, height))\n\npygame.display.set_caption('Space Invaders')\n\nplayerImg = pygame.image.load('resources/spaceship.png')\nplayerX = (width - 64)/2\nplayerY = height - 10 - 64\nplayerX_change = 0\n\nenemyImg = pygame.image.load('resources/enemy.png')\nenemyX = random.randint(0, width - 64)\nenemyY = 0\n\nbulletImg = pygame.image.load('resources/bullet.png')\nbulletX = 0\nbulletY = playerY - 30\nbulletX_change = 0\nbulletY_change = bullet_Speed\nbullet_fired = False\n\ndef show_score():\n text = pygame.font.Font('freesansbold.ttf', 16)\n return text.render(f'Score: {score}', True, (255,255,255))\n\ndef player(playerX, playerY):\n window.blit(playerImg, (playerX, playerY))\n\ndef enemy(enemyX, enemyY):\n window.blit(enemyImg, (enemyX, enemyY))\n\ndef bullet(bulletX, bulletY):\n window.blit(bulletImg, (bulletX, bulletY))\n\n# run until a quit\nloop = True\n\nwhile(loop == True):\n window.fill((0,0,0))\n\n for event in pygame.event.get():\n # if window closed\n if event.type == pygame.QUIT:\n loop = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n playerX_change += -0.5\n if event.key == pygame.K_RIGHT:\n playerX_change += 0.5\n\n if event.key == pygame.K_SPACE:\n if not bullet_fired:\n bulletX = playerX + 16\n bullet_fired = True\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n playerX_change = 0\n\n playerX += playerX_change\n playerX = max(0, playerX)\n playerX = min(width - 64, playerX)\n player(playerX, playerY)\n\n enemyY += (score//5 + 1) * 0.1\n enemy(enemyX, enemyY)\n \n if bullet_fired:\n bullet(bulletX, bulletY)\n bulletY -= bulletY_change\n if(bulletY < -16):\n bullet_fired = False\n bulletY = playerY - 30\n\n if enemyX - 28 < bulletX < enemyX + 56 and enemyY - 56 < bulletY < enemyY + 56:\n bullet_fired = False\n bulletY = playerY - 30\n enemyX = random.randint(0, width - 64)\n enemyY = 0\n score += 1\n\n if enemyY > height - 50:\n lives-= 1\n if enemyX - 56 < playerX < enemyX + 56:\n lives = 0\n enemyX = random.randint(0, width - 64)\n enemyY = 0\n\n if lives == 0:\n for i in range(5,0,-1):\n time.sleep(1)\n window.fill((0,0,0))\n text = pygame.font.Font('freesansbold.ttf', 50)\n out = text.render('Game Over!', True, (255,255,0))\n window.blit(out, center)\n out = text.render(f'Window will exit in {i}s', True, (255,255,0))\n window.blit(out, (120,300))\n window.blit(show_score(), score_loc)\n pygame.display.update()\n loop = False\n\n else:\n text 
= pygame.font.Font('freesansbold.ttf', 16)\n life = text.render(f'Live(s): {lives}', True, (255,255,255))\n window.blit(life, life_loc)\n window.blit(show_score(), score_loc)\n pygame.display.update()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393662868","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom six.moves.urllib.parse import quote\n\nfrom .exc import DownstreamError\n\n\ndef urlify(string):\n \"\"\" You might be wondering: why is this here at all, since it's basically\n doing exactly what the quote_plus function in urllib does. Well, to keep\n the 2 & 3 stuff all in one place, meaning rather than try to import the\n urllib stuff twice in each file where url-safe strings are needed, we keep\n it all in one file: here.\n\n Supporting multiple Pythons is hard.\n\n :param string: String to URLify\n :return: URLified string\n \"\"\"\n return quote(string)\n\n\ndef handle_json_response(resp):\n try:\n resp.raise_for_status()\n except Exception as ex:\n r_json = resp.json()\n raise DownstreamError(\"Error fetching downstream\"\n \"-node response: %s\" % str(ex))\n\n try:\n r_json = resp.json()\n except:\n raise DownstreamError('Invalid response from Downstream node.')\n\n return r_json\n","sub_path":"downstream_farmer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389542999","text":"import sys\n\nusrid = sys.argv[1]\n\nimport numpy as np\nimport MySQLdb as mdb\nfrom scipy import sparse\nfrom scipy.sparse.linalg import svds\nfrom collections import defaultdict\nfrom operator import itemgetter\n\ndef sparse_mean(mat, row = -1, column = -1):\n\t# function to take means on a sparse matrix\n\tif row != -1:\n\t\tmat = mat[row]\n\telif column != -1:\n\t\tmat = mat.transpose()[column]\n\n\tif mat.count_nonzero() != 0:\n\t\treturn mat.sum()/mat.count_nonzero()\n\telse:\n\t\treturn 0\n\ndef recommend(id):\n\tcon = mdb.connect(\"localhost\", \"root\", \"\", \"recommender\") #connection to the database\n\tdata = []\n\n\t# getting user x films ratings from the database\n\ttry:\n\t\twith con:\n\t\t\tcur = con.cursor()\n\t\t\tquery = \"SELECT * FROM user_ratings\"\n\t\t\tcur.execute(query)\n\t\t\tresult = cur.fetchall()\n\n\t\t\tfor r in result:\n\t\t\t\tdata.append(r)\n\n\t\t\tcur = con.cursor()\n\t\t\tquery = \"SELECT id FROM users ORDER BY id\"\n\t\t\tcur.execute(query)\n\n\t\t\tresult = cur.fetchall()\n\t\t\tuser_ids = [user[0] for user in result]\n\t\t\tn_users = len(user_ids)\n\n\t\t\tcur = con.cursor()\n\t\t\tquery = \"SELECT COUNT(*) FROM films\"\n\t\t\tcur.execute(query)\n\n\t\t\tn_films = cur.fetchone()[0]\n\texcept Exception:\n\t\tprint(Exception)\n\n\t# prepping the data into a user x movies rating matrix\n\tuid_matrix = user_ids.index(int(id))\n\n\tdata = np.array(data, dtype=np.float32)\n\n\tres = defaultdict(list)\n\tfor v,u,k in data: res[int(v)].append([u, k])\n\t\n\tfor i in user_ids:\n\t\tif res[i]:\n\t\t\tres[i] = np.array(res[i])\n\t\telse:\n\t\t\tres[i] = np.array([[1,4]])\n\n\tfirst_usr = int(data[:,0].min())\n\tfirst_mov = int(data[:,1].min())\n\n\tuser_ratings_mean = []\n\tfor i in res:\n\t\tuser_ratings_mean.append(sum(res[i][:,1])/len(res[i]))\n\n\turm_array = []\n\toriginal_matrix = np.zeros(shape=(n_users, n_films), dtype=np.float32)\n\n\tfor cur_rating in 
user_ratings_mean:\n\t\turm_array.append([cur_rating]*n_films)\n\n\ti = 0\n\tfor r in res: # users\n\t\tfor j in res[r]: #movies\n\t\t\turm_array[i][int(j[0])-1] = original_matrix[i, int(j[0])-1] = j[1]\n\t\ti += 1\n\t\n\turm = np.array(urm_array)\n\n\tcur.close()\n\tcon.close()\n\n\tdef unrated(userid, movieid):\n\t\t# function to check if user j has rated movie i\n\t\tif not original_matrix[userid,movieid]:\n\t\t\treturn 1\n\n\toriginal_matrix_sparse = sparse.csr_matrix(original_matrix)\n\tratings_mean = sparse_mean(original_matrix_sparse) # mean of all ratings in original_matrix\n\n\tfilm_ratings_mean = []\n\n\tfor i in range(n_films):\n\t\tfilm_ratings_mean.append(sparse_mean(original_matrix_sparse,-1, i))\n\n\tpredictions = []\n\n\t# naive SVD\n\tU, S, V = sparse.linalg.svds(sparse.csr_matrix(urm))\n\tP = S * V.T\n\n\tfor i in range(first_usr-1, n_users):\n\t\tfor j in range(first_mov-1, n_films):\n\t\t\tif unrated(i,j):\n\t\t\t\tp = [i,j,((ratings_mean+(film_ratings_mean[j]-ratings_mean)+(user_ratings_mean[i]-ratings_mean))+(U[i]*P[j]).sum())/2]\n\t\t\t\tpredictions.append(p)\n\n\tfor prediction in predictions:\n\t\turm[prediction[0], prediction[1]] = prediction[2]\n\n\t# prepping data to send\n\tpreds = defaultdict(list)\n\tfor a,b,c in predictions: preds[a].append([b+1,c])\n\n\tuser_preds = []\n\tfor i in preds[uid_matrix]:\n\t\tuser_preds.append(i)\n\n\tuser_preds = sorted(user_preds, key=itemgetter(1), reverse=True)\n\tprint(user_preds[:6])\n\nrecommend(usrid)","sub_path":"site/__pyscripts/ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"510564761","text":"# coding:utf-8\n\n\nsalary = input(\"your salary: \")\ncommodity_list = [['iphone 7plus', '6388'], ['mac pro', '21000'], ['starbuck', '31'], ['bicycle', '1200'], ['Book', '88'], ['cup', '35']]\n\nuser = None\nhave_selected = []\n\nif salary.isdigit():\n    salary = int(salary)\n    account_balance = salary\n    while user != 'q':\n\n        '''\n        for i in commodity_list:\n            commodity_index = commodity_list.index(i)+1\n            commodity_show = ' '.join(i)\n            print(commodity_index, commodity_show)\n        '''\n        # another way to write this: use enumerate()\n        for index, item in enumerate(commodity_list):\n            print(index+1, ' '.join(item))\n        # enumerate() yields each list index together with its item\n\n        commodity_num = input(\">>>: \")\n        # first check whether the input is a number; if it is, convert it to int, check that it is within the range of the commodity list, and then check whether it is 'q'\n        if commodity_num.isdigit():\n            commodity_num = int(commodity_num)\n            if commodity_num < len(commodity_list) and commodity_num >=0:\n                commodity_real_price = int(commodity_list[int(commodity_num) - 1][1])\n                if account_balance < commodity_real_price:\n                    print('Insufficient account balance. 
Please refill it \\n')\n                else:\n                    have_selected.append(commodity_list[int(commodity_num) - 1])\n                    account_balance = account_balance - commodity_real_price\n                    print('%s has been added to the shopping cart' % commodity_list[int(commodity_num) - 1][0])\n                    print('your account_balance is \\033[31;1m%s\\033[0m \\n' % account_balance)\n            else:\n                print('the product %s does not exist' % commodity_num)\n        elif commodity_num == 'q':\n            print('-----------shopping list-------------')\n            for j in have_selected:\n                print(j[0])\n            print('your account_balance is \\033[31;1m%s\\033[0m \\n' % account_balance)\n            exit()\n            '''\n            exit('your account_balance is \\033[31;1m%s\\033[0m \\n' % account_balance)\n            '''\n        else:\n            print('wrong option')\n\nelse:\n    print('invalid salary: please enter a number')\n\n\n\n\n\n\n\n","sub_path":"zyh_python-20170628晚改完/zyh_python/zyh_python/day2/shopping cart/shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"56854184","text":"class Solution:\n    def searchRange(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        if nums is None or len(nums) == 0:\n            return [-1, -1]\n        res = [-1, -1]\n        res[0] = self.find_first(nums, target)\n        res[1] = self.find_last(nums, target)\n        return res\n\n    def find_first(self, nums, target):\n        start, end = 0, len(nums)-1\n        while start+1 < end:\n            mid = (start+end)//2\n            if nums[mid] == target:\n                end = mid\n            elif nums[mid] < target:\n                start = mid\n            else:\n                end = mid\n\n        if nums[start] == target:\n            return start\n        if nums[end] == target:\n            return end\n        return -1\n\n    def find_last(self, nums, target):\n        start, end = 0, len(nums)-1\n        while start+1 < end:\n            mid = (start+end)//2\n            if nums[mid] == target:\n                start = mid\n            elif nums[mid] < target:\n                start = mid\n            else:\n                end = mid\n\n        if nums[end] == target:\n            return end\n\n        if nums[start] == target:\n            return start\n        return -1\n\n\ndef main():\n    nums = [5, 7, 7, 8, 8, 10]\n    target = 8\n    sol = Solution()\n    print(sol.searchRange(nums, target))\n\n\nmain()\n","sub_path":"034-find-first-and-last-position-of-element-in-sorted-array/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"613956135","text":"#!/usr/bin/env python3\r\n\r\nimport json\r\nimport os\r\nimport random\r\nimport sys\r\n\r\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\r\n\r\nfrom arguments import get_arguments\r\n\r\nimport dp_parser\r\nimport bp_parser\r\nimport remove_similar_sequences as rss\r\n\r\n\r\narguments = get_arguments(path=str,\r\n                          data_format=str,\r\n                          validation_ratio=(float, 0.2),\r\n                          test_ratio=(float, 0.2),\r\n                          path_similar=(str,None))\r\n\r\npath, data_format, validation_ratio, test_ratio, path_similar = arguments\r\n\r\n# Choose correct parser\r\nif data_format == 'dp':\r\n    read_data = dp_parser.read_data\r\nelif data_format == 'bp':\r\n    read_data = bp_parser.read_data\r\nelse:\r\n    print('Invalid format, must be dp or bp.')\r\n    sys.exit()\r\n\r\n# Additional parser for the similar dataset\r\nif path_similar is not None:\r\n    if data_format == 'dp':\r\n        read_data = dp_parser.read_data\r\n    elif data_format == 'bp':\r\n        read_data = bp_parser.read_data\r\n    else:\r\n        print('Invalid format, must be dp or bp.')\r\n        sys.exit()\r\n\r\nprint(\"Loading the dataset...\")\r\n\r\n# Load the dataset\r\ndata = list(read_data(path))\r\n\r\n# 
remove similar sequences if pathSimilar is set\r\nif path_similar is not None:\r\n dataSim = list(read_data(path_similar))\r\n data = rss.remove_sim_seq_list(data,dataSim)\r\n\r\nlength = len(data)\r\n\r\nprint(f\" {length} entries read\")\r\n\r\n# Shuffle the dataset in a random order\r\n# random.shuffle(data)\r\n\r\n# Split data into training, validation and test set\r\nvalidation_length = int(length * validation_ratio)\r\ntest_length = int(length * test_ratio)\r\n\r\nvalidation_data = data[:validation_length]\r\ntest_data = data[validation_length:validation_length + test_length]\r\ntraining_data = data[validation_length + test_length:]\r\n\r\nprint(\"Some statistics...\")\r\n\r\ndef avg_len(data):\r\n return sum(len(rna) for rna, db in data) / max(len(data), 1)\r\n\r\nprint(f\" validation data: {avg_len(validation_data)} // {len(validation_data)}\")\r\nprint(f\" test data: {avg_len(test_data)} // {len(test_data)}\")\r\nprint(f\" training data: {avg_len(training_data)} // {len(training_data)}\")\r\n\r\nprint(\"Writing data to files...\")\r\n\r\npath = path.rstrip('/')\r\n\r\nwith open(f'{path}-validation.json', 'w') as validation_file:\r\n json.dump(validation_data, validation_file, indent=2)\r\n\r\nwith open(f'{path}-test.json', 'w') as test_file:\r\n json.dump(test_data, test_file, indent=2)\r\n\r\nwith open(f'{path}-training.json', 'w') as training_file:\r\n json.dump(training_data, training_file, indent=2)\r\n","sub_path":"rna_prediction/src/parsing/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"228095403","text":"#! /usr/bin/env python3\n\nimport argparse\n\n# VASP processing modules\nfrom vasppy.doscar import Doscar\nfrom vasppy.utils import validate_checksum\nfrom vasppy.summary import load_vasp_summary\n\n# matplotlib and figure formatting\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\nimport figure_formatting as ff\n\n# additional functions in this repository\nfrom analysis_functions import element_list_from_vasprun, read_vasprun\n\nff.set_formatting( ff.formatting )\n\ndef checksum( filename, md5, verbose=False ):\n if verbose:\n print( 'Validating checksum for {}'.format( filename ) )\n validate_checksum( '../data/{}'.format( filename ), md5 )\n if verbose:\n print( 'okay' )\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('functional', choices=['PBEsol','HSE06'],\n help='Functional to plot pDOS data for')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='Verbose output')\n parser.add_argument('-f', '--fig-dir', help='Figures directory', default='.')\n args = parser.parse_args()\n return args\n\ndef main( functional, fig_dir='.', verbose=False ):\n if verbose:\n print( 'Plotting {} pDOS'.format( functional ) )\n filename = '../data/vasp_summary.yaml'\n if verbose:\n print( 'Reading VASP summary from {}'.format( filename ) )\n summary = load_vasp_summary( filename ) \n\n panel_a = { 'calc': 'FeF3 {} DOS'.format(functional),\n 'to_plot': {'Fe': 'd', 'F': 'p'},\n 'title': r'(a) FeF$_3$' }\n panel_b = { 'calc': 'FeF3 OH_F 1,2 {} DOS'.format(functional),\n 'to_plot': {'Fe': 'd', 'F': 'p', 'O': 'p'},\n 'title': r'(b) FeF$_3$ + 4(OH)$_\\mathrm{F}$' }\n panel_c = { 'calc': 'FeF3 2O_F 2V_F {} DOS'.format(functional),\n 'to_plot': {'Fe': 'd', 'F': 'p', 'O': 'p'},\n 'title': r'(c) FeF$_3$ + 2O$_\\mathrm{F}$ + 2$\\square_\\mathrm{F}$' }\n\n plotting_data = ( panel_a, panel_b, panel_c 
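# (comment added for clarity) one configuration dict per subplot panel (a, b, c)\n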
)\n fig, axes = plt.subplots(3, 1, figsize=(8,9.0))\n fig_params = {\n 'xrange': [-12, 8], \n 'title_loc': 'left', \n 'scaling': {'O': {'p': 5.0}},\n 'legend_pos': 'best'\n }\n\n title_x = 0.02\n title_y = 0.86\n\n def title( ax, text ):\n ax.text( title_x, title_y, text, horizontalalignment='left', transform=ax.transAxes )\n\n for ax, p_data in zip( axes, plotting_data ):\n calc = summary[p_data['calc']]\n tracked_files = calc['file tracking']\n for tfile in tracked_files.values():\n checksum( tfile['filename'], tfile['md5'], verbose=verbose )\n doscar_filename = tracked_files['DOSCAR']['filename']\n vasprun_filename = tracked_files['vasprun.xml']['filename']\n v = read_vasprun( '../data/{}'.format( vasprun_filename ), gzipped=True )\n\n doscar = Doscar('../data/{}'.format(doscar_filename), species=element_list_from_vasprun(v))\n doscar.energy -= v.eigenvalue_band_properties[2]\n doscar.plot_pdos(to_plot=p_data['to_plot'], ax=ax, **fig_params)\n\n ax.text(title_x,\n title_y,\n p_data['title'],\n horizontalalignment='left',\n transform=ax.transAxes)\n\n plt.tight_layout()\n plt.subplots_adjust(hspace=0)\n fig_filename = '{}/FeF3_{}_pDOS.pdf'.format(fig_dir, functional)\n if verbose:\n print('Saving figure to {}'.format(fig_filename))\n fig.savefig(fig_filename)\n\nif __name__ == '__main__':\n args = parse_args()\n main(args.functional, args.fig_dir, args.verbose)\n","sub_path":"analysis/plot_FeF3_pDOS.py","file_name":"plot_FeF3_pDOS.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"508130703","text":"import re\nimport pandas as pd\nfrom igf_data.illumina.samplesheet import SampleSheet\nfrom igf_data.utils.sequtils import rev_comp\nfrom igf_data.process.metadata_reformat.reformat_metadata_file import Reformat_metadata_file\n\nSAMPLESHEET_COLUMNS = [\n 'Lane',\n 'Sample_ID',\n 'Sample_Name',\n 'Sample_Plate',\n 'Sample_Well',\n 'I7_Index_ID',\n 'index',\n 'I5_Index_ID',\n 'index2',\n 'Sample_Project',\n 'Description',\n 'Pool_Number'\n]\n\nclass Reformat_samplesheet_file:\n '''\n A class for reformatting samplesheet file\n\n :param infile: Input samplesheet file\n :param file_format: Input file format, default samplesheet\n List of allowed formats\n\n * samplesheet\n * csv\n\n :param samplesheet_columns: A list of expected columns in the samplesheet file\n A list of default columns\n\n * Lane\n * Sample_ID\n * Sample_Name\n * Sample_Plate\n * Sample_Well\n * I7_Index_ID\n * index\n * Sample_Project\n * Description\n * Pool_number\n\n :param remove_adapters: A toggle for removing adapters from header section ,default False\n :param revcomp_index1: A toggle for reverse complementing index1 column, default False\n :param revcomp_index2: A toggle for reverse complementing index2 column, default False\n :param tenx_label: Description label for 10x experiments, default '10X'\n :param sample_id: Sample id column name, default 'Sample_ID'\n :param sample_name: Sample name column name, default 'Sample_Name'\n :param index: I7 index column name, default 'index'\n :param index2: I5 index column name, default 'index2'\n :param sample_project: Project name column name, default 'Sample_Project'\n :param description: Description column name, default 'Description'\n :param adapter_section: Adapter section name in header, default 'Settings'\n :param adapter_keys: A list of adapter keys to be removed from samplesheet header, default ('Adapter','AdapterRead2')\n '''\n def __init__(self,infile,\n 
file_format='samplesheet',\n samplesheet_columns=SAMPLESHEET_COLUMNS,\n remove_adapters=False,\n revcomp_index1=False,\n revcomp_index2=False,\n tenx_label='10X',\n sample_id='Sample_ID',\n sample_name='Sample_Name',\n index='index',\n index2='index2',\n sample_project='Sample_Project',\n description='Description',\n adapter_section='Settings',\n adapter_keys=('Adapter','AdapterRead2')):\n try:\n self.infile = infile\n if file_format not in ['samplesheet','csv']:\n raise ValueError('File format {0} not supported'.format(file_format))\n\n self.file_format = file_format\n self.samplesheet_columns = samplesheet_columns\n self.tenx_label = tenx_label\n self.remove_adapters = remove_adapters\n self.revcomp_index1 = revcomp_index1\n self.revcomp_index2 = revcomp_index2\n self.sample_id = sample_id\n self.sample_name = sample_name\n self.index = index\n self.index2 = index2\n self.sample_project = sample_project\n self.description = description\n self.adapter_section = adapter_section\n self.adapter_keys = adapter_keys\n except Exception as e:\n raise ValueError('Error in initializing samplesheet reformatting, error: {0}'.\\\n format(e))\n\n @staticmethod\n def detect_tenx_barcodes(index,tenx_label='10X'):\n '''\n A static method for checking 10X I7 index barcodes\n\n :param index: I7 index string\n :param tenx_label: A string description for 10X samples, default, '10X'\n :returns: A string\n '''\n try:\n description = ''\n pattern = re.compile(r'SI-[GN]A-[A-H]\\d+',re.IGNORECASE)\n if re.match(pattern,index):\n description = tenx_label\n return description\n except Exception as e:\n raise ValueError('Failed to detect Tenx single cell barcode for index {0}, error: {1}'.\\\n format(index,e))\n\n def correct_samplesheet_data_row(self,row):\n '''\n A method for correcting samplesheet data row\n\n :param row: A Pandas Series\n :returns: A Pandas Series\n '''\n try:\n if not isinstance(row,pd.Series):\n raise TypeError('Expecting A pandas series and got {0}'.\\\n format(type(row)))\n\n if self.sample_id in row.keys():\n row[self.sample_id] = \\\n Reformat_metadata_file.\\\n sample_and_project_reformat(row[self.sample_id]) # refoemat sample id\n\n if self.sample_project in row.keys():\n row[self.sample_project] = \\\n Reformat_metadata_file.\\\n sample_and_project_reformat(row[self.sample_project]) # refoemat project name\n\n if self.sample_name in row.keys():\n row[self.sample_name] = \\\n Reformat_metadata_file.\\\n sample_name_reformat(row[self.sample_name]) # refoemat sample name\n\n if self.index in row.keys() and \\\n self.description in row.keys():\n row[self.description] = \\\n self.detect_tenx_barcodes(\\\n index=row[self.index],\n tenx_label=self.tenx_label) # add description label for 10x samples\n\n if self.index in row.keys() and \\\n self.description in row.keys() and \\\n (row[self.index]!='' or row[self.index] is not None ) and \\\n row[self.description] != self.tenx_label:\n row[self.index] = row[self.index].upper()\n if self.revcomp_index1:\n row[self.index] = rev_comp(row[self.index]) # revcomp index 1\n\n if self.index2 in row.keys() and \\\n (row[self.index2]!='' or row[self.index2] is not None ):\n row[self.index2] = row[self.index2].upper()\n if self.revcomp_index2:\n row[self.index2] = rev_comp(row[self.index2]) # revcomp index 2\n\n if self.description in row.keys() and \\\n (row[self.description] !='' or \\\n row[self.description] is not None):\n row[self.description] = row[self.description].upper() # change description to upper case letters\n return row\n except Exception as 
e:\n raise ValueError('Failed to correct samplesheet data row {0},error {1}'.\\\n format(row,e))\n\n def reformat_raw_samplesheet_file(self,output_file):\n '''\n A method for refoematting raw samplesheet file\n\n :param output_file: An output file path\n :returns: None\n '''\n try:\n samplesheet_data = list()\n if self.file_format == 'samplesheet':\n samplesheet = SampleSheet(infile=self.infile)\n samplesheet_data = pd.DataFrame(samplesheet._data)\n elif self.file_format == 'csv':\n samplesheet_data = pd.read_csv(self.infile,header=0,dtype=object)\n samplesheet_data.fillna('',inplace=True)\n samplesheet_data = \\\n samplesheet_data.\\\n apply(\\\n lambda row: self.correct_samplesheet_data_row(row=row),\n axis=1,\n result_type='reduce') # refoemat samplesheet data\n column_names = \\\n [column_name \\\n for column_name in samplesheet_data.columns \\\n if column_name in self.samplesheet_columns ] # filter expected column names\n if len(column_names) == 0:\n raise ValueError('No expected columns found on the samplesheet data')\n samplesheet_data = samplesheet_data[column_names] # filter samplesheet data\n if self.file_format == 'samplesheet':\n samplesheet._data = \\\n samplesheet_data.\\\n to_dict(orient='records') # update samplesheet object with new data\n if self.remove_adapters:\n for adapter_key in self.adapter_keys:\n samplesheet.\\\n modify_sample_header(\\\n section=self.adapter_section,\n type='remove',\n condition_key=adapter_key) # remove adapters from samplesheet\n samplesheet.print_sampleSheet(outfile=output_file) # print corrected samplesheet\n elif self.file_format == 'csv':\n samplesheet_data.to_csv(output_file,index=False) # dump samplesheet dat as csv file\n except Exception as e:\n raise ValueError('Failed to reformat samplesheet file {0}, error {1}'.\\\n format(self.infile,e))","sub_path":"igf_data/process/metadata_reformat/reformat_samplesheet_file.py","file_name":"reformat_samplesheet_file.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"467603482","text":"\"\"\"downloads WDI data and saves it to disk.\n\"\"\"\n\nimport os\nimport shutil\nimport zipfile\nfrom io import BytesIO\nimport requests\nimport pandas as pd\n\nfrom worldbank_wdi import INPATH\n\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef main():\n delete_input()\n download_data()\n\n\ndef delete_input() -> None:\n \"\"\"deletes all files and folders in `{INPATH}`.\n\n WARNING: this method deletes all input data and is only intended for use\n immediately prior to `download_data()`.\n \"\"\"\n if os.path.exists(INPATH):\n shutil.rmtree(INPATH)\n logger.info(f\"Deleted all existing input files in {INPATH}\")\n\n\ndef download_data() -> None:\n \"\"\"Downloads the raw World Development Indicators data and saves it\n in csv format to `{INPATH}`.\n \"\"\"\n if not os.path.exists(INPATH):\n os.makedirs(INPATH)\n try:\n _download_data_csv()\n except: # noqa\n _download_data_excel()\n logger.info(f\"Data succcessfully downloaded to {INPATH}\")\n\n\ndef _download_data_csv() -> None:\n url = \"http://databank.worldbank.org/data/download/WDI_csv.zip\"\n logger.info(f'Downloading data from \"{url}\"...')\n res = requests.get(url)\n zf = zipfile.ZipFile(BytesIO(res.content))\n fnames = zf.namelist()\n zf.extractall(path=INPATH)\n for fname in fnames:\n fname_zip = f\"{fname}.zip\"\n pd.read_csv(os.path.join(INPATH, fname)).to_csv(\n os.path.join(INPATH, 
fname_zip), index=False, compression=\"gzip\"\n )\n os.remove(os.path.join(INPATH, fname))\n\n\ndef _download_data_excel() -> None:\n url = \"http://databank.worldbank.org/data/download/WDI_excel.zip\"\n logger.info(f'Downloading data from \"{url}\"...')\n res = requests.get(url)\n zf = zipfile.ZipFile(BytesIO(res.content))\n fnames = zf.namelist()\n assert len(fnames) == 1, \"Expected only one file in xlsx zip archive.\"\n sheet2df = pd.read_excel(\n BytesIO(zf.read(fnames[0])), sheet_name=None, engine=\"openpyxl\"\n )\n for sheet, df in sheet2df.items():\n fname_zip = f\"WDI{sheet}.csv.zip\"\n df.to_csv(os.path.join(INPATH, fname_zip), index=False, compression=\"gzip\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"worldbank_wdi/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"358368893","text":"#!/usr/bin/env python3\n\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Union\n\nfrom attrs import define, field\n\nfrom .alignment import AlignmentParameters\nfrom .reconstruction import ReconstructionParameters\nfrom .simulation import SimulationParameters, generateDirectory\n\n\nclass ExperimentType(Enum):\n LUMI = \"LUMI\"\n KOALA = \"KOALA\"\n\n\nclass ClusterEnvironment(Enum):\n HIMSTER = \"HimsterII\"\n VIRGO = \"Virgo\"\n\n\n@define\nclass Experiment:\n \"\"\"\n Dataclass for simulation, reconstruction and alignment.\n \"\"\"\n\n experimentType: ExperimentType\n cluster: ClusterEnvironment\n simParams: SimulationParameters\n recoParams: ReconstructionParameters\n alignParams: AlignmentParameters\n fitConfigPath: Path = field(default=Path(\"fitconfig-fast.json\"))\n baseDataOutputDir: Union[Path, None] = field(default=None)\n LMDdirectory: Union[Path, None] = field(default=None)\n\n def __attrs_post_init__(self) -> None:\n self.updateBaseDataDirectory()\n\n def updateBaseDataDirectory(self) -> None:\n if self.LMDdirectory is not None:\n self.baseDataOutputDir = self.LMDdirectory / Path(\n generateDirectory(self.simParams, self.alignParams)\n )\n else:\n raise AttributeError(\n \"Cannot update internal path! 
Please set LMD software directory first.\"\n )\n","sub_path":"python/lumifit/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592202219","text":"n,m = map(int,input().split())\r\n\r\n#赤フラグ 玉の個数\r\narr = [1 for _ in range(n)]\r\narr1 = [False for _ in range(n)]\r\narr1[0]=True\r\nch1 = True\r\n\r\nfor i in range(m):\r\n x,y = map(int,input().split())\r\n x = x-1\r\n y = y-1\r\n if ch1 == True and x == 0:\r\n ch1 = None\r\n arr[y] +=1\r\n arr1[y] = True\r\n arr[x] -=1\r\n if arr[x] == 0:\r\n arr1[x] = False\r\n continue\r\n \r\n arr[y] +=1\r\n arr[x] -=1\r\n if ch1 == None and arr1[x] == True:\r\n \r\n arr1[y] = True\r\n \r\n if ch1 == None and arr[x] == 0:\r\n arr1[x] = False\r\n\r\ncnt=0\r\nfor i in range(n):\r\n if arr1[i] == True:\r\n cnt +=1 \r\nprint(cnt)\r\n","sub_path":"agc002/agc002_b/20191212022418.py","file_name":"20191212022418.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"588772440","text":"import numpy as np\n\ndef test():\n\n a = np.array([1,2,3])\n b = np.array([[1,2,3],[4,5,6],[7,8,9]])\n print(b)\n print(np.amin(b))\n print(np.amax(b))\n print((np.amin(b,0)))\n print(np.amin(b,1))\n print(np.median(b))\n\ndef my_type():\n\n persontype = np.dtype({\n 'names':['name', 'age', 'chinese', 'math', 'english'],\n 'formats':['S32','i', 'i', 'i', 'f']})\n\n peoples = np.array([(\"ZhangFei\",32,75,100, 90),\n (\"GuanYu\",24,85,96,88.5),\n (\"ZhaoYun\",28,85,92,96.5),\n (\"HuangZhong\",29,65,85,100)],\n dtype=persontype)\n\n ages = peoples[:]['age']\n chineses = peoples[:]['chinese']\n maths = peoples[:]['math']\n englishs = peoples[:]['english']\n print(ages,np.mean(ages))# np.mean() 取平均值\n print(chineses,np.mean(chineses))\n print(maths,np.mean(maths))\n print(englishs,np.mean(englishs))\n\ndef my_ufunc():\n\n x1 = np.arange(1,11,2) # 初始,结束,步长\n x2 = np.linspace(1,9,5) # 初始,结束,数组长度\n print(x1,x2)\n print(np.add(x1,x2))\n print(np.subtract(x1,x2))\n print(np.multiply(x1,x2))\n\ndef work():\n\n work_tpye = np.dtype({\n\n 'names':['name','yuwen','yingyu','shuxue','total'],\n 'formats':['S32','i','i','i','f']})\n\n works = np.array(\n [\n ('zhangfei',66,65,30,0),\n ('guanyu',95,85,98,0),\n ('zhaoyun',93,92,96,0),\n ('huangzhong',90,88,77,0),\n ('dianwei',80,90,90,0)\n ],\n dtype=work_tpye\n )\n\n works[:]['total'] = works[:]['yuwen'] + works[:]['yingyu']+works[:]['shuxue']\n\n print(np.amin(works[:]['yuwen']),\n np.amax(works[:]['yuwen']),\n np.mean(works[:]['yuwen']),\n np.std(works[:]['yuwen']),\n np.var(works[:]['yuwen']))\n print(np.sort(works,order='total'))\n\nwork()\n","sub_path":"np-learn.py","file_name":"np-learn.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185900442","text":"#!/usr/bin/python3\n\nfrom NetworkManager import NetworkManager, Settings\nimport time\nfrom pprint import pprint\nimport dbus.mainloop.glib\nfrom gi.repository import GObject\n\n\ndef find_primary_connection(connection_id):\n for connection in Settings.ListConnections():\n settings = connection.GetSettings()\n if settings[\"connection\"][\"id\"] == \"Auto Ethernet\":\n return connection\n\n\ndef reset_dns_servers(connection, working_connection):\n settings = connection.GetSettings()\n settings[\"ipv4\"][\"dns\"] = [\"208.67.222.222\", \"208.67.220.220\"]\n 
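# (comment added for clarity) ignore-auto-dns below keeps DHCP-supplied\n    # nameservers from overriding the OpenDNS resolvers pinned above\n    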
settings[\"ipv4\"][\"ignore-auto-dns\"] = True\n connection.Update(settings)\n device_list = working_connection.Devices\n NetworkManager.DeactivateConnection(working_connection)\n NetworkManager.ActivateConnection(connection, device_list[0], \"/\")\n\n\ndef properties_changed(*args, **kwargs):\n print(\"Properties have changed\")\n working_connection = NetworkManager.PrimaryConnection\n active_connection = find_primary_connection(working_connection.Id)\n nameservers = working_connection.Ip4Config.Nameservers\n pprint(nameservers)\n if nameservers != [\"208.67.222.222\", \"208.67.220.220\"]:\n reset_dns_servers(active_connection, working_connection)\n time.sleep(1)\n working_connection = NetworkManager.PrimaryConnection\n nameservers = working_connection.Ip4Config.Nameservers\n pprint(nameservers)\n\n\ndef main():\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n loop = GObject.MainLoop()\n\n properties_changed()\n NetworkManager.OnPropertiesChanged(properties_changed)\n loop.run()\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dns_fixer.py","file_name":"dns_fixer.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281514567","text":"inputfile = \"C:\\\\Users\\\\jkearns\\\\Documents\\\\GitHub\\\\advent-of-code-2017\\\\09\\\\input09.txt\"\nwith open(inputfile) as f:\n lines = f.readlines()\nstream = lines[0]\n\ntotalScore = 0\ncurrentScore = 0\nisGarbage = False\nshouldIgnore = False\n\nfor character in stream:\n if (shouldIgnore): \n shouldIgnore = False\n continue\n if (character == \"!\"):\n shouldIgnore = True\n continue\n if (isGarbage):\n if character == \">\":\n isGarbage = False\n continue\n else : continue\n if character == \"<\":\n isGarbage = True\n if character == \"{\":\n currentScore += 1\n if character == \"}\":\n totalScore += currentScore\n currentScore -= 1\n\n\n\n\nprint(totalScore)","sub_path":"09/09a.py","file_name":"09a.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150839590","text":"from problems.old.EulerFunctions import firstNPrimes\nfrom problems.old.EulerFunctions import bSearch\nimport math\n\nallprimes = list(firstNPrimes(100000))\nprimes = []\n\nfor p in allprimes:\n tmp = []\n while p > 0:\n (p, r) = divmod(p, 10)\n tmp.append(r)\n tmp.reverse()\n primes.append(tmp)\n\n# filter\nprimes = [x for x in primes if ((not (0 in x or 2 in x or 4 in x or 6 in x or 8 in x or 5 in x))\n or x in [23, 53])\n ]\nprimes = [x for x in primes if x.count(1) + x.count(7) < 3]\nprimes = [x for x in primes if len(x) > 1]\nprimes = [x for x in primes if len(x) == 2\n or not ((x[0] in [3, 9] and x[1] in [3, 9])\n or (x[len(x) - 1] in [3, 9] and x[len(x) - 2] in [3, 9])\n )]\nprimes = [x for x in primes if not (x[0] in [1, 9] or x[len(x) - 1] in [1, 9])]\n\ntmp = primes\nprimes = []\nfor p in tmp:\n x = 0\n match = True\n for s in p:\n x = x * 10 + s\n match = match and bSearch(allprimes, x)\n xx = x\n pow = math.pow(10, math.floor(math.log(xx, 10)) + 1)\n while xx > 0 and pow >= 10:\n xx = xx % pow\n pow /= 10\n match = match and bSearch(allprimes, xx)\n if (match):\n primes.append(x)\n","sub_path":"problems/old/pb37.py","file_name":"pb37.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"67172383","text":"#coding:utf8\n\nfrom framework.data import db\nfrom bson import 
ObjectId,DBRef\n\ndef get_source_by_sites(sites):\n    cond = map(lambda x: {'id': x}, sites)\n    results = db.sites.find({'$or': cond})\n    sources = []\n    for result in results:\n        # is this node a directory whose subtree is fully selected?\n        if result['isParent'] and sites[result['id']]['s']:\n            _get_tree_source_by_sites([result], sources)\n        else:\n            sources.append(result['source'])\n\n    return ';'.join(sources)\n\ndef _get_tree_source_by_sites(sites, callback):\n    for site in sites:\n        callback.append(site['source'])\n\n        sub_sites = db.sites.find({'pId': site['id']})\n        if sub_sites.count() > 0:\n            _get_tree_source_by_sites(sub_sites, callback)\n\ndef execute(context):\n\n    condition = context.condition\n    doc = context.doc\n    uid = None\n\n    if doc:\n        uid = doc.pop('_id', None)\n\n    #import pdb;pdb.set_trace()\n    \n    # edit\n    if doc and uid:\n        sites = doc.pop('sites', None)\n\n        if not uid:\n            context.result = {'errno': -3, 'msg': 'required parameter missing: doc._id'}\n            return\n\n        if doc['mtype'] == '4' and sites:\n            doc['source'] = get_source_by_sites(sites)\n\n        db.classify.update({'_id': ObjectId(uid)}, {'$set': doc})\n\n        if sites:\n            db.classify_sites.update({'classify': uid}, {'classify': uid, 'sites': sites})\n\n        context.result = {'errno': 2, 'msg': 'public-opinion work order updated successfully'}\n        return\n\n    # get\n    elif condition:\n        uid = condition.get('_id')\n        if not uid:\n            context.result = {'errno': -1, 'msg': 'required parameter missing: doc._id'}\n            return\n\n        doc = db.classify.find_one({'_id': ObjectId(uid)})\n        if not doc:\n            context.result = {'errno': -2, 'msg': 'object not found'}\n            return\n\n        if doc.mtype == '4':\n            sites = db.classify_sites.find_one({'classify': uid}) or {}\n            if sites:\n                doc.sites = sites.sites\n\n        context.result = {'errno': 1, 'doc': doc}\n        return\n\n    # new\n    elif doc and uid is None:\n        sites = doc.pop('sites', {})\n\n        if doc['mtype'] == '4' and sites:\n            doc['source'] = get_source_by_sites(sites)\n\n        doc['window'] = 0\n        new_id = db.classify.save(doc)\n\n        if doc['mtype'] == '4' and sites:\n            db.classify_sites.save({'classify': str(new_id), 'sites': sites})\n\n        context.result = {'errno': 0, 'msg': 'public-opinion work order created successfully', 'new_id': str(new_id)}\n        return\n\n    else:\n        context.result = {}\n        return\n\ndef __params__():\n    return []\n\ndef __unnecessary_params__():\n    return ['doc', 'condition']\n\ndef __result__():\n    return {'result': {}}\n","sub_path":"platform/script/5604a7a3f3aa2f4442445e6b.py","file_name":"5604a7a3f3aa2f4442445e6b.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"563759100","text":"import math\n\n\ndef karatsuba(x, y):\n    str_x = str(x)\n    str_y = str(y)\n\n    if len(str_x) != len(str_y) or math.log(len(str_x), 2) % 1.0 != 0:\n        return x * y\n\n    elif len(str_x) == 1 or len(str_y) == 1:\n        return x * y\n\n    else:\n        half_ind = max(int(len(str_x) / 2), int(len(str_y) / 2))\n        a = int(str_x[:half_ind])\n        b = int(str_x[half_ind:])\n        c = int(str_y[:half_ind])\n        d = int(str_y[half_ind:])\n\n        p = a + b\n        q = c + d\n\n        ac = karatsuba(a, c)\n        bd = karatsuba(b, d)\n        pq = karatsuba(p, q)\n\n        adbc = pq - ac - bd\n\n        return int((math.pow(10, len(str_x)) * ac) + (math.pow(10, len(str_x) / 2) * adbc) + bd)\n\nprint(45 * 14)\nprint(karatsuba(45, 14))\n","sub_path":"karatsuba.py","file_name":"karatsuba.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"42680298","text":"\"\"\"Given: Two positive integers a and b (a a[j]:\n            minimum = j\n        a[i], a[minimum] = a[minimum], a[i]\n        print(\"Step {} :{}\".format(i+1, a))\n    print(\"After sorting 
:{}\\n\".format(a))\n for k in range(3):\n hour = a[k] // (60*60)\n min = (a[k]-hour*60*60) // 60\n sec = a[k] % 60\n print(\"{} 등 : {}시간 {}분 {}초\".format(k+1, hour, min, sec))\n\nplayer = int(input(\"마라토너의 수를 입력하시오:\"))\nrecords = [0]*player\nfor i in range(player):\n records[i] = int(input(\"마라토너의 기록을 초단위로 입력:\"))\n\nselection_sort(records)","sub_path":"Algorithm/2. Task/Sort_Selection.py","file_name":"Sort_Selection.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1693459","text":"'''\n시작 시각 :18:20\n종료 시각 :20:05\n\n\n다음 코드에서, 출력되는 결과값이 왜 실제 출력되어야 하는 결과값보다 작은지 설명하시오.\n\n답 : 7개의 스레드가 count 변수를 100000번씩 더해서 7000000이 나와야 하는데 다른 값이 나온 이유는, 같은 변수를 동시에\n접근했기 때문이다. 실제로 count+=offset이 실제 메모리에 저장 되려면 값을 메모리에서 레지스터로 불러오고, 레스터에서 더하고,\n그 더한 값을 다시 메모리에 저장하는 과정을 거친다. 즉, 실제 값의 메모리에 더해지려면 이 모든 과정을 거쳐야 하는데\n한 스레드에서 instruction이 끝나기 전에 다른 스레드에서 덧셈을 시작하면서 실제로 더해지는 값이 더 작게 나오게 된다.\n\n해당 문제점을 해결하기 위해 사용해야 하는 해결책을 서술하고,\n\n답 : 해결법은 ��레드를 동기화 하는 것이다. 여러 방법중 Lock을 사용하는 방법이 있다. Lock은 한 스레드가 변수를 사용했으면\n다른 스레드가 사용하지 못하도록 막는 역할을 한다. 변수를 다 사용했으면 그 스레드는 변수에 대한 Lock을 풀어줘야 한다.\n이는 Release라고 부른다.\n사용법\n1. Lock.aquire()=잠금-다른 스레드의 접근을 막는다.\n2.이 안에 Code들을 무조건 한 스레드에 의해 순차적으로 실행되게 된다.\n3. Lock.release()=잠금 해제-다른 스레드들에게 접근 가능하도록 잠금을 푼다.\n\n\n\n해당 개념을 자율차 코드에 적용하였을 때\n1) 어느 부분에 적용할 수 있을지,\n\n서브 스레드를 활용해서 복잡하거나 긴 계산이 필요한 장애물, 목표와의 거리 및 차선인식등의 시스템에 적용할 수 있을 거 같다.\n\n2) 이를 통해 어떤 이점을 얻을 수 있는지\n아무래도 하나씩 값을 처리하는 것보다 동시에 여러 스레드로 계산하는 것이 처리속도가 빠르기에 자율주행 자동차 움직임이\n더 매끄러울 수 있고 이동 속도 또한 빠르게 진행시킬 수 있다.\n\n\n\n\n(Optional) 아래 코드의 실행 결과값이 실제 출력되어야 하는 결과값과 동일하게 출력되도록 변경하시오.\n\n*** Write Your Answer Below ***\n\n\n*** Your Answer Ends Here ***\n'''\n\n\nfrom threading import Thread\n\n\nclass Count:\n def __init__(self):\n self.count = 0\n\n def add_offset(self, offset):\n self.count += offset\n\n\ndef worker(idx, limit, count_obj):\n print(idx)\n for _ in range(limit):\n count_obj.add_offset(1)\n\n\ndef run_threads(func, thread_num, limit, count_obj):\n threads = []\n for i in range(thread_num):\n args = (i, limit, count_obj)\n thread = Thread(target=func, args=args)\n threads.append(thread)\n thread.start()\n\n for thread in threads:\n thread.join()\n\n\nlimit = 10 ** 6\nthread_num = 7\ncount = Count()\nrun_threads(worker, thread_num, limit, count)\nprint(f\"Result should be {thread_num * limit}, but the total count is {count.count}\")\n","sub_path":"Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"437749411","text":"import json\n\nfrom flask import Blueprint\n\nfrom ...component import form\nfrom ...component import mymysql\nfrom ...exception import MyServiceException\n\napp = Blueprint('distribution_logic_directory', __name__,\n url_prefix='/distribution/logic/directory')\n\n\n@app.route('/select', methods=['POST'])\ndef select():\n return json.dumps(mymysql.execute(\"\"\"\n select id, pid, name, description\n from designer_logic_directory\n \"\"\", {\n\n }))\n\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n request_data = form.check([\"pid\", \"name\"])\n pid = request_data[\"pid\"]\n name = request_data[\"name\"]\n\n insert_result = mymysql.execute(\"\"\"\n insert into designer_logic_directory(pid, name) values (%(pid)s, %(name)s)\n \"\"\", {\n \"pid\": pid,\n \"name\": name,\n })\n\n # insert logic_data\n mymysql.execute(\"\"\"\n insert into designer_logic_data(did, file) values (%(did)s, %(file)s)\n 
\"\"\", {\n \"did\": insert_result,\n \"file\": '',\n })\n\n return json.dumps(insert_result)\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n request_data = form.check([\"id\"])\n params = {}\n update_set_sql_str = \"\"\n\n params[\"id\"] = request_data[\"id\"]\n\n if request_data.__contains__(\"name\"):\n params[\"name\"] = request_data[\"name\"]\n update_set_sql_str = \"set name=%(name)s\"\n\n if request_data.__contains__(\"pid\"):\n params[\"pid\"] = request_data[\"pid\"]\n update_set_sql_str = \"set pid=%(pid)s\"\n\n if \"\" == update_set_sql_str:\n raise MyServiceException(\"no content for update set\")\n\n return json.dumps(\n mymysql.execute(\"update designer_data_directory \" + update_set_sql_str + \" where id = %(id)s\", params))\n\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n request_data = form.check([\"id\"])\n\n def get_children(_id):\n return mymysql.execute(\"\"\"\n select id\n from designer_logic_directory\n where pid = %(id)s\n \"\"\", {\"id\": _id})\n\n def delete_one_level(_id):\n # delete logic data\n mymysql.execute(\"\"\"\n delete from designer_logic_data where did = %(did)s\n \"\"\", {\"did\": _id})\n\n return mymysql.execute(\"\"\"\n delete\n from designer_logic_directory\n where id = %(id)s\n \"\"\", {\"id\": _id})\n\n def do_delete(_id):\n children = get_children(_id)\n if len(children) > 0:\n for item in children:\n do_delete(item[\"id\"])\n delete_one_level(_id)\n\n do_delete(request_data[\"id\"])\n return \"\"\n\n\n@app.route('/fork', methods=['POST'])\ndef fork():\n pass\n","sub_path":"distribution/service/logic/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186423590","text":"import tkinter\nimport math\nimport cmath\n\nimport numpy\n\nimport tesla\n\n# Размер единичной окружности в пикселях\nCIRCLE_SIZE = 400\n\n\ndef formula(x, y, width, height):\n 'Формула по вещественным координатам возвращает массив'\n 'из координат на экране'\n return (math.floor(x * CIRCLE_SIZE) + width // 4,\n -math.floor(y * CIRCLE_SIZE) + height // 2)\n\n\ndef render_point(x, y, *, canvas, rad=5, fill=None, tag=None):\n # используйте render_point вместо\n # make_point, make_point_check, circle или circle_red\n canvas.create_oval(x - rad, y - rad,\n x + rad, y + rad,\n fill=fill, tag=tag)\n\n\ndef arc(rad, center, A, B, *, window_dim, canvas):\n if rad == 0:\n return\n\n A_ = formula(*A, *window_dim)\n B_ = formula(*B, *window_dim)\n center_ = formula(*center, *window_dim)\n\n def begin_end_helper(be, ab):\n for_acos_0 = (be[0] - center[0]) / rad\n\n if math.fabs(for_acos_0) <= 1:\n ret = math.acos(for_acos_0)\n else:\n ret = 0 if for_acos_0 > 0 else math.pi\n\n ret = -ret * 180 / math.pi\n\n if ab < center_[1]:\n ret = -ret\n\n return ret\n\n begin = begin_end_helper(A, A_[1])\n end = begin_end_helper(B, B_[1])\n\n if math.fabs(end - begin) >= 180:\n if end - begin > 0:\n end = 360 - end\n end = 2 * begin - end\n else:\n end += 360\n\n canvas.create_arc(\n formula(center[0] - rad, center[1] - rad, *window_dim),\n formula(center[0] + rad, center[1] + rad, *window_dim),\n start=begin, extent=end - begin, style='arc')\n\n\ndef misha_arc(a, b, *, window_dim, canvas):\n _denom = a[1] * b[0] - a[0] * b[1]\n if math.fabs(_denom) > 1e-14:\n y_O = (a[0] ** 2 * b[0] -\n a[0] * b[0] ** 2 -\n b[1] ** 2 * a[0] +\n a[1] ** 2 * b[0] -\n a[0] + b[0]) / (2 * _denom)\n else:\n canvas.create_line(*formula(*a, *window_dim),\n 
*formula(*b, *window_dim))\n        return\n\n    def helper(a):\n        return (a[0] ** 2 +\n                a[1] ** 2 + 1 -\n                2 * y_O * a[1]) / (2 * a[0])\n\n    if math.fabs(a[0]) > 1e-14:\n        x_O = helper(a)\n    else:\n        x_O = helper(b)\n\n    c = -1 + x_O ** 2 + y_O ** 2\n    R = math.sqrt(c)\n    arc(R, (x_O, y_O), a, b, window_dim=window_dim, canvas=canvas)\n\n\ndef matrix_rot_otn_q(Q, alpha):\n    'rotation matrix by alpha about a point Q != the centre'\n\n    # TODO: work out what _unknown_value and _tmp really are and give them proper names\n    _unknown_value = Q[0] ** 2 + Q[1] ** 2\n\n    if _unknown_value == 0:\n        x, y = 1, 0\n\n    else:\n        _tmp = math.sqrt(_unknown_value)\n        x = Q[0] / _tmp\n        y = Q[1] / _tmp\n\n    # A is a motion matrix such that Q = [x, y, z] -> [x', 0, z]; A_1 is its inverse\n    angle = cmath.phase(complex(x, -y))\n    A = tesla.rotation(angle)\n    A_1 = numpy.linalg.inv(A)\n\n    # B maps [x', 0, y] -> [1, 0, 0]\n    x = x * Q[0] + y * Q[1]\n    z = Q[2]\n\n    B = numpy.array(((z, 0, -x),\n                     (0, 1, 0),\n                     (-x, 0, z)))\n\n    B_1 = numpy.array(((z, 0, x),\n                       (0, 1, 0),\n                       (x, 0, z)))\n\n    # C is the rotation matrix by alpha; the resulting matrix, which rotates\n    # by alpha about Q, is then written into it\n    C = tesla.rotation(alpha)\n\n    return A_1 @ B_1 @ C @ B @ A\n\n\ndef change_to_circle(P):\n    a = 1 / (1 + P[2])\n    return a * P[0], a * P[1]\n\n\ndef change_to_r3(P):\n    a = 1 / (1 - P[0] ** 2 - P[1] ** 2)\n    return (a * 2 * P[0],\n            a * 2 * P[1],\n            a * (1 + P[0] ** 2 + P[1] ** 2))\n\n\ndef motif(p, q, *, window_dim, canvas):\n    'Draws the base polygon.'\n    _p, _q = math.pi / p, math.pi / q\n\n    arr = [0, []]\n    for i in range(p):\n        # here we simply rotate a point on the unit circle\n        angle = 2 * i * _p\n        x = math.cos(angle)\n        y = math.sin(angle)\n        # radius of the required circle; the side can be built from it right away\n        r = (math.cos(_q + _p) /\n             math.sqrt((math.cos(_q) ** 2 - math.sin(_p) ** 2)))\n        x *= r\n        y *= r\n        render_point(*formula(x, y, *window_dim),\n                     canvas=canvas, fill='red', tag='point')\n        arr[1].append(change_to_r3([x, y]))\n\n    for i in range(p):\n        misha_arc(change_to_circle(arr[1][i - 1]),\n                  change_to_circle(arr[1][i]),\n                  window_dim=window_dim, canvas=canvas)\n    return [arr]\n\n\ndef rotate_not_center(vert, p, q, l, k, *, window_dim, canvas):\n    'rotates the vertex array about the l-th vertex, k times. Usually q - 2 times,'\n    'but q - 3 times in the boundary cases'\n    Q = vert[1][l]\n    res = matrix_rot_otn_q(Q, 2 * math.pi / q)\n    arr_new = []\n\n    for i in range(k):\n        arr_new1 = [i == 0, [vert[1][l], vert[1][l - 1]]]\n\n        for j in range(p - 2):\n            P = res @ numpy.array(vert[1][(l + j + 2) % p])\n            arr_new1[1].append(P)\n            P = change_to_circle(P)\n            render_point(*formula(*P, *window_dim),\n                         canvas=canvas, fill='red', tag='point')\n\n        arr_new.append(arr_new1)\n        vert = arr_new1\n        l = 0\n\n    return arr_new\n\n\ndef match_vert(kol, q, p, vert, rotate_o, *, window_dim, canvas):\n    'joins the pattern, rotates, joins, rotates, joins, and so on'\n    'vert[i, 0] and vert[0, 1] are points from the previous layer. 
When rotating, I am not'\n    'thinking about this yet, I suppose'\n    for i in range(kol):\n        for j in range(2, p + 1):\n            misha_arc(\n                change_to_circle(vert[i][1][(j - 1) % p]),\n                change_to_circle(vert[i][1][j % p]),\n                window_dim=window_dim, canvas=canvas)\n\n    rot = rotate_o\n\n    for k in range(p - 1):\n        for i in range(kol):\n            old = rot @ numpy.array(vert[i][1][1])\n\n            for j in range(2, p):\n                P = rot @ numpy.array(vert[i][1][j])\n\n                render_point(*formula(*change_to_circle(P), *window_dim),\n                             canvas=canvas, fill='red', tag='point')\n\n                misha_arc(change_to_circle(old), change_to_circle(P),\n                          window_dim=window_dim, canvas=canvas)\n                old = P\n\n            rot = rot @ rotate_o\n\n\ndef layer(arr, kol, p, q, rotate_o, n, *, window_dim, canvas):\n    if n <= 0:\n        return\n\n    kol_new = 0\n    arr_new = []\n\n    for k in range(kol):\n        for i in range(2 + arr[k][0], p - 1):\n            arr_new1 = rotate_not_center(arr[k], p, q, i, q - 2,\n                                         window_dim=window_dim, canvas=canvas)\n\n            for j in range(q - 2):\n                arr_new.append(arr_new1[j])\n\n            kol_new += q - 2\n\n        for i in range(p - 1, p):\n            arr_new1 = rotate_not_center(arr[k], p, q, i, q - 3,\n                                         window_dim=window_dim, canvas=canvas)\n\n            for j in range(q - 3):\n                arr_new.append(arr_new1[j])\n            kol_new += q - 3\n\n    match_vert(kol_new, q, p, arr_new, rotate_o,\n               window_dim=window_dim, canvas=canvas)\n\n    layer(arr_new, kol_new, p, q, rotate_o, n - 1,\n          window_dim=window_dim, canvas=canvas)\n","sub_path":"tesla_old.py","file_name":"tesla_old.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"4976357","text":"#!/usr/bin/env python3\n\ndef quick_sort(array):\n    if len(array) < 2:\n        return array\n    else:\n        pivot = array[0]\n        less = [i for i in array[1:] if i <= pivot]\n        greater = [i for i in array[1:] if i > pivot]\n        return quick_sort(less) + [pivot] + quick_sort(greater)","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"494077321","text":"from collections import deque\nimport warnings\nimport pandas as pd\nimport numpy as np\nfrom xml.etree import ElementTree as ET\n\nBIOLOGICAL_PROCESS = 'GO:0008150'\nMOLECULAR_FUNCTION = 'GO:0003674'\nCELLULAR_COMPONENT = 'GO:0005575'\nFUNC_DICT = {\n    'cc': CELLULAR_COMPONENT,\n    'mf': MOLECULAR_FUNCTION,\n    'bp': BIOLOGICAL_PROCESS}\n\nEXP_CODES = set([\n    'EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP', 'TAS', 'IC',\n    'HTP', 'HDA', 'HMP', 'HGI', 'HEP'])\nCAFA_TARGETS = set([\n    '10090', '223283', '273057', '559292', '85962',\n    '10116', '224308', '284812', '7227', '9606',\n    '160488', '237561', '321314', '7955', '99287',\n    '170187', '243232', '3702', '83333', '208963',\n    '243273', '44689', '8355'])\n\ndef is_cafa_target(org):\n    return org in CAFA_TARGETS\n\ndef is_exp_code(code):\n    return code in EXP_CODES\n\n\nclass Ontology(object):\n\n    def __init__(self, filename='data/go.obo', with_rels=False):\n        self.ont = self.load(filename, with_rels)\n\n    def has_term(self, term_id):\n        return term_id in self.ont\n\n    def get(self, term_id):\n        if self.has_term(term_id):\n            return self.ont[term_id]\n        return None\n\n    def load(self, filename, with_rels):\n        ont = dict()\n        obj = None\n        with open(filename, 'r') as f:\n            for line in f:\n                line = line.strip()\n                if not line:\n                    continue\n                if line == '[Term]':\n                    if obj is not None:\n                        ont[obj['id']] = obj\n                    obj = dict()\n                    obj['is_a'] = list()\n                    obj['part_of'] = list()\n                    obj['regulates'] = list()\n                    obj['alt_ids'] = list()\n                    
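# (comment added for clarity) each '[Term]' stanza in the OBO file supplies an id,\n                    # a name, is_a parent ids, alt_id aliases and an is_obsolete flag --\n                    # exactly the keys initialised here\n                    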
obj['is_obsolete'] = False\n continue\n elif line == '[Typedef]':\n obj = None\n else:\n if obj is None:\n continue\n l = line.split(\": \")\n if l[0] == 'id':\n obj['id'] = l[1]\n elif l[0] == 'alt_id':\n obj['alt_ids'].append(l[1])\n elif l[0] == 'is_a':\n obj['is_a'].append(l[1].split(' ! ')[0])\n elif with_rels and l[0] == 'relationship':\n it = l[1].split()\n # add all types of relationships\n obj['is_a'].append(it[1])\n elif l[0] == 'name':\n obj['name'] = l[1]\n elif l[0] == 'is_obsolete' and l[1] == 'true':\n obj['is_obsolete'] = True\n if obj is not None:\n ont[obj['id']] = obj\n for term_id in list(ont.keys()):\n for t_id in ont[term_id]['alt_ids']:\n ont[t_id] = ont[term_id]\n if ont[term_id]['is_obsolete']:\n del ont[term_id]\n for term_id, val in ont.items():\n if 'children' not in val:\n val['children'] = set()\n for p_id in val['is_a']:\n if p_id in ont:\n if 'children' not in ont[p_id]:\n ont[p_id]['children'] = set()\n ont[p_id]['children'].add(term_id)\n return ont\n\n\n def get_anchestors(self, term_id):\n if term_id not in self.ont:\n return set()\n term_set = set()\n q = deque()\n q.append(term_id)\n while(len(q) > 0):\n t_id = q.popleft()\n if t_id not in term_set:\n term_set.add(t_id)\n for parent_id in self.ont[t_id]['is_a']:\n if parent_id in self.ont:\n q.append(parent_id)\n return term_set\n\n\n def get_parents(self, term_id):\n if term_id not in self.ont:\n return set()\n term_set = set()\n for parent_id in self.ont[term_id]['is_a']:\n if parent_id in self.ont:\n term_set.add(parent_id)\n return term_set\n\n\n def get_term_set(self, term_id):\n if term_id not in self.ont:\n return set()\n term_set = set()\n q = deque()\n q.append(term_id)\n while len(q) > 0:\n t_id = q.popleft()\n if t_id not in term_set:\n term_set.add(t_id)\n for ch_id in self.ont[t_id]['children']:\n q.append(ch_id)\n return term_set\n\ndef read_fasta(lines):\n seqs = list()\n info = list()\n seq = ''\n inf = ''\n for line in lines:\n line = line.strip()\n if line.startswith('>'):\n if seq != '':\n seqs.append(seq)\n info.append(inf)\n seq = ''\n inf = line[1:]\n else:\n seq += line\n seqs.append(seq)\n info.append(inf)\n return info, seqs\n\n\nclass DataGenerator(object):\n\n def __init__(self, batch_size, is_sparse=False):\n self.batch_size = batch_size\n self.is_sparse = is_sparse\n\n def fit(self, inputs, targets=None):\n self.start = 0\n self.inputs = inputs\n self.targets = targets\n if isinstance(self.inputs, tuple) or isinstance(self.inputs, list):\n self.size = self.inputs[0].shape[0]\n else:\n self.size = self.inputs.shape[0]\n self.has_targets = targets is not None\n\n def __next__(self):\n return self.next()\n\n def reset(self):\n self.start = 0\n\n def next(self):\n if self.start < self.size:\n batch_index = np.arange(\n self.start, min(self.size, self.start + self.batch_size))\n if isinstance(self.inputs, tuple) or isinstance(self.inputs, list):\n res_inputs = []\n for inp in self.inputs:\n if self.is_sparse:\n res_inputs.append(\n inp[batch_index, :].toarray())\n else:\n res_inputs.append(inp[batch_index, :])\n else:\n if self.is_sparse:\n res_inputs = self.inputs[batch_index, :].toarray()\n else:\n res_inputs = self.inputs[batch_index, :]\n self.start += self.batch_size\n if self.has_targets:\n if self.is_sparse:\n labels = self.targets[batch_index, :].toarray()\n else:\n labels = self.targets[batch_index, :]\n return (res_inputs, labels)\n return res_inputs\n else:\n self.reset()\n return 
self.next()\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"618539065","text":"import json\n\n\ndef hello(event, context):\n body = {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"input\": event\n }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body)\n }\n\n return response\n\n # Use this code if you don't use the http event with the LAMBDA-PROXY\n # integration\n \"\"\"\n return {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"event\": event\n }\n \"\"\"\n\ndef NewMessage(event, context):\n statusMessage = \"Message saved successfully!!!\"\n\n # extract the message from the request body\n\n # store the message by making a call to the DAL\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(statusMessage)\n }\n return response\n\ndef IsNewMessage(event, context):\n retVal = False\n # Check the db for new messages\n\n statusMessage = \"Message Present: \" + str(retVal)\n response = {\n \"statusCode\":200,\n \"body\": json.dumps(statusMessage)\n }\n return response\n\ndef GetNewMessage(event, context):\n\n msg = ''\n # read whether a new message is waiting to be read\n statusMessage = \"Message: \" + msg\n response = {\n \"statusCode\":200,\n \"body\": json.dumps(statusMessage)\n }\n return response","sub_path":"APIGW_SQS/Server/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"131682479","text":"#python #abaqus #abaqustutorial #hnrwagner \r\n\r\n#------------------------------------------------------------------------------\r\n#------------------------------------------------------------------------------\r\n\r\n\r\nfrom abaqus import *\r\nfrom abaqusConstants import *\r\nimport regionToolset\r\nimport __main__\r\nimport section\r\nimport part\r\nimport material\r\nimport assembly\r\nimport step\r\nimport interaction\r\nimport load\r\nimport mesh\r\nimport job\r\nimport sketch\r\nimport visualization\r\nimport xyPlot\r\nimport connectorBehavior\r\nimport odbAccess\r\nfrom operator import add\r\nimport numpy as np\r\n\r\n\r\n#------------------------------------------------------------------------------\r\n#------------------------------------------------------------------------------\r\n\r\n# functions\r\n\r\n#------------------------------------------------------------------------------\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Part_3D_Cylinder(radius,length,thickness,part,model):\r\n s1 = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\n g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints\r\n s1.setPrimaryObject(option=STANDALONE)\r\n s1.CircleByCenterPerimeter(center=(0.0, 0.0), point1=(radius, 0.0))\r\n s1.CircleByCenterPerimeter(center=(0.0, 0.0), point1=(radius-thickness, 0.0))\r\n p = mdb.models[model].Part(name=part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\r\n p = mdb.models[model].parts[part]\r\n p.BaseSolidExtrude(sketch=s1, depth=length)\r\n s1.unsetPrimaryObject()\r\n p = mdb.models[model].parts[part]\r\n del mdb.models[model].sketches['__profile__']\r\n \r\n\r\ndef Create_Part_2D_Cylinder(radius,length,part,model):\r\n s = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\n g, v, d, c = 
s.geometry, s.vertices, s.dimensions, s.constraints\r\n s.setPrimaryObject(option=STANDALONE)\r\n s.CircleByCenterPerimeter(center=(0.0, 0.0), point1=(radius, 0.0))\r\n p = mdb.models[model].Part(name=part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\r\n p = mdb.models[model].parts[part]\r\n p.BaseShellExtrude(sketch=s, depth=length)\r\n s.unsetPrimaryObject()\r\n p = mdb.models[model].parts[part]\r\n del mdb.models[model].sketches['__profile__']\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Part_2D_Cone(radiusR,height,angle,part,model):\r\n s = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\n g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints\r\n s.setPrimaryObject(option=STANDALONE)\r\n s.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))\r\n s.FixedConstraint(entity=g[2])\r\n s.Line(point1=(radiusR, 0.0), point2=(radiusR+height*tan(angle*np.pi/180), height))\r\n p = mdb.models[model].Part(name=part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\r\n p = mdb.models[model].parts[part]\r\n p.BaseShellRevolve(sketch=s, angle=360.0, flipRevolveDirection=OFF)\r\n s.unsetPrimaryObject()\r\n p = mdb.models[model].parts[part]\r\n del mdb.models[model].sketches['__profile__']\r\n\r\n#------------------------------------------------------------------------------\r\ndef Create_Part_2D_Plate(radiusR,part,model): \r\n s = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\n g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints\r\n s.setPrimaryObject(option=STANDALONE)\r\n s.Line(point1=(-radiusR, 0.0), point2=(radiusR, 0.0))\r\n s.HorizontalConstraint(entity=g[2], addUndoState=False)\r\n p = mdb.models[model].Part(name=part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\r\n p = mdb.models[model].parts[part]\r\n p.BaseShellExtrude(sketch=s, depth=radiusR*2.0)\r\n s.unsetPrimaryObject()\r\n del mdb.models[model].sketches['__profile__']\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Part_Shim(radiusR,height,part,model,angle):\r\n s = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\n g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints\r\n s.setPrimaryObject(option=STANDALONE)\r\n s.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))\r\n s.FixedConstraint(entity=g[2])\r\n s.Line(point1=(radiusR, 0.0), point2=(radiusR, height))\r\n p = mdb.models[model].Part(name=part, dimensionality=THREE_D, type=DEFORMABLE_BODY)\r\n p = mdb.models[model].parts[part]\r\n p.BaseShellRevolve(sketch=s, angle=angle, flipRevolveDirection=OFF)\r\n s.unsetPrimaryObject()\r\n p = mdb.models[model].parts[part]\r\n del mdb.models[model].sketches['__profile__']\r\n \r\n \r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Datum_Plane_by_Principal(type_plane,part,model,offset_plane):\r\n p = mdb.models[model].parts[part]\r\n myPlane = p.DatumPlaneByPrincipalPlane(principalPlane=type_plane, offset=offset_plane)\r\n myID = myPlane.id\r\n return myID\r\n\r\n\r\ndef Create_Set_All_Cells(model,part,set_name):\r\n p = mdb.models[model].parts[part]\r\n c = p.cells[:]\r\n p.Set(cells=c, name=set_name)\r\n\r\ndef Create_Set_All_Faces(model,part,set_name):\r\n p = mdb.models[model].parts[part]\r\n f = p.faces[:]\r\n p.Set(faces=f, name=set_name)\r\n\r\ndef 
Create_Material_Data(model,material_name,e11,e22,e33,nu12,nu13,nu23,g12,g13,g23,lts,lcs,tts,tcs,lss,tss):\r\n mdb.models[model].Material(name=material_name)\r\n mdb.models[model].materials[material_name].Elastic(type=ENGINEERING_CONSTANTS, table=((e11,e22,e33,nu12,nu13,nu23,g12,g13,g23), ))\r\n mdb.models[model].materials[material_name].HashinDamageInitiation(table=((lts,lcs,tts,tcs,lss,tss), ))\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Material_Data_2D(model,material_name,e11,e22,nu12,g12,g13,g23):\r\n mdb.models[model].Material(name=material_name)\r\n mdb.models[model].materials[material_name].Elastic(type=LAMINA, table=((e11,e22,nu12,g12,g13,g23), ))\r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Set_Face(x,y,z,model,part,set_name):\r\n face = ()\r\n p = mdb.models[model].parts[part]\r\n f = p.faces\r\n myFace = f.findAt((x,y,z),)\r\n face = face + (f[myFace.index:myFace.index+1], )\r\n p.Set(faces=face, name=set_name)\r\n return myFace\r\n\r\ndef Create_Set_Edge(x,y,z,model,part,set_name):\r\n edge = ()\r\n p = mdb.models[model].parts[part]\r\n e = p.edges\r\n myEdge = e.findAt((x,y,z),)\r\n edge = edge + (e[myEdge.index:myEdge.index+1], )\r\n f = p.Set(edges=edge, name=set_name)\r\n return myEdge\r\n\r\n\r\n#-----------------------------------------------------------------------------\r\ndef Create_Set_Vertice(x,y,z,model,part,set_name):\r\n vertice = ()\r\n p = mdb.models[model].parts[part]\r\n v = p.vertices\r\n myVertice = v.findAt((x,y,z),)\r\n vertice = vertice + (v[myVertice.index:myVertice.index+1], )\r\n p.Set(vertices=vertice, name=set_name) \r\n\r\n#-----------------------------------------------------------------------------\r\ndef Create_Set_Vertice_2(x,y,z,model,part,set_name):\r\n vertice = ()\r\n a = mdb.models[model].rootAssembly\r\n v = a.instances[part].vertices\r\n myVertice = v.findAt((x,y,z),)\r\n vertice = vertice + (v[myVertice.index:myVertice.index+1], )\r\n a.Set(vertices=vertice, name=set_name)\r\n\r\n#-----------------------------------------------------------------------------\r\n\r\ndef Create_Set_Internal_Surface(x,y,z,model,part,set_name):\r\n face = ()\r\n p = mdb.models[model].parts[part]\r\n s = p.faces\r\n myFace = s.findAt((x,y,z),)\r\n face = face + (s[myFace.index:myFace.index+1], )\r\n p.Surface(side2Faces=face, name=set_name)\r\n#-----------------------------------------------------------------------------\r\n\r\ndef Create_Set_External_Surface(x,y,z,model,part,set_name):\r\n face = ()\r\n p = mdb.models[model].parts[part]\r\n s = p.faces\r\n myFace = s.findAt((x,y,z),)\r\n face = face + (s[myFace.index:myFace.index+1], )\r\n p.Surface(side1Faces=face, name=set_name)\r\n\r\n#-----------------------------------------------------------------------------\r\n\r\ndef Create_Assembly(model,part,instance_name):\r\n a = mdb.models[model].rootAssembly\r\n a.DatumCsysByDefault(CARTESIAN)\r\n p = mdb.models[model].parts[part]\r\n a.Instance(name=instance_name, part=p, dependent=ON)\r\n\r\n#-------------------------------------------------------------\r\n\r\ndef Create_Reference_Point(x,y,z,model,setname):\r\n a = mdb.models[model].rootAssembly\r\n myRP = a.ReferencePoint(point=(x, y, z))\r\n r = a.referencePoints\r\n myRP_Position = r.findAt((x, y, z),)\r\n refPoints1=(myRP_Position, )\r\n a.Set(referencePoints=refPoints1, name=setname)\r\n return myRP,myRP_Position\r\n\r\ndef 
Create_Constraint_Equation(model,constraint_name,set_name,set_name_rp):\r\n mdb.models[model].Equation(name=constraint_name, terms=((1.0, set_name, 2), (-1.0, set_name_rp, 2)))\r\n\r\n\r\ndef Create_Boundary_Condition_by_Instance(model,instance_name,set_name,BC_name,step_name,u1_BC,u2_BC,u3_BC,ur1_BC,ur2_BC,ur3_BC):\r\n a = mdb.models[model].rootAssembly\r\n region = a.instances[instance_name].sets[set_name]\r\n mdb.models[model].DisplacementBC(name=BC_name, createStepName=step_name, region=region, u1=u1_BC, u2=u2_BC, u3=u3_BC, ur1=ur1_BC, ur2=ur2_BC, ur3=ur3_BC, amplitude=UNSET, distributionType=UNIFORM, fieldName='', localCsys=None) \r\n\r\n\r\ndef Create_Boundary_Condition_for_Assembly(model,set_name,BC_name,step_name,u1_BC,u2_BC,u3_BC,ur1_BC,ur2_BC,ur3_BC):\r\n a = mdb.models[model].rootAssembly\r\n region = a.sets[set_name]\r\n mdb.models[model].DisplacementBC(name=BC_name, createStepName=step_name, region=region, u1=u1_BC, u2=u2_BC, u3=u3_BC, ur1=ur1_BC, ur2=ur2_BC, ur3=ur3_BC, amplitude=UNSET, distributionType=UNIFORM, fieldName='', localCsys=None) \r\n\r\ndef Create_Boundary_Condition_by_RP(model,RP_name,BC_name,step_name,u1_BC,u2_BC,u3_BC,ur1_BC,ur2_BC,ur3_BC):\r\n a = mdb.models[model].rootAssembly\r\n region = a.sets[RP_name]\r\n mdb.models[model].DisplacementBC(name=BC_name, createStepName=step_name, region=region, u1=u1_BC, u2=u2_BC, u3=u3_BC, ur1=ur1_BC, ur2=ur2_BC, ur3=ur3_BC, amplitude=UNSET, distributionType=UNIFORM, fieldName='', localCsys=None) \r\n\r\n\r\ndef Create_Analysis_Step(model,step_name,pre_step_name,Initial_inc,Max_inc,Min_inc,Inc_Number,NL_ON_OFF):\r\n a = mdb.models[model].StaticStep(name=step_name, previous=pre_step_name, initialInc=Initial_inc, maxInc=Max_inc, minInc=Min_inc)\r\n a = mdb.models[model].steps[step_name].setValues(maxNumInc=Inc_Number)\r\n a = mdb.models[model].steps[step_name].setValues(nlgeom=NL_ON_OFF)\r\n #a = mdb.models[model].steps[step_name].setValues(stabilizationMagnitude=1E-009, stabilizationMethod=DAMPING_FACTOR, continueDampingFactors=False, adaptiveDampingRatio=None)\r\n\r\ndef Create_Partion_by_Plane(model,part,id_plane):\r\n p = mdb.models[model].parts[part]\r\n c = p.cells[:]\r\n d = p.datums\r\n p.PartitionCellByDatumPlane(datumPlane=d[id_plane], cells=c)\r\n\r\n\r\ndef Create_Partion_by_Plane_2D(model,part,id_plane):\r\n p = mdb.models[model].parts[part]\r\n f = p.faces[:]\r\n d = p.datums\r\n p.PartitionFaceByDatumPlane(datumPlane=d[id_plane], faces=f)\r\n\r\n#-----------------------------------------------------------------------------\r\n\r\ndef Create_Composite_Layup(model,part,set_name,composite_name,number,material,thickness,angle):\r\n layupOrientation = None\r\n p = mdb.models[model].parts[part]\r\n region1=p.sets[set_name]\r\n normalAxisRegion = p.surfaces['Outer_Surface']\r\n primaryAxisRegion = p.sets['Set-Top-Edge']\r\n compositeLayup = mdb.models[model].parts[part].CompositeLayup(name=composite_name, description='', elementType=CONTINUUM_SHELL, symmetric=False)\r\n compositeLayup.Section(preIntegrate=OFF, integrationRule=SIMPSON, poissonDefinition=DEFAULT, thicknessModulus=None, temperature=GRADIENT, useDensity=OFF)\r\n for i in range(0,number,1):\r\n compositeLayup.CompositePly(suppressed=False, plyName='Ply-'+str(i), region=region1, material=material, thicknessType=SPECIFY_THICKNESS, thickness=thickness, orientationType=SPECIFY_ORIENT, orientationValue=angle[i], additionalRotationType=ROTATION_NONE, additionalRotationField='', axis=AXIS_3, angle=0.0, numIntPoints=3)\r\n 
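# a single ReferenceOrientation is shared by all plies: the shell normal comes from the 'Outer_Surface' surface and the primary axis from the 'Set-Top-Edge' set\r\n 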
compositeLayup.ReferenceOrientation(orientationType=DISCRETE, localCsys=None, additionalRotationType=ROTATION_ANGLE, angle=90.0, additionalRotationField='', axis=AXIS_3, stackDirection=STACK_3, normalAxisDefinition=SURFACE, normalAxisRegion=normalAxisRegion, normalAxisDirection=AXIS_3, flipNormalDirection=False, primaryAxisDefinition=EDGE, primaryAxisRegion=primaryAxisRegion, primaryAxisDirection=AXIS_2, flipPrimaryDirection=False)\r\n#-----------------------------------------------------------------------------\r\n\r\ndef Create_Composite_Layup_2D(model,part,set_name,composite_name,number,material,thickness,angle):\r\n layupOrientation = None\r\n p = mdb.models[model].parts[part]\r\n region1=p.sets[set_name]\r\n normalAxisRegion = p.surfaces['Outer_Surface']\r\n primaryAxisRegion = p.sets['Set-Top-Edge']\r\n compositeLayup = mdb.models[model].parts[part].CompositeLayup(name=composite_name, description='', elementType=SHELL, symmetric=False)\r\n compositeLayup.Section(preIntegrate=OFF, integrationRule=SIMPSON, poissonDefinition=DEFAULT, thicknessModulus=None, temperature=GRADIENT, useDensity=OFF)\r\n for i in range(0,number,1):\r\n compositeLayup.CompositePly(suppressed=False, plyName='Ply-'+str(i), region=region1, material=material, thicknessType=SPECIFY_THICKNESS, thickness=thickness, orientationType=SPECIFY_ORIENT, orientationValue=angle[i], additionalRotationType=ROTATION_NONE, additionalRotationField='', axis=AXIS_3, angle=0.0, numIntPoints=3)\r\n compositeLayup.ReferenceOrientation(orientationType=DISCRETE, localCsys=None, additionalRotationType=ROTATION_ANGLE, angle=90.0, additionalRotationField='', axis=AXIS_3, stackDirection=STACK_3, normalAxisDefinition=SURFACE, normalAxisRegion=normalAxisRegion, normalAxisDirection=AXIS_3, flipNormalDirection=False, primaryAxisDefinition=EDGE, primaryAxisRegion=primaryAxisRegion, primaryAxisDirection=AXIS_2, flipPrimaryDirection=False)\r\n \r\n#----------------------------------------------------------------------------\r\n\r\ndef Create_Mesh(model,part,size):\r\n p = mdb.models[model].parts[part]\r\n elemType1 = mesh.ElemType(elemCode=SC8R, elemLibrary=STANDARD, secondOrderAccuracy=OFF, hourglassControl=DEFAULT)\r\n elemType2 = mesh.ElemType(elemCode=SC6R, elemLibrary=STANDARD)\r\n elemType3 = mesh.ElemType(elemCode=UNKNOWN_TET, elemLibrary=STANDARD)\r\n cells = p.cells[:]\r\n pickedRegions =(cells, )\r\n p.setElementType(regions=pickedRegions, elemTypes=(elemType1, elemType2, elemType3))\r\n p.seedPart(size=size, deviationFactor=0.1, minSizeFactor=0.1)\r\n p.generateMesh()\r\n\r\ndef Create_Mesh_Solid(model,part,size):\r\n p = mdb.models[model].parts[part]\r\n elemType1 = mesh.ElemType(elemCode=C3D20R, elemLibrary=STANDARD)\r\n elemType2 = mesh.ElemType(elemCode=C3D15, elemLibrary=STANDARD)\r\n elemType3 = mesh.ElemType(elemCode=C3D10, elemLibrary=STANDARD)\r\n cells = p.cells[:]\r\n pickedRegions =(cells, )\r\n p.setElementType(regions=pickedRegions, elemTypes=(elemType1, elemType2, elemType3))\r\n p.seedPart(size=size, deviationFactor=0.1, minSizeFactor=0.1)\r\n p.generateMesh()\r\n \r\n#------------------------------------------------------------------------------\r\n\r\ndef Create_Mesh_Shell(model,part,size):\r\n p = mdb.models[model].parts[part]\r\n elemType1 = mesh.ElemType(elemCode=S4R, elemLibrary=STANDARD, secondOrderAccuracy=OFF, hourglassControl=DEFAULT)\r\n elemType2 = mesh.ElemType(elemCode=S3, elemLibrary=STANDARD)\r\n faces = p.faces[:]\r\n pickedRegions =(faces, )\r\n p.setElementType(regions=pickedRegions, 
elemTypes=(elemType1, elemType2))\r\n p.seedPart(size=size, deviationFactor=0.1, minSizeFactor=0.1)\r\n p.generateMesh()\r\n\r\ndef Create_SPLA(model,instance_name,set_name,load_name,step_name,load):\r\n a = mdb.models[model].rootAssembly\r\n region = a.instances[instance_name].sets[set_name]\r\n mdb.models[model].ConcentratedForce(name=load_name, createStepName=step_name, region=region, cf1=-load, distributionType=UNIFORM, field='', localCsys=None)\r\n\r\n\r\ndef Create_Pressure_Load(model,instance_name,load_name,step_name,surface,load):\r\n a = mdb.models[model].rootAssembly\r\n region = a.instances[instance_name].surfaces[surface]\r\n mdb.models[model].Pressure(name=load_name, createStepName=step_name, region=region, distributionType=UNIFORM, field='', magnitude=load, amplitude=UNSET) \r\n\r\ndef CreateCutout(model,part,radius_cutout,id_plane,edge,x,y,z):\r\n p = mdb.models[model].parts[part]\r\n e, d = p.edges, p.datums\r\n t = p.MakeSketchTransform(sketchPlane=d[id_plane], sketchUpEdge=edge, sketchPlaneSide=SIDE1, sketchOrientation=RIGHT, origin=(x, y, z))\r\n s = mdb.models[model].ConstrainedSketch(name='__profile__', sheetSize=2000.0, gridSpacing=20.0, transform=t)\r\n g, v, d1, c = s.geometry, s.vertices, s.dimensions, s.constraints\r\n s.setPrimaryObject(option=SUPERIMPOSE)\r\n p.projectReferencesOntoSketch(sketch=s, filter=COPLANAR_EDGES)\r\n s.CircleByCenterPerimeter(center=(0.0, 0.0), point1=(radius_cutout, 0.0))\r\n p.CutExtrude(sketchPlane=d[id_plane], sketchUpEdge=edge, sketchPlaneSide=SIDE1, sketchOrientation=RIGHT, sketch=s, flipExtrudeDirection=OFF)\r\n s.unsetPrimaryObject()\r\n del mdb.models[model].sketches['__profile__']\r\n\r\ndef AssignStack(model,part,face):\r\n p = mdb.models[model].parts[part]\r\n c = p.cells[:]\r\n p.assignStackDirection(referenceRegion=face, cells=c)\r\n\r\ndef CreateJob(model,job_name,cpu):\r\n a = mdb.models[model].rootAssembly\r\n mdb.Job(name=job_name, model=model, description='', type=ANALYSIS, atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90, memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True, explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF, modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='', scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT, numCpus=cpu, numDomains=cpu, numGPUs=0)\r\n\r\ndef SubmitJob(job_name):\r\n mdb.jobs[job_name].submit()\r\n mdb.jobs[job_name].waitForCompletion()\r\n\r\n\r\ndef Create_Material_Isotropic(model,material_name_isotropic,E,nu,Y):\r\n mdb.models[model].Material(name=material_name_isotropic)\r\n mdb.models[model].materials[material_name_isotropic].Elastic(table=((E, nu), ))\r\n #mdb.models[model].materials[material_name_isotropic].Plastic(table=((Y, 0.0), ))\r\n\r\ndef Create_Isotropic_Section(model,section_name,material_name_isotropic):\r\n mdb.models[model].HomogeneousSolidSection(name=section_name, material=material_name_isotropic, thickness=None)\r\n\r\ndef Create_Isotropic_Section_2D(model,section_name,material_name_isotropic,thickness):\r\n mdb.models[model].HomogeneousShellSection(name=section_name, preIntegrate=OFF, material=material_name_isotropic, thicknessType=UNIFORM, thickness=thickness, thicknessField='', nodalThicknessField='', idealization=NO_IDEALIZATION, poissonDefinition=DEFAULT, thicknessModulus=None, temperature=GRADIENT, useDensity=OFF, integrationRule=SIMPSON, numIntPts=5)\r\n\r\ndef Assign_Isotropic_Material(model,part,set_name,section_name):\r\n p = mdb.models[model].parts[part]\r\n region = p.sets[set_name]\r\n 
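# the section is assigned at the shell mid-surface (offset 0.0), so the thickness builds up symmetrically about the reference geometry\r\n 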
p.SectionAssignment(region=region, sectionName=section_name, offset=0.0, offsetType=MIDDLE_SURFACE, offsetField='', thicknessAssignment=FROM_SECTION)\r\n\r\ndef Deactivate_BC(model,BC_name,step_name):\r\n mdb.models[model].boundaryConditions[BC_name].deactivate(step_name)\r\n\r\ndef Rotate_Instance(model,instance,x,y,z,angle):\r\n a = mdb.models[model].rootAssembly\r\n a.rotate(instanceList=(instance, ), axisPoint=(0.0, 0.0, 0.0), axisDirection=(x, y, z), angle=angle)\r\n \r\n \r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Translate_Instance(model,instance,x,y,z):\r\n a = mdb.models[model].rootAssembly\r\n a.translate(instanceList=(instance, ), vector=(x, y, z))\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Boolean_Merge(model,instance_merge,instance_1,instance_2):\r\n a = mdb.models[model].rootAssembly\r\n a.InstanceFromBooleanMerge(name=instance_merge, instances=(a.instances[instance_1], a.instances[instance_2], ), keepIntersections=ON, originalInstances=SUPPRESS, domain=GEOMETRY)\r\n \r\n#------------------------------------------------------------------------------\r\n\r\ndef Open_ODB_and_Write_NodeSet_data_to_text(model,step_name,variable_name,set_name,Variable_component):\r\n # open ODB file - ABAQUS Result file\r\n odb = session.openOdb(str(model)+'.odb')\r\n \r\n # list for the VARIABLE you want to evaluate\r\n Variable_v = []\r\n \r\n # analysis step for your VARIABLE\r\n lastStep=odb.steps[step_name]\r\n \r\n # loop over all increments of the analysis step and save VARIABLE information from each increment\r\n for x in range(len(lastStep.frames)):\r\n lastFrame = lastStep.frames[x]\r\n Variable = lastFrame.fieldOutputs[variable_name]\r\n center = odb.rootAssembly.nodeSets[set_name]\r\n centerRForce = Variable.getSubset(region=center)\r\n \r\n # loop over the VARIABLE and save component (x,y,z - 0,1,2) to list\r\n for i in centerRForce.values:\r\n Variable_vr = [i.data[Variable_component]]\r\n Variable_v = Variable_v + Variable_vr\r\n \r\n # Max value of Variable_v\r\n Max_Variable = [np.max(Variable_v)] \r\n Max_Variable_v = [Max_Variable]\r\n \r\n # write VARIABLE - component to text file\r\n \r\n np.savetxt(str(variable_name)+'_'+str(myString)+'.txt',Variable_v)\r\n return Max_Variable\r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef Write_Variable_to_text(variable,variable_name):\r\n \r\n # write VARIABLE - component to text file\r\n \r\n np.savetxt(str(variable_name)+'_'+str(myString)+'.txt',variable) \r\n \r\n#------------------------------------------------------------------------------\r\n#------------------------------------------------------------------------------\r\n\r\n# variables\r\n\r\n#------------------------------------------------------------------------------\r\n#------------------------------------------------------------------------------\r\n\r\n\r\n# This script can only create truncated cones and cylinders,\r\n# so check that myLength is long enough (or that the absolute value of mySemi_Vertex_Angle is small enough)\r\n# myRadius_r cannot be zero or negative\r\n\r\nmyRadius = 33.0\r\nmyThickness = 0.1\r\nmyLength = 100.0\r\nmySemi_Vertex_Angle = 0.0\r\nmySlant_Length = myLength/np.cos(mySemi_Vertex_Angle*np.pi/180.0)\r\nmyRadius_r = myRadius+myLength*tan(mySemi_Vertex_Angle*np.pi/180.0)\r\n\r\n# shim parameters in degrees\r\n\r\nshim_width = 2 \r\n\r\n\r\nmyPart = \"Cone\"\r\nmyPart_2 = \"Shim\"\r\nmyPart_3 = 
\"Cone_&_Shim\"\r\nmyPart_4 = \"Plate\"\r\n# material parameters\r\n\r\nmyE11 = 208000\r\nmyE22 = 208000\r\nmyNu12 = 0.3\r\nmyG12 = 80000\r\nmyG13 = 80000\r\nmyG23 = 80000\r\n\r\nmyE = 208000\r\nmyNu = 0.3\r\nmyY = 208\r\n\r\n\r\nmyAngle = [45,-45,0,90,90,0,-45,45]\r\n\r\nmyPlyNumber = len(myAngle)\r\n\r\n\r\nMesh_Size = 0.91\r\n\r\nN = []\r\nP = []\r\nfor ic in range(1,21,1):\r\n \r\n myString = \"GNIA_SBPA_Loop_\"+str(ic)\r\n mdb.Model(name=myString)\r\n \r\n # Imperfection Parameter\r\n myPerturbation = 0.005*ic\r\n \r\n Create_Part_2D_Cone(myRadius,myLength,mySemi_Vertex_Angle,myPart,myString)\r\n \r\n myID_1 = Create_Datum_Plane_by_Principal(XZPLANE,myPart,myString,myLength/2.0)\r\n myID_2 = Create_Datum_Plane_by_Principal(XYPLANE,myPart,myString,0.0)\r\n myID_3 = Create_Datum_Plane_by_Principal(YZPLANE,myPart,myString,0.0)\r\n \r\n Create_Set_Edge(myRadius_r,myLength,0.0,myString,myPart,\"Set-RP-2\")\r\n Create_Set_Edge(myRadius,0.0,0.0,myString,myPart,\"Set-RP-1\")\r\n Create_Set_External_Surface((myRadius+myRadius_r)/2.0,myLength/2.0,0.0,myString,myPart,\"Outer_Surface\")\r\n Create_Set_Internal_Surface((myRadius+myRadius_r)/2.0,myLength/2.0,0.0,myString,myPart,\"Internal_Surface\")\r\n Create_Set_All_Faces(myString,myPart,\"Cone_2D\")\r\n \r\n \r\n Create_Material_Data_2D(myString,\"CFRP\",myE11,myE22,myNu12,myG12,myG13,myG23)\r\n Create_Material_Isotropic(myString,\"Steel\",myE,myNu,myY)\r\n \r\n #-------------------------------------------------\r\n \r\n # SBPA modification - start\r\n \r\n #-------------------------------------------------\r\n \r\n Create_Isotropic_Section_2D(myString,\"Isotropic_section\",\"Steel\",100)\r\n \r\n \r\n Create_Part_Shim(myRadius,myPerturbation,myPart_2,myString,shim_width)\r\n Create_Assembly(myString,myPart,\"Cone\")\r\n Create_Assembly(myString,myPart_2,\"Shim\")\r\n Rotate_Instance(myString,'Shim',0,1,0,shim_width/2.0)\r\n Rotate_Instance(myString,'Cone',0,1,0,180.0)\r\n Translate_Instance(myString,'Shim',0,-myPerturbation,0)\r\n Boolean_Merge(myString,myPart_3,myPart,myPart_2)\r\n Create_Set_Edge(myRadius,0.0,0.0,myString,myPart_3,\"Set-Shim-Top-Edge\")\r\n Create_Set_Edge(myRadius,-myPerturbation,0.0,myString,myPart_3,\"Set-Shim-Bottom-Edge\")\r\n Create_Set_Edge(myRadius*np.cos(shim_width/2.0*np.pi/180.0),-myPerturbation/2.0,myRadius*np.sin(shim_width/2.0*np.pi/180.0),myString,myPart_3,\"Set-Shim-Front-Edge\")\r\n Create_Set_Edge(myRadius*np.cos(-shim_width/2.0*np.pi/180.0),-myPerturbation/2.0,myRadius*np.sin(-shim_width/2.0*np.pi/180.0),myString,myPart_3,\"Set-Shim-Back-Edge\")\r\n Create_Part_2D_Plate(myRadius,myPart_4,myString)\r\n Create_Assembly(myString,myPart_4,\"Plate\")\r\n Translate_Instance(myString,'Plate',0,-myPerturbation,-myRadius)\r\n Create_Set_All_Faces(myString,myPart_4,\"Plate_2D\")\r\n Assign_Isotropic_Material(myString,myPart_4,\"Plate_2D\",\"Isotropic_section\")\r\n Create_Set_Face(myRadius,-myPerturbation/2.0,0.0,myString,myPart_3,\"Shim_Face\")\r\n Assign_Isotropic_Material(myString,myPart_3,\"Shim_Face\",\"Isotropic_section\")\r\n Create_Set_Internal_Surface(0.0,0.0,0.0,myString,myPart_4,\"Plate_Surface\")\r\n \r\n ##------------------------------------------------\r\n \r\n myRP1,myRP_Position1 = Create_Reference_Point(0.0,0.0,0.0,myString,\"RP-1\")\r\n myRP2,myRP_Position2 = Create_Reference_Point(0.0,myLength,0.0,myString,\"RP-2\")\r\n \r\n ##------------------------------------------------\r\n \r\n # Create Sets\r\n \r\n a=mdb.models[myString].parts[myPart_3]\r\n a.SetByBoolean(name='Set-No-Shim-Top-Edge', 
operation=DIFFERENCE, sets=(a.sets['Set-RP-1'], a.sets['Set-Shim-Top-Edge'], ))\r\n a.SetByBoolean(name='Set-SBPA-Edge', sets=(a.sets['Set-No-Shim-Top-Edge'], a.sets['Set-Shim-Back-Edge'], a.sets['Set-Shim-Bottom-Edge'], a.sets['Set-Shim-Front-Edge'], ))\r\n \r\n # Create Interaction and contact\r\n \r\n mdb.models[myString].ContactProperty('IntProp-1')\r\n mdb.models[myString].interactionProperties['IntProp-1'].TangentialBehavior(formulation=FRICTIONLESS)\r\n mdb.models[myString].interactionProperties['IntProp-1'].NormalBehavior(pressureOverclosure=HARD, allowSeparation=ON, constraintEnforcementMethod=DEFAULT)\r\n a = mdb.models[myString].rootAssembly\r\n region1=a.instances['Plate'].surfaces['Plate_Surface']\r\n region2=a.instances['Cone_&_Shim-1'].sets['Set-SBPA-Edge']\r\n mdb.models[myString].SurfaceToSurfaceContactStd(name='Int-1', createStepName='Initial', master=region1, slave=region2, sliding=FINITE, thickness=ON, interactionProperty='IntProp-1', adjustMethod=NONE, initialClearance=OMIT, datumAxis=None, clearanceRegion=None)\r\n \r\n # Rigid Body Constraint - Tie\r\n \r\n region1=a.instances['Cone_&_Shim-1'].sets['Set-RP-2']\r\n region2=a.sets['RP-2']\r\n mdb.models[myString].RigidBody(name='Constraint-1', refPointRegion=region2, tieRegion=region1)\r\n \r\n region1=a.instances['Plate'].sets['Plate_2D']\r\n region2=a.sets['RP-1']\r\n mdb.models[myString].RigidBody(name='Constraint-2', refPointRegion=region2, tieRegion=region1)\r\n \r\n #-------------------------------------------------\r\n \r\n # SBPA modification - end\r\n \r\n #-------------------------------------------------\r\n \r\n Create_Analysis_Step(myString,\"Step-1\",\"Initial\",0.01,0.01,1E-005,300,ON)\r\n \r\n myID_1 = Create_Datum_Plane_by_Principal(XZPLANE,myPart_3,myString,myLength/2.0)\r\n myID_2 = Create_Datum_Plane_by_Principal(XYPLANE,myPart_3,myString,0.0)\r\n myID_3 = Create_Datum_Plane_by_Principal(YZPLANE,myPart_3,myString,0.0)\r\n \r\n Create_Boundary_Condition_by_Instance(myString,\"Cone_&_Shim-1\",\"Set-SBPA-Edge\",\"BC-Set-RP-1\",\"Initial\",SET,UNSET,SET,SET,SET,SET)\r\n Create_Boundary_Condition_by_Instance(myString,\"Cone_&_Shim-1\",\"Set-RP-2\",\"BC-Set-RP-2\",\"Initial\",SET,SET,SET,SET,SET,SET)\r\n Create_Boundary_Condition_by_RP(myString,\"RP-1\",\"Displacement_Load\",\"Step-1\",SET,1,SET,SET,SET,SET)\r\n \r\n Create_Partion_by_Plane_2D(myString,myPart_3,myID_2)\r\n myEdge = Create_Set_Edge((myRadius+myRadius_r)/2.0,myLength/2.0,0.0,myString,myPart_3,\"Set-Top-Edge\")\r\n Create_Partion_by_Plane_2D(myString,myPart_3,myID_3)\r\n Create_Partion_by_Plane_2D(myString,myPart_3,myID_1)\r\n \r\n #Create_Set_Vertice_2((myRadius+myRadius_r)/2.0,myLength/2.0,0.0,myString,\"Cone\",\"SPLA_Point\")\r\n #Create_Boundary_Condition_for_Assembly(myString,\"SPLA_Point\",\"SPDA-Imperfection\",\"Step-1\",-myPerturbation,UNSET,UNSET,UNSET,UNSET,UNSET)\r\n #Create_SPLA(myString,\"Cylinder-1\",\"SPLA_Point\",\"BC-Imperfection\",\"Step-1\",myPerturbation)\r\n #Create_Pressure_Load(myString,\"Cone-1-1\",\"External_Pressure\",\"Step-1\",\"Outer_Surface\",1.0)\r\n Create_Composite_Layup_2D(myString,myPart_3,\"Cone_2D\",\"Layup\",myPlyNumber,\"CFRP\",myThickness/myPlyNumber,myAngle)\r\n Create_Mesh_Shell(myString,myPart_3,Mesh_Size)\r\n Create_Mesh_Shell(myString,myPart_4,Mesh_Size)\r\n #------------------------------------------------------------------------------\r\n #------------------------------------------------------------------------------\r\n \r\n # create Job for analysis\r\n \r\n 
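# (the SubmitJob call below is commented out, so each loop pass only writes the job definition without running it)\r\n 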
#------------------------------------------------------------------------------\r\n #------------------------------------------------------------------------------\r\n \r\n CreateJob(myString,myString,8)\r\n \r\n #SubmitJob(myString)\r\n \r\n #------------------------------------------------------------------------------\r\n #------------------------------------------------------------------------------\r\n \r\n # evaluate ABAQUS results\r\n \r\n #------------------------------------------------------------------------------\r\n #------------------------------------------------------------------------------\r\n \r\n N.append(Open_ODB_and_Write_NodeSet_data_to_text(myString,\"Step-1\",\"RF\",\"RP-1\",1))\r\n P.append(myPerturbation)\r\n \r\n Write_Variable_to_text(N,\"Buckling Load\")\r\n Write_Variable_to_text(P,\"Boundary Perturbation Height\")\r\n","sub_path":"Script_2D_SBPA_cone_001.py","file_name":"Script_2D_SBPA_cone_001.py","file_ext":"py","file_size_in_byte":30910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70688925","text":"def main():\n import sys\n\n import argparse\n parser = argparse.ArgumentParser(\n usage=\"%(prog)s [options] [-m] SCRIPT-OR-MODULE-TO-RUN [SCRIPT_ARGS]\"\n )\n parser.add_argument(\"-s\", \"--steal-output\", action=\"store_true\"),\n\n # note: we're implementing -m as a boolean flag, mimicking pdb's behavior,\n # and makes it possible without much fuss to support cases like:\n # python -m pudb -m http.server -h\n # where the -h will be passed to the http.server module\n parser.add_argument(\"-m\", \"--module\", action=\"store_true\",\n help=\"Debug as module or package instead of as a script\")\n\n parser.add_argument(\"-le\", \"--log-errors\", nargs=1, metavar=\"FILE\",\n help=\"Log internal errors to the given file\")\n parser.add_argument(\"--pre-run\", metavar=\"COMMAND\",\n help=\"Run command before each program run\",\n default=\"\")\n parser.add_argument(\"script_args\", nargs=argparse.REMAINDER,\n help=\"Arguments to pass to script or module\")\n\n options = parser.parse_args()\n args = options.script_args\n\n if options.log_errors:\n from pudb.lowlevel import setlogfile\n setlogfile(options.log_errors[0])\n\n options_kwargs = {\n \"pre_run\": options.pre_run,\n \"steal_output\": options.steal_output,\n }\n\n if len(args) < 1:\n parser.print_help()\n sys.exit(2)\n\n mainpyfile = args[0]\n sys.argv = args\n\n if options.module:\n from pudb import runmodule\n runmodule(mainpyfile, **options_kwargs)\n else:\n from os.path import exists\n if not exists(mainpyfile):\n print(\"Error: %s does not exist\" % mainpyfile, file=sys.stderr)\n sys.exit(1)\n\n from pudb import runscript\n runscript(mainpyfile, **options_kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pudb/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267360552","text":"\n\nimport datetime\n\nfrom django.db import connections\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\nfrom rest_framework.test import APIClient\n\nfrom dqa.views.api.scans import scan_post_update, scan_status, scan_order\n\n\nclass Testmetrics(TestCase):\n\n databases = ['metrics', 'metricsold', 'default']\n\n @classmethod\n def setUpClass(cls):\n super(Testmetrics, cls).setUpClass()\n\n @staticmethod\n def 
create_token():\n \"\"\" Generate an authorization token to test protected API paths \"\"\"\n test_user = User.objects.create_user(username='testuser1', password='123456')\n token_object = Token.objects.create(user=test_user)\n return token_object.key\n\n @staticmethod\n def clean_scan_tables():\n with connections['metricsold'].cursor() as cursor:\n sql = f\"TRUNCATE TABLE public.tblscanmessage CASCADE;\"\n cursor.execute(sql)\n sql = f\"TRUNCATE TABLE public.tblscan CASCADE;\"\n cursor.execute(sql)\n\n def test_scan_update(self):\n self.clean_scan_tables()\n output = {}\n status = scan_post_update(data=output)\n self.assertTrue(status.startswith('KeyError:'))\n output = {'start_date': datetime.date(2021, 1, 1),\n 'end_date': datetime.date(2021, 1, 15),\n 'priority': 55,\n 'network_filter': 'IU',\n 'station_filter': 'AAAA',\n 'location_filter': '00',\n 'last_updated': '2021-01-20 01:01',\n 'ordering': '4:55:2021-01-15'\n }\n status = scan_post_update(data=output)\n self.assertEqual(status, 201)\n url = reverse('scansapi')\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n resp_data = resp.data['data'][0]\n del resp_data['id']\n del resp_data['status']\n del resp_data['message']\n self.assertEqual(output, resp_data, msg=\"Scan update, return not same as saved\")\n\n def test_scans(self):\n self.clean_scan_tables()\n url = reverse('scansapi')\n output = {'start_date': datetime.date(2022, 1, 1),\n 'end_date': datetime.date(2022, 1, 15),\n 'priority': 47,\n 'network_filter': 'IU',\n 'station_filter': 'OMNA',\n 'location_filter': '00,10',\n 'last_updated': '2022-01-20 01:01',\n 'ordering': '4:47:2022-01-15'\n }\n # requires auth\n apiclient = APIClient()\n post_resp = apiclient.post(url, data=output, format='json')\n self.assertEqual(post_resp.status_code, 401)\n # POST with auth\n apiclient.credentials(HTTP_AUTHORIZATION='Token ' + self.create_token())\n post_resp = apiclient.post(url, data=output, format='json')\n self.assertEqual(post_resp.status_code, 201)\n # GET does not require auth\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n resp_data = resp.data['data'][0]\n del resp_data['id']\n del resp_data['status']\n del resp_data['message']\n self.assertEqual(output, resp_data, msg=\"Scan read, return not same as saved\")\n\n def test_scan_status(self):\n status = scan_status(False, False, \"\", None, None)\n self.assertEqual('Pending', status, msg=\"Error in Pending\")\n status = scan_status(True, False, \"\", None, None)\n self.assertEqual('Complete', status, msg=\"Error in Complete\")\n status = scan_status(False, True, \"\", None, None)\n self.assertEqual('Running', status, msg=\"Error in Running\")\n status = scan_status(False, True, \"Got lost\", None, None)\n self.assertEqual('Error', status, msg=\"Error in Error\")\n status = scan_status(False, True, \"Got lost\", 100, 50)\n self.assertEqual('Error: 50.0%', status, msg=\"Error in Error\")\n\n def test_scan_order(self):\n # order = scan_order(finished, taken, message, priority, end_date)\n order = scan_order(False, False, \"\", 99, \"2022-01-01\")\n self.assertEqual('4:99:2022-01-01', order, msg=\"Error scan order Pending\")\n order = scan_order(True, False, \"\", 98, \"2022-01-02\")\n self.assertEqual('0:98:2022-01-02', order, msg=\"Error scan order Complete\")\n order = scan_order(False, True, \"\", 97, \"2022-01-03\")\n self.assertEqual('7:97:2022-01-03', order, msg=\"Error scan order Running\")\n order = scan_order(False, True, \"Got really lost\", 96, \"2022-01-04\")\n 
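# ordering strings sort lexicographically as status:priority:end_date, with 9 marking the error state\n 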
self.assertEqual('9:96:2022-01-04', order, msg=\"Error scan order Error\")\n order = scan_order(True, True, \"\", 0, \"\")\n self.assertEqual('0:0:', order, msg=\"Error scan order scrambled\")\n","sub_path":"dqa/tests/api/test_api_scans.py","file_name":"test_api_scans.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262103772","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.patheffects as path_effects\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nimport math\n\nimport io\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n\n#subplot utils\ndef add_outline_path_effect(o, lw=4):\n o.set_path_effects([path_effects.Stroke(linewidth=lw, foreground='black'),\n path_effects.Normal()])\n \ndef add_text_to_subplot(ax, pos, label, size='x-large', color='white'):\n text = ax.text(pos[0], pos[1], label, size=size, weight='bold', color=color, va='top')\n add_outline_path_effect(text, 2)\n \ndef hide_subplot_axes(ax):\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\ndef plot_image_tensor_in_subplot(ax, img_tensor):\n ima= img_tensor.cpu().numpy().transpose((1,2,0))\n ax.imshow(ima)\n\ndef show_img_in_subplot(ax, pil_image):\n im = np.array(pil_image)\n ax.imshow(im)\n \ndef draw_bbox(ax, bbox, color='white'): \n #patches expects (x,y), w, h\n rect_patch = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],fill=False, lw=2, ec=color) \n patch = ax.add_patch(rect_patch)\n add_outline_path_effect(patch, 4)\n \ndef plot_bbox_annotation(ax, bb, cat_label):\n draw_bbox(ax, bb)\n add_text_to_subplot(ax, (bb[0], bb[1]), cat_label)\n\ndef tensor_to_scalar(t):\n if t.dim()==0:\n return t.item()\n else:\n return t.numpy() \n\n#bbox utils \ndef yxyx_to_xywh(ann):\n return [ann[1], ann[0], ann[3]-ann[1], ann[2]-ann[0]]\n\n \n#plots used in largest item classifier \ndef plot_trn_image_with_annotations(im_id, jpeg_dic, JPEG_DIR, annotations_dic, category_dic, figsize=(10, 10)):\n fig, ax = plt.subplots(1, figsize=figsize)\n show_img_in_subplot(ax, Image.open(JPEG_DIR/jpeg_dic[im_id]['file_name']))\n hide_subplot_axes(ax)\n \n annotations = [annotations_dic[im_id]] if(type(annotations_dic[im_id]) == tuple) else annotations_dic[im_id]\n \n for ann in annotations:\n plot_bbox_annotation(ax, ann[0], category_dic[ann[1]])\n \n plt.show()\n\ndef plot_horizontal_bar_chart(counts, labels, title='', x_tick_step=200): \n sorted_items = sorted(zip(counts, labels), reverse=True)\n sorted_counts, sorted_labels = zip(*sorted_items)\n \n y_pos = np.arange(len(sorted_labels))\n \n fig, ax = plt.subplots(figsize=(15,8))\n ax.barh(y_pos, sorted_counts)\n ax.set_yticks(y_pos)\n ax.set_yticklabels(sorted_labels)\n ax.set_xticks(range(0, int(sorted_counts[0]) + x_tick_step, x_tick_step))\n ax.invert_yaxis() \n ax.set_facecolor('#f7f7f7')\n ax.set_title(title)\n \n for idx, val in enumerate(ax.patches):\n x_value = val.get_width() + 5\n y_value = 0.1 + val.get_y() + val.get_height()/2\n ax.text(x_value, y_value, int(sorted_counts[idx]))\n\n plt.show() \n \ndef plot_model_predictions_on_sample_batch(batch, pred_labels, actual_labels, get_label_fn, n_items=12, plot_from=0, figsize=(16,12)):\n n_rows, n_cols = (1,n_items) if n_items<=4 else (math.ceil(n_items/4), 4)\n \n fig, axes = 
plt.subplots(n_rows, n_cols, figsize=figsize)\n for i,ax in enumerate(axes.flat):\n plot_idx = plot_from + i\n plot_image_tensor_in_subplot(ax, batch[plot_idx])\n\n pred_label = get_label_fn(tensor_to_scalar(pred_labels[plot_idx])) \n actual_label = get_label_fn(tensor_to_scalar(actual_labels[plot_idx])) \n\n hide_subplot_axes(ax)\n add_text_to_subplot(ax, (0,0), 'Pred: '+pred_label)\n add_text_to_subplot(ax, (0,30), 'Actual: '+actual_label, color='yellow')\n\n plt.tight_layout()\n \n \n# plots used in multi class classifier \ndef add_bar_height_labels(ax, rects):\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off\n\n for rect in rects:\n height = int(rect.get_height())\n ax.text(rect.get_x() + rect.get_width()*offset['center'], 1.01*height,\n '{}'.format(height), ha=ha['center'], va='bottom')\n \ndef plot_class_wise_preds_gt_true_preds(predictions, actual_instances, correct_predictions, categories): \n ind = np.arange(len(predictions)) # the x locations for the groups\n width = 0.3 # the width of the bars\n\n fig, ax = plt.subplots()\n fig.set_size_inches((25,10))\n\n rects1 = ax.bar(ind - width, predictions, width, \n label='Total class predictions')\n\n rects2 = ax.bar(ind, correct_predictions, width, \n label='Correct class predictions')\n \n rects3 = ax.bar(ind + width, actual_instances, width, \n label='Actual class instances')\n\n ax.set_xticks(ind)\n ax.set_xticklabels(categories)\n ax.legend()\n\n add_bar_height_labels(ax, rects1)\n add_bar_height_labels(ax, rects2)\n add_bar_height_labels(ax, rects3)\n plt.show()\n\ndef plot_class_precision_recall_curve(id, cats, ds_gt_label_logits, ds_pred_scores): \n sk_y_true, sk_y_pred = ds_gt_label_logits[:,id].numpy(), ds_pred_scores[:,id].numpy()\n \n average_precision = average_precision_score(sk_y_true, sk_y_pred)\n precision, recall, thresholds = precision_recall_curve(sk_y_true, sk_y_pred)\n \n plt.step(recall, precision, color='b', alpha=0.2,\n where='post')\n plt.fill_between(recall, precision, step='post', alpha=0.2,\n color='b')\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('Precision-Recall curve for class \"'+cats[id]+'\" : AP={0:0.2f}'.format(average_precision))\n\ndef get_graph_data_for_multi_class_pr_curves(ds_gt_label_logits, ds_pred_scores, n_classes): \n precision = dict()\n recall = dict()\n average_precision = dict()\n\n for i in range(n_classes):\n precision[i], recall[i], _ = precision_recall_curve(ds_gt_label_logits[:, i].numpy(), ds_pred_scores[:, i].numpy())\n average_precision[i] = average_precision_score(ds_gt_label_logits[:, i].numpy(), ds_pred_scores[:, i].numpy())\n\n\n # A \"micro-average\": quantifying score on all classes jointly\n precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(ds_gt_label_logits.numpy().ravel(), ds_pred_scores.numpy().ravel())\n average_precision[\"micro\"] = average_precision_score(ds_gt_label_logits.numpy(), ds_pred_scores.numpy(), average=\"micro\")\n\n print('Average precision score, micro-averaged over all classes: {0:0.2f}'\n .format(average_precision[\"micro\"]))\n \n return precision, recall, average_precision\n\ndef plot_average_precision_score_over_all_classes(precision, recall, average_precision):\n plt.figure()\n plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,\n where='post')\n plt.fill_between(recall[\"micro\"], precision[\"micro\"], step='post', alpha=0.2,\n color='b')\n\n 
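# fixed [0, 1] axis limits keep the per-class precision-recall curves directly comparable\n 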
plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title(\n 'Average precision score, micro-averaged over all classes: AP={0:0.2f}'\n .format(average_precision[\"micro\"]))\n plt.show()\n\ndef plot_precision_recall_curves_for_multi_class_labels(precision, recall, average_precision, cats):\n n_classes = len(cats)\n \n plt.figure(figsize=(20, 20))\n lines = []\n labels = []\n \n l, = plt.plot(recall[\"micro\"], precision[\"micro\"], color='gold', lw=2)\n lines.append(l)\n labels.append('micro-average Precision-recall (area = {0:0.2f})'\n ''.format(average_precision[\"micro\"]))\n\n \n for i in range(n_classes):\n color_RGB = np.random.rand(3)\n l, = plt.plot(recall[i], precision[i], color=color_RGB, lw=2, label=cats[i])\n lines.append(l)\n labels.append('\"{0}\" (area = {1:0.2f})'\n ''.format(cats[i], average_precision[i]))\n\n fig = plt.gcf()\n fig.subplots_adjust(bottom=0.25)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Precision Recall curves for all classes')\n plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=15))\n\n plt.show()\n\ndef plot_precision_recall_vs_threshold_for_all_classes(n_classes, ds_gt_label_logits, ds_pred_scores, cats):\n n_rows, n_cols = (1, n_classes) if n_classes < 4 else (math.ceil(n_classes/4), 4)\n \n fig, axes = plt.subplots(n_rows, n_cols, figsize=(16, 16))\n for i,ax in enumerate(axes.flat):\n precisions, recalls, thresholds = precision_recall_curve(ds_gt_label_logits[:, i].numpy(), ds_pred_scores[:,i].numpy())\n class_label = cats[i]\n\n ax.set_title('Class: \"'+class_label+'\"')\n ax.plot(thresholds, precisions[:-1], 'b--', label='precision')\n ax.plot(thresholds, recalls[:-1], 'g--', label = 'recall')\n ax.set_ylim([0,1])\n\n if i==0:\n ax.set_xlabel('Threshold')\n ax.legend(loc='upper left')\n\n plt.tight_layout()\n plt.show() \n \n\n#plots used in largest item bbox \ndef plot_image_with_bbox(img_tensor, bbox_yxyx):\n bbox = yxyx_to_xywh(bbox_yxyx)\n \n fig, ax = plt.subplots(1)\n plot_image_tensor_in_subplot(ax, img_tensor)\n draw_bbox(ax, bbox)\n hide_subplot_axes(ax)\n plt.show() \n \ndef plot_bbox_model_predictions_on_sample_batch(batch, pred_labels, actual_labels, n_items=12, plot_from=0, figsize=(16,12)):\n n_rows, n_cols = (1,n_items) if n_items<=4 else (math.ceil(n_items/4), 4) \n fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize)\n \n for i,ax in enumerate(axes.flat):\n plot_idx = plot_from + i\n \n pred_bbox = [int(x) for x in yxyx_to_xywh(pred_labels[plot_idx])]\n actual_bbox = yxyx_to_xywh(actual_labels[plot_idx])\n \n plot_image_tensor_in_subplot(ax, batch[plot_idx])\n draw_bbox(ax, pred_bbox)\n draw_bbox(ax, actual_bbox, color='yellow')\n \n add_text_to_subplot(ax, (pred_bbox[0], pred_bbox[1]), 'Pred:')\n add_text_to_subplot(ax, (actual_bbox[0], actual_bbox[1]), 'Actual:', color='yellow')\n \n hide_subplot_axes(ax)\n \n plt.tight_layout() \n \n \n#plots used in Concat model, largest item bbox plus classifier\ndef plot_concat_model_predictions_on_sample_batch(batch, pred_labels, actual_labels, get_label_fn, n_items=12, plot_from=0, figsize=(16,12)):\n pred_bboxes, pred_cat_ids = pred_labels\n actual_bboxes, actual_cat_ids = actual_labels\n \n n_rows, n_cols = (1,n_items) if n_items<=4 else (math.ceil(n_items/4), 4) \n fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize)\n \n for i,ax in enumerate(axes.flat):\n plot_idx = plot_from + i\n \n pred_bbox = [int(x) for x in yxyx_to_xywh(pred_bboxes[plot_idx])]\n 
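# both boxes are converted from yxyx to xywh for matplotlib; predictions are drawn in white, ground truth in yellow\n 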
actual_bbox = yxyx_to_xywh(actual_bboxes[plot_idx])\n \n plot_image_tensor_in_subplot(ax, batch[plot_idx])\n draw_bbox(ax, pred_bbox)\n draw_bbox(ax, actual_bbox, color='yellow')\n \n add_text_to_subplot(ax, (pred_bbox[0], pred_bbox[1]), 'Pred:'+get_label_fn(tensor_to_scalar(pred_cat_ids[plot_idx])))\n add_text_to_subplot(ax, (actual_bbox[0], actual_bbox[1]), 'Actual:'+get_label_fn(tensor_to_scalar(actual_cat_ids[plot_idx])) , color='yellow')\n hide_subplot_axes(ax)\n \n plt.tight_layout()\n\ndef plot_model_result_on_test_image(pred_bbox, pred_cat_id, get_label_fn, im_path):\n im = Image.open(im_path)\n w,h = im.size\n fig, ax = plt.subplots(1, 1, dpi = 100)\n \n bbox = pred_bbox[0].clone()\n bbox = bbox/224\n bbox[0] = bbox[0]*h\n bbox[1] = bbox[1]*w\n bbox[2] = bbox[2]*h\n bbox[3] = bbox[3]*w\n \n pred_bbox = [int(x) for x in yxyx_to_xywh(bbox)]\n \n show_img_in_subplot(ax, im)\n draw_bbox(ax, pred_bbox)\n\n add_text_to_subplot(ax, (pred_bbox[0], pred_bbox[1]), 'Pred:'+get_label_fn(tensor_to_scalar(pred_cat_id[0])))\n hide_subplot_axes(ax)\n\n plt.tight_layout()\n plt.show()\n\ndef get_result_on_test_image(pred_bbox, pred_cat_id, get_label_fn, im):\n w,h = im.size\n fig, ax = plt.subplots(1, 1, dpi = 100)\n \n bbox = pred_bbox[0].clone()\n bbox = bbox/224\n bbox[0] = bbox[0]*h\n bbox[1] = bbox[1]*w\n bbox[2] = bbox[2]*h\n bbox[3] = bbox[3]*w\n \n pred_bbox = [int(x) for x in yxyx_to_xywh(bbox)]\n \n show_img_in_subplot(ax, im)\n draw_bbox(ax, pred_bbox)\n\n add_text_to_subplot(ax, (pred_bbox[0], pred_bbox[1]), 'Pred:'+get_label_fn(tensor_to_scalar(pred_cat_id[0])))\n hide_subplot_axes(ax)\n\n #canvas = FigureCanvas(fig)\n #output = io.BytesIO()\n #canvas.print_png(output)\n \n buf = io.BytesIO()\n plt.savefig(buf, format='png', bbox_inches='tight')\n buf.seek(0)\n\n #plt.tight_layout()\n #buf = io.BytesIO()\n #plt.savefig(buf, format='png', bbox_inches='tight')\n #s = buf.getvalue()\n #buf.close()\n return buf\n","sub_path":"test/libs/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":13028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454568003","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 04/16/2016\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\nimport itertools\n\n__author__ = 'Nathan Starkweather'\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n_h = logging.StreamHandler()\n_f = logging.Formatter(\"%(created)s %(name)s %(levelname)s (%(lineno)s): %(message)s\")\n_h.setFormatter(_f)\nlogger.addHandler(_h)\nlogger.propagate = False\nlogger.setLevel(logging.DEBUG)\ndel _h, _f\n\nfrom matplotlib.backends import backend_tkagg\nfrom matplotlib.backend_bases import FigureManagerBase\nfrom matplotlib.ticker import NullFormatter, NullLocator\nfrom matplotlib.transforms import Bbox\n\nimport tkinter as tk\nfrom . 
import queues\n\n\nclass PlotWidgetError(Exception):\n pass\n\n\nclass _PlotWidgetFigManager(FigureManagerBase):\n pass\n\n\nclass RTPlotWidget():\n _fig_counter = itertools.count(1)\n\n __slots__ = 'frame', 'master', 'num', 'figmanager', 'figure', \\\n 'figcanvas', 'subplot', 'line', 'renderer', 'x_data', \\\n 'y_data', 'tkcanvas', 'background', 'xaxis', 'yaxis', 'all_bbox', \\\n '_setup', 'xlabel', 'ylabel', \"title_text\"\n\n def __init__(self, master, max_pts=1000):\n self.frame = tk.Frame(master)\n self.master = master\n self.num = next(self._fig_counter)\n self.figmanager = None\n self.figure = None\n self.figcanvas = None\n self.subplot = None\n self.line = None\n self.renderer = None\n self.xaxis = None\n self.yaxis = None\n self.tkcanvas = None\n self.background = None\n self.all_bbox = None\n\n if max_pts is None:\n self.x_data = queues.InfiniteBuffer()\n self.y_data = queues.InfiniteBuffer()\n else:\n self.x_data = queues.RingBuffer(max_pts)\n self.y_data = queues.RingBuffer(max_pts)\n\n self._setup = False\n\n def _cache_background(self, bbox):\n\n xaxis = self.xaxis\n yaxis = self.yaxis\n\n # cache formatters & locators\n xmjf = xaxis.get_major_formatter()\n xmjl = xaxis.get_major_locator()\n ymjf = yaxis.get_major_formatter()\n ymjl = yaxis.get_major_locator()\n\n # set null placeholders\n xaxis.set_major_formatter(NullFormatter())\n xaxis.set_major_locator(NullLocator())\n yaxis.set_major_formatter(NullFormatter())\n yaxis.set_major_locator(NullLocator())\n\n # cache background\n self.figcanvas.draw()\n background = self.figure.canvas.copy_from_bbox(bbox)\n\n # restore\n xaxis.set_major_formatter(xmjf)\n xaxis.set_major_locator(xmjl)\n yaxis.set_major_formatter(ymjf)\n yaxis.set_major_locator(ymjl)\n\n self.background = background\n\n def create_figure(self, figsize=(4, 2), dpi=None, facecolor=None,\n edgecolor=None, linewidth=0.0, frameon=None,\n subplotpars=None, tight_layout=None):\n \"\"\" Create an ordinary matplotlib figure. 
\"\"\"\n if self.figure:\n raise PlotWidgetError(\"Cannot override existing figure.\")\n f = backend_tkagg.Figure(figsize, dpi, facecolor, edgecolor, linewidth, frameon, subplotpars, tight_layout)\n self.figure = f\n return f\n\n def setup(self):\n if self.figure is None:\n self.create_figure()\n\n if self.subplot is None:\n self.create_subplot(1, 1, 1)\n\n # Explicitly named to avoid confusion later\n self.figcanvas = backend_tkagg.FigureCanvasTkAgg(self.figure, self.frame)\n self.tkcanvas = self.figcanvas._tkcanvas\n self.figmanager = _PlotWidgetFigManager(self.figcanvas, self.num)\n\n # initial draw to prepare plot background\n self.renderer = self.figcanvas.get_renderer()\n\n self.xaxis = xaxis = self.subplot.xaxis\n self.yaxis = yaxis = self.subplot.yaxis\n\n all_bbox = self._calculate_bbox()\n\n self.xlabel = xaxis.set_label_text(\"\")\n self.ylabel = yaxis.set_label_text(\"\")\n self.title_text = self.subplot.title\n self.title_text.set_text(\"\")\n\n self.figure.subplots_adjust(top=0.85, left=0.20, bottom=0.25)\n self._cache_background(all_bbox)\n self.all_bbox = all_bbox\n\n # this line must come after background is cached\n self.line, = self.subplot.plot(self.x_data.get(), self.y_data.get())\n self._setup = True\n\n def _calculate_bbox(self):\n r = self.renderer\n bboxes = self.xaxis.get_window_extent(r), self.yaxis.get_window_extent(r), self.subplot.bbox\n all_bbox = Bbox.union(bboxes)\n (x0, y0), (x1, y1) = all_bbox.get_points()\n w = x1 - x0\n h = y1 - y0\n all_bbox = Bbox.from_bounds(x0, y0, w * 1.02, h * 1.02)\n return all_bbox\n\n def _handle_text_update(self):\n self.all_bbox = self._calculate_bbox()\n self._cache_background(self.all_bbox)\n self._schedule_update()\n\n def set_xaxis(self, **kw):\n self.xlabel.set(**kw)\n self._handle_text_update()\n\n def set_xaxis_text(self, s):\n self.xlabel.set_text(s)\n self._handle_text_update()\n\n def set_yaxis(self, **kw):\n self.ylabel.set(**kw)\n self._handle_text_update()\n\n def set_yaxis_text(self, s):\n self.ylabel.set_text(s)\n self._handle_text_update()\n\n def set_title_text(self, s):\n self.title_text.set_text(s)\n self._handle_text_update()\n\n def create_subplot(self, *args, **kw):\n if self.subplot:\n raise PlotWidgetError(\"Cannot override existing subplot\")\n self.subplot = s = self.figure.add_subplot(*args, **kw)\n return s\n\n def grid(self, row=None, col=None):\n if not self._setup:\n self.setup()\n self.frame.grid(row=row, column=col)\n self.tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.frame.after(0, self._periodic_draw)\n\n def _periodic_draw(self):\n self.figure.draw(self.renderer)\n self.frame.after(5000, self._periodic_draw)\n\n def grid_forget(self):\n self.tkcanvas.grid_forget()\n self.frame.grid_forget()\n\n def set_data(self, x, y):\n self.x_data.clear()\n self.y_data.clear()\n self.x_data.extend(x)\n self.y_data.extend(y)\n self._schedule_update_data()\n\n def _schedule_update_data(self):\n self.frame.after(0, self._update_data)\n\n def extend_xy(self, x, y):\n self.x_data.extend(x)\n self.y_data.extend(y)\n self._schedule_update_data()\n\n def add_xy(self, x, y):\n self.x_data.put(x)\n self.y_data.put(y)\n self._schedule_update_data()\n\n def _update_data(self):\n self.line.set_data(self.x_data.get(), self.y_data.get())\n self.subplot.relim()\n self.subplot.autoscale_view(True, True, True)\n lower, upper = self.subplot.get_ybound()\n self.subplot.set_ylim(lower, upper + (upper - lower) * 0.02, True, None)\n self._update()\n\n def _schedule_update(self):\n self.frame.after(0, 
self._update)\n\n    def _update(self):\n        # self.figcanvas.restore_region(self.background)\n        # r = self.renderer\n        # self.xaxis.draw(r)\n        # self.yaxis.draw(r)\n        # self.line.draw(r)\n        # self.figcanvas.blit(self.all_bbox)\n        self.figcanvas.draw()\n","sub_path":"simplertplot/plotwidget.py","file_name":"plotwidget.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"650196839","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.units import Quantity\nfrom astropy.tests.helper import assert_quantity_allclose, pytest\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ...spectrum import (\n    LogEnergyAxis,\n    integrate_spectrum,\n)\nfrom ..powerlaw import power_law_energy_flux, power_law_evaluate, power_law_flux\nfrom ..models import ExponentialCutoffPowerLaw\n\n\n@requires_dependency('scipy')\ndef test_LogEnergyAxis():\n    from scipy.stats import gmean\n    energy = Quantity([1, 10, 100], 'TeV')\n    energy_axis = LogEnergyAxis(energy)\n\n    energy = Quantity(gmean([1, 10]), 'TeV')\n    pix = energy_axis.wcs_world2pix(energy.to('MeV'))\n    assert_allclose(pix, 0.5)\n\n    world = energy_axis.wcs_pix2world(pix)\n    assert_quantity_allclose(world, energy)\n\n\ndef test_integrate_spectrum():\n    \"\"\"\n    Test numerical integration against analytical solution.\n    \"\"\"\n    e1 = Quantity(1, 'TeV')\n    e2 = Quantity(10, 'TeV')\n    einf = Quantity(1E10, 'TeV')\n    e = Quantity(1, 'TeV')\n    g = 2.3\n    I = Quantity(1E-12, 'cm-2 s-1')\n\n    ref = power_law_energy_flux(I=I, g=g, e=e, e1=e1, e2=e2)\n    norm = power_law_flux(I=I, g=g, e=e, e1=e1, e2=einf)\n    f = lambda x: x * power_law_evaluate(x, norm, g, e)\n    val = integrate_spectrum(f, e1, e2)\n    assert_quantity_allclose(val, ref)\n\n\n@requires_dependency('uncertainties')\ndef test_integrate_spectrum_uncertainties():\n    \"\"\"\n    Test numerical integration against analytical solution, with\n    uncertainty propagation via the uncertainties package.\n    \"\"\"\n    from uncertainties import unumpy\n    e1 = 1.\n    e2 = 10.\n    einf = 1E10\n    e = 1.\n    g = unumpy.uarray(2.3, 0.2)\n    I = unumpy.uarray(1E-12, 1E-13)\n\n    ref = power_law_energy_flux(I=I, g=g, e=e, e1=e1, e2=e2)\n    norm = power_law_flux(I=I, g=g, e=e, e1=e1, e2=einf)\n    f = lambda x: x * power_law_evaluate(x, norm, g, e)\n    val = integrate_spectrum(f, e1, e2)\n\n    assert_allclose(unumpy.nominal_values(val), unumpy.nominal_values(ref))\n    assert_allclose(unumpy.std_devs(val), unumpy.std_devs(ref))\n\n\n@pytest.mark.xfail(reason='Spectral models cannot handle ufuncs properly')\n@requires_dependency('uncertainties')\ndef test_integrate_spectrum_ecpl():\n    \"\"\"\n    Test ecpl integration. 
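ExponentialCutoffPowerLaw.integral is expected to propagate the\n    ufloat uncertainties on amplitude and index through to the result. 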
Regression test for\n https://github.com/gammapy/gammapy/issues/687\n \"\"\"\n from uncertainties import unumpy\n amplitude = unumpy.uarray(1E-12, 1E-13)\n index = unumpy.uarray(2.3, 0.2)\n reference = 1\n lambda_ = 0.1\n ecpl = ExponentialCutoffPowerLaw(index, amplitude, reference, lambda_)\n emin, emax = 1, 1E10\n val = ecpl.integral(emin, emax)\n\n assert_allclose(unumpy.nominal_values(val), 5.956578235358054e-13)\n assert_allclose(unumpy.std_devs(val), 9.278302514378108e-14)\n","sub_path":"gammapy/spectrum/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232815750","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 12 15:53:05 2017\n\n@author: Dieter Erben\n\"\"\"\n\nimport pandas as pd, numpy as np\nimport statsmodels.api as sm\n\nreturns = pd.read_csv('return.csv', na_values=\"C\").dropna()\nreturns.date = returns.date//100\n\nfama = pd.read_csv('fama.csv')\nmomentum = pd.read_csv('mom.csv')\n\nmodel = pd.merge(fama,momentum,how='inner',on='date')\nboth = pd.merge(returns,model,how='inner',on='date')\n\nhonda = both[both.PERMCO == 2172]\nford = both[both.PERMCO == 20750]\ntoyota = both[both.PERMCO == 4521]\ngm1 = both[both.PERMCO == 20799]\ngm2 = both[both.PERMCO == 53554]\n\ncompanies = [honda,ford,toyota,gm1,gm2]\n\ndef syn(company):\n company['dep'] = company.RET - company.RF\n\n x = company[['SMB','HML','Mom','Mkt-RF']]\n y = company.dep\n model = sm.OLS(y, x).fit()\n model.summary()\n company['predict'] = model.predict(x)\n company['residual'] = company.RET - company.predict - company.RF\n\n company = company[['PERMNO','date','COMNAM','PERMCO',\n 'RET','RF','predict','residual']]\n company['ret-rf'] = company.RET - company.RF\n company['residual+predict'] = company.residual + company.predict\n company['ret-expret'] = company.residual + company.RF\n company['1+(ret-exp)'] = 1 + company['ret-expret']\n company['CAR'] = np.log(company['1+(ret-exp)'])\n company['Ret'] = np.log(1+company['RET'])\n company = company[['PERMNO','date','COMNAM','PERMCO',\n 'CAR','Ret']]\n return company\n\nfinal = [syn(x) for x in companies]\n\nfinal = pd.concat(final)\nfinal.to_csv('output.csv',index=False)\n\n#sample = both[:10]\n#sample['dep'] = sample.RET - sample.RF\n#\n#sample['ind1'] = sample.SMB\n#sample['ind2'] = sample.HML\n#sample['ind3'] = sample['Mom']\n#\n#sample2 = sample[['PERMNO','dep',\n# 'ind1','ind2','ind3']]\n#\n#x = sample2[['ind1','ind2','ind3']]\n#y = sample2.dep\n","sub_path":"returns/rets.py","file_name":"rets.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"413864993","text":"import pytest\n\n\nimport software.python_bindings as tbots\nfrom proto.validation_pb2 import *\n\n\nclass Validation(object):\n\n \"\"\"A validation function\"\"\"\n\n def get_validation_status(self, world) -> ValidationStatus:\n raise NotImplementedError(\"get_validation_status is not implemented\")\n\n def get_validation_type(self, world) -> ValidationType:\n raise NotImplementedError(\"get_validation_type is not implemented\")\n\n def get_validation_geometry(self, world) -> ValidationGeometry:\n raise NotImplementedError(\"get_validation_geometry is not implemented\")\n\n def __repr__(self):\n return \"String representation of validation not implemented\"\n\n\ndef create_validation_types(validation_class):\n \"\"\"Given a Validation implementation 
that returns ValidationStatus.PASSING\n when true and ValidationStatus.FAILING when false, create the 4 validation\n types with different visualization/passing/failing properties (described below)\n \n ┌───────────────────────┐ ┌─────────────────┐\n │ │──────► EventuallyTrue │\n │ │ └─────────────────┘\n │ │\n │ │ ┌─────────────────┐\n │ ├──────► EventuallyFalse │\n ┌─────────────────┐ │ │ └─────────────────┘\n │ Validation ├──────►create_validation_types│\n └─────────────────┘ │ │ ┌─────────────────┐\n │ ├──────► AlwaysTrue │\n │ │ └─────────────────┘\n │ │\n │ │ ┌─────────────────┐\n │ ├──────► AlwaysFalse │\n └───────────────────────┘ └─────────────────┘\n\n EventuallyTrue: Has to be true before the end of the test.\n EventuallyFalse: Has to be false before the end of the test. Inverts the\n validation so passing becomes failing (and vice versa)\n AlwaysTrue: Has to be true for the duration of the test.\n AlwaysFalse: Has to be false for the duration of the test.\n\n NOTE: EventuallyFalse validation is a flipped EventuallyTrue validation\n AlwaysTrue validation checks the same condition, but needs to be\n always true. AlwaysFalse is the flipped AlwaysTrue\n\n :param eventually_true: A validation function that is eventually_true\n :returns: EventuallyTrueValidation, EventuallyFalseValidation,\n AlwaysTrueValidation, AlwaysFalseValidation\n \"\"\"\n\n def constructor(self, *args, **kwargs):\n \"\"\"The 4 validation outputs will be composed of the input validation\n\n :param args/kwargs: Pass through to the validation_class\n\n \"\"\"\n self.validation = validation_class(*args, **kwargs)\n\n def flip_validation(self, world):\n \"\"\"Flip the validation status\n\n :param world: The world msg to validate on\n\n \"\"\"\n\n return {\n ValidationStatus.FAILING: ValidationStatus.PASSING,\n ValidationStatus.PASSING: ValidationStatus.FAILING,\n }[self.validation.get_validation_status(world)]\n\n # Generate the types: specifically, all Eventually validations will return\n # EVENTUALLY when get_validation_type is called, and all Always validations\n # will return ALWAYS. 
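For example, given some\n    # Validation subclass MyValidation (the name here is illustrative):\n    #\n    #   eventually_true, eventually_false, always_true, always_false = \\\n    #       create_validation_types(MyValidation)\n    #\n    # 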
get_validation_status is inverted for the _False types.\n # We simply pass the validation_geometry from the validation object through.\n common = {\n \"__init__\": constructor,\n \"get_validation_geometry\": lambda self, world: self.validation.get_validation_geometry(\n world\n ),\n }\n\n eventually_true = type(\n \"EventuallyTrueValidation\",\n (Validation,),\n {\n **common,\n \"__repr__\": lambda self: \"EventuallyTrueValidation: \"\n + repr(self.validation),\n \"get_validation_type\": lambda self: ValidationType.EVENTUALLY,\n \"get_validation_status\": lambda self, world: self.validation.get_validation_status(\n world\n ),\n },\n )\n\n eventually_false = type(\n \"EventuallyFalseValidation\",\n (Validation,),\n {\n **common,\n \"__repr__\": lambda self: \"EventuallyFalseValidation: \"\n + repr(self.validation),\n \"get_validation_type\": lambda self: ValidationType.EVENTUALLY,\n \"get_validation_status\": lambda self, world: flip_validation(self, world),\n },\n )\n\n always_true = type(\n \"AlwaysTrueValidation\",\n (Validation,),\n {\n **common,\n \"__repr__\": lambda self: \"AlwaysTrueValidation: \" + repr(self.validation),\n \"get_validation_type\": lambda self: ValidationType.ALWAYS,\n \"get_validation_status\": lambda self, world: self.validation.get_validation_status(\n world\n ),\n },\n )\n\n always_false = type(\n \"AlwaysFalseValidation\",\n (Validation,),\n {\n **common,\n \"__repr__\": lambda self: \"AlwaysFalseValidation: \" + repr(self.validation),\n \"get_validation_type\": lambda self: ValidationType.ALWAYS,\n \"get_validation_status\": lambda self, world: flip_validation(self, world),\n },\n )\n\n return eventually_true, eventually_false, always_true, always_false\n\n\ndef run_validation_sequence_sets(\n world, eventually_validation_sequence_set, always_validation_sequence_set\n):\n \"\"\"Given both eventually and always validation sequence sets, (and world)\n run validation and aggregate the results in a validation proto set.\n\n :raises AssertionError: If the test fails\n :param world: World to validate with\n :param eventually_validation_sequence_set:\n A collection of sequences of eventually validations to validate.\n :param always_validation_sequence_set:\n A collection of sequences of always validations to validate.\n\n :returns: Eventually ValidationProtoSet, Always ValidationProtoSet\n\n \"\"\"\n\n # Proto that stores validation geometry and validation status of\n # all validations passed in\n always_validation_proto_set = ValidationProtoSet()\n eventually_validation_proto_set = ValidationProtoSet()\n\n def create_validation_proto_helper(validation_proto_set, validation):\n \"\"\"Helper function that computes the status and creates a\n validation_proto, and updates it in the validation_proto_set.\n\n :param validation_proto_set: The validation proto set to add to\n :param validation: The validation to put into the proto\n\n \"\"\"\n # Stores the validation result\n validation_proto = ValidationProto()\n\n # Get status\n status = validation.get_validation_status(world)\n\n # Create validation proto\n validation_proto.status = status\n validation_proto.failure_msg = str(validation) + \" failed\"\n validation_proto.validation_type = validation.get_validation_type()\n validation_proto.geometry.CopyFrom(validation.get_validation_geometry(world))\n\n validation_proto_set.validations.append(validation_proto)\n\n return status\n\n # Validate the eventually validations. 
Eventually validations are removed from their sequence once they pass.\n    for validation_sequence in list(eventually_validation_sequence_set):\n        for validation in validation_sequence:\n\n            # Add to validation_proto_set and get status\n            status = create_validation_proto_helper(\n                eventually_validation_proto_set, validation\n            )\n\n            # If the current validation is failing, we don't care about\n            # the next one. Keep evaluating until this one passes.\n            if status == ValidationStatus.FAILING:\n                break\n\n            # If the validation has passed, remove it from the set.\n            if status == ValidationStatus.PASSING:\n                validation_sequence.remove(validation)\n                continue\n\n    # Validate the always validations. We need to look at all of them\n    for validation_sequence in always_validation_sequence_set:\n        for validation in validation_sequence:\n            create_validation_proto_helper(always_validation_proto_set, validation)\n\n    return eventually_validation_proto_set, always_validation_proto_set\n\n\ndef check_validation(validation_proto_set):\n    \"\"\"Check validation and make sure it's always true\n\n    :param validation_proto_set: Validation proto set\n    :raises: AssertionError\n\n    \"\"\"\n    for validation_proto in validation_proto_set.validations:\n        if validation_proto.status == ValidationStatus.FAILING:\n            raise AssertionError(validation_proto.failure_msg)\n\n\ndef create_validation_geometry(geometry=[]) -> ValidationGeometry:\n    \"\"\"Creates a ValidationGeometry which is a visual representation of the\n    validation to be rendered as either green (PASSING) or red (FAILING)\n\n    Given a list of (vectors, polygons, rectangles, circles), creates a\n    ValidationGeometry proto containing the protobuf representations.\n\n    :param geometry: A list of geometry objects\n    :returns: ValidationGeometry\n\n    \"\"\"\n\n    validation_geometry = ValidationGeometry()\n\n    CREATE_PROTO_DISPATCH = {\n        tbots.Vector.__name__: tbots.createVectorProto,\n        tbots.Polygon.__name__: tbots.createPolygonProto,\n        tbots.Rectangle.__name__: tbots.createPolygonProto,\n        tbots.Circle.__name__: tbots.createCircleProto,\n    }\n\n    ADD_TO_VALIDATION_GEOMETRY_DISPATCH = {\n        tbots.Vector.__name__: validation_geometry.vectors.append,\n        tbots.Polygon.__name__: validation_geometry.polygons.append,\n        tbots.Rectangle.__name__: validation_geometry.polygons.append,\n        tbots.Circle.__name__: validation_geometry.circles.append,\n    }\n\n    for geom in geometry:\n        ADD_TO_VALIDATION_GEOMETRY_DISPATCH[type(geom).__name__](\n            CREATE_PROTO_DISPATCH[type(geom).__name__](geom)\n        )\n\n    return validation_geometry\n","sub_path":"src/software/simulated_tests/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":10891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"270011894","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.home, name='main-homepage'),\n    path('help', views.help_view, name='main-help'),\n    path('about', views.about_view, name='main-about'),\n    path('index', views.IndexView.as_view(), name='index'),\n    path('api-data/', views.get_data, name='api-data'),\n    path('api-params-data/', views.get_params_data, name='api-params-data'),\n]\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"132956876","text":"\"\"\"\nChallenge 001 - Join to Range\nAuthor: Philip Mannering\nDate: 2018-09-07\n\"\"\"\n\n\nimport pandas as pd\n\n# Read the files\nranges = pd.read_csv(\"./files/range.csv\")\ncustomer = pd.read_csv(\"./files/customer.csv\", index_col=-1)\noutput = pd.read_csv(\"../files/output.csv\")\n\n#%% Generate the rows\n\ndf = ranges\n\n# Split out start and end of range and add to the dataframe\ndf[['rng1','rng2']] = df.Range.str.extract(r'(\\d+)-(\\d+)').astype(int)\n\n# Transpose the new columns\ndf = df.melt(id_vars=df.columns[:-2])\n\n# Set the index to value\ndf = df.set_index('value').sort_index()\n\n# Expand range - this is the generate rows bit\ndf = df.reindex(range(df.index.min(),df.index.max()+1), method = 'ffill')\n\n# =============================================================================\n# Should really fill in the range by this group\n# dg = df.groupby('Region')\n# df = dg.apply(lambda x: x.set_index(x['value'])\n#               .reindex(range(x.value.min(),x.value.max()+1)))\n# =============================================================================\n\n#df = df.reset_index().drop(columns = ['Range','Expect Revenue','variable'])\n\n#%% Join\ndf = df.join(customer)\n\n#%% Summarize\n\n# Fields to groupby \nkey = ['Region','Sales Rep', 'Responder']\n\n# Group\ndf = df.groupby(key, as_index=False)['Customer ID'].count()\ndf = df.rename(columns={'Customer ID':'Count'})\n\n\n#%% Check answer\n(df == output).all()","sub_path":"Challenge-001-Join-to-Range/P-Man/Challenge-001-Join-to-Range.py","file_name":"Challenge-001-Join-to-Range.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"638628717","text":"# -*- coding: utf-8 -*-\nimport Adafruit_DHT\nimport sys\n\n# Set sensor type : Options are DHT11,DHT22 or AM2302\nsensor=Adafruit_DHT.DHT22\n\n# Set GPIO sensor is connected to\ndht_pin=int(sys.argv[1])\n\n# Use read_retry method. This will retry up to 15 times to\n# get a sensor reading (waiting 2 seconds between each retry).\n# humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)\n\n# Reading the DHT sensor is very sensitive to timings and occasionally\n# the Pi might fail to get a valid reading. 
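(read_retry returns a\n# (humidity, temperature) pair; either value can be None on failure.) 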
So check if readings are valid.\ndef get_DHT():\n H, T = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, dht_pin)\n if H is not None and T is not None:\n return T, H\n else:\n print(\"wrong\")\n return 20, 60 # default fake value\n\nprint(get_DHT())","sub_path":"Raspi/test_DHT22.py","file_name":"test_DHT22.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"564657625","text":"# name : slip \n# date : 2019/5/17 10:36 \n# e-mail : slip1233@126.com\n\nimport socket\n\nclient = socket.socket()\n\nclient.connect(('192.168.1.124', 7788))\n\nwhile True:\n\n text = input('>>>')\n client.send(text.encode('utf-8'))\n if text == 'b':\n break\n # get = client.recv(1024)\n # print(get.decode('utf-8'))\n\nclient.close()\n\n\n","sub_path":"网络编程/day27作业/聊天工具客户端.py","file_name":"聊天工具客户端.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"500245658","text":"import os\nimport sys\nimport re\nimport pandas as pd\nimport urllib3\nimport requests\nimport mimetypes\nfrom urllib.parse import urljoin, urlparse\nfrom bs4 import BeautifulSoup\n\nfrom api_core import magic\nfrom api_core.models import FlexibleModelFactory\nfrom settings_hkstp import settings\n\nPICTURE_FOLDER = settings.BASE_DIR + os.path.join(settings.DESIGN_URL, 'media/startup_logos/')\nPICTURE_FOLDER_REL = os.path.join(settings.DESIGN_URL, 'media/startup_logos/')\nREGEX_REDIRECT = r'''.*?window\\.location\\s*=\\s*\\\"([^\"]+)\\\"'''\n\nd = r'C:\\Users\\gampe\\OneDrive\\Software\\python\\Scripts\\Log'\nos.chdir(d)\nf = open(os.path.join(d, 'HKSTP.txt'), 'w')\n\n\ndef run_crawler(base_url, relative_path_start, post_data, get_details=False):\n \"\"\"\n Run the crawler\n \"\"\"\n next_page = urljoin(base_url, relative_path_start)\n response = requests.post(next_page, json=post_data)\n if response:\n print(\"Crawling directory from\", next_page)\n else:\n sys.exit()\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n company_dict_list = list()\n parent_tag = soup.find_all(\"div\", {\"class\": \"contentWrapper\"})\n counter = 0\n for link_tag in parent_tag:\n for link in link_tag.find_all(\"a\", {\"class\": \"title\"}):\n startup_name = link.text\n next_page = urljoin(base_url, link.get(\"href\"))\n counter += 1\n print(str(counter).zfill(3), startup_name, \"=>\", next_page)\n\n if startup_name is not None and get_details:\n company_dict_list.append(get_company_details(next_page, startup_name))\n\n print(company_dict_list, file=f)\n return company_dict_list\n\n\ndef get_company_details(url, name):\n \"\"\"\n Extract the company details from the HKSTP page\n \"\"\"\n\n company_dict = {\n \"name\": None,\n \"program\": None,\n \"tel\": None,\n \"fax\": None,\n \"email\": None,\n \"website\": None,\n \"address\": None,\n \"introduction\": None,\n \"product\": None,\n \"contact_person\": None,\n \"logo\": None\n }\n\n try:\n response = requests.get(url, timeout=5)\n except (\n requests.exceptions.ReadTimeout,\n requests.exceptions.ConnectionError,\n requests.exceptions.TooManyRedirects,\n urllib3.exceptions.LocationValueError\n ) as e:\n return company_dict\n\n soup = BeautifulSoup(response.content.decode('latin-1', 'ignore'), \"html.parser\")\n\n # Crawl company details\n company_dict['name'] = str(name)\n company_dict['program'] = soup.find(\"div\", {\"class\": \"content-title\"}).text.strip()\n parent_tag = soup.find_all(\"div\", {\"class\": 
\"info-list\"})\n for content_tag in parent_tag:\n key = magic.rewrite_with_technical_convention(content_tag.find(\"span\").text)\n if key in company_dict:\n company_dict[key] = str(content_tag.find(\"p\").text)\n else:\n print(\"key\", key)\n\n # Save company logo if possible\n if 'website' in company_dict:\n company_dict['logo'] = get_company_logo(company_dict.get('website'), name)\n\n return company_dict\n\n\ndef save_company_details_to_database(flexible_model_name, list_dict_company_details):\n \"\"\"\n Instantiates a FlexibleModel and save the company details in database\n \"\"\"\n # create the FlexibleModel startup\n model = FlexibleModelFactory.gen_flexible_model_obj(flexible_model_name, flag_df=False)\n model.content = pd.DataFrame(list_dict_company_details)\n model.content = model.content.drop_duplicates(subset='name')\n model.content = model.content.reset_index().rename(columns={'index': 'id'})\n model.has_dataframe = True\n\n # save to db\n message = model.save_to_db()\n if not(message):\n sys.exit(1)\n\n\ndef get_company_logo(url, name):\n \"\"\"\n Extract the company logo from the respective company website (if any)\n \"\"\"\n\n try:\n\n # Loop to follow redirections if any\n connection_attempts = 5\n i = 0\n for i in range(connection_attempts):\n i += i\n if i == connection_attempts-1:\n return None\n\n if '//' not in url:\n url = '%s%s' % ('http://', url)\n\n try:\n response = requests.get(url, timeout=3)\n except (\n requests.exceptions.ReadTimeout,\n requests.exceptions.ConnectionError,\n requests.exceptions.TooManyRedirects,\n urllib3.exceptions.LocationValueError\n ) as e:\n print(\"URL invalid\", url)\n continue\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n redirect = re.match(REGEX_REDIRECT, str(soup), re.M|re.S)\n if redirect:\n url = urljoin(url, redirect.group(1))\n print(\"Redirection to\", url)\n else:\n break\n\n # Find logo img\n for img_tag in soup.find_all(\"img\"):\n\n picture_url = urljoin(url, img_tag[\"src\"])\n if 'logo' in picture_url.lower():\n try:\n img_response = requests.get(picture_url, timeout=3)\n except (\n requests.exceptions.ReadTimeout,\n requests.exceptions.ConnectionError,\n requests.exceptions.TooManyRedirects,\n urllib3.exceptions.LocationValueError\n ) as e:\n print(\"URL image invalid\", picture_url)\n continue\n\n if img_response:\n content_type = img_response.headers['content-type']\n extension = mimetypes.guess_extension(content_type)\n\n if extension is not None:\n picture_name = magic.rewrite_with_technical_convention(name) + extension\n\n # saved_file = open(PICTURE_FOLDER + picture_name, 'wb')\n # for chunk in img_response.iter_content(100000): # number of iterations per loop\n # saved_file.write(chunk)\n # saved_file.close()\n\n return str(PICTURE_FOLDER_REL + picture_name)\n\n except (TypeError, KeyError) as e:\n return None\n\n return None\n","sub_path":"app_crawler/crawler_hkstp.py","file_name":"crawler_hkstp.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"403618750","text":"from typing import Callable, Generator, List, Optional\n\nimport googleapiclient.discovery\nimport googleapiclient.errors\n\nfrom data_access import DataAccess\n\nDEFAULT_MAX_RESULTS = 50\n\n\ndef gen_resources(resource: Callable, **list_params) -> Generator[List, None, None]:\n \"\"\"\n Paginates through all the data relevant to `resource`, yielding each set\n as it comes back.\n :param resource: The YouTube Data API resource function 
(ie: youtube.videos).\n :param list_params: Parameters to pass to the `list` call on `resource`.\n :return: Generator.\n \"\"\"\n print(\"Generating resources.\")\n if \"maxResults\" not in list_params.keys():\n list_params[\"maxResults\"] = DEFAULT_MAX_RESULTS\n\n next_page_token = None\n while True:\n if next_page_token:\n list_params[\"pageToken\"] = next_page_token\n\n request = resource().list(**list_params)\n # print(\"\\t\\tRequest made successfully.\")\n response = request.execute()\n # print(f\"\\t\\tRaw response: {response}\")\n\n data = response[\"items\"]\n print(f\"\\tRetrieved {len(data)}\")\n\n yield data\n\n if \"nextPageToken\" in response.keys():\n next_page_token = response[\"nextPageToken\"]\n else:\n print(\"\\tReached last page.\")\n break\n\n return None\n\n\ndef gen_resources_for_ids(\n resource: Callable, res_ids: List[str], **list_params\n) -> Generator[List, None, None]:\n \"\"\"\n Makes requests to retrieve all resources for `res_ids`, yielding each batch.\n :param resource: The YouTube Data API resource function (ie: youtube.videos).\n :param res_ids:\n :param list_params: Parameters to pass to the `list` call on `resource`.\n :return: Generator\n \"\"\"\n print(\"Generating resources for ids.\")\n total = len(res_ids)\n res_counter = 0\n\n if \"maxResults\" not in list_params.keys():\n list_params[\"maxResults\"] = DEFAULT_MAX_RESULTS\n max_results = DEFAULT_MAX_RESULTS\n else:\n max_results = list_params[\"maxResults\"]\n\n _res_ids = res_ids.copy()\n\n while len(_res_ids) > 0:\n request_ids = []\n for _ in range(max_results):\n request_ids.append(_res_ids.pop(0))\n\n if len(_res_ids) == 0:\n break\n\n print(\n f\"\\tRequesting {res_counter}-{res_counter + len(request_ids)} of {total}.\"\n )\n\n list_params[\"id\"] = \",\".join(request_ids)\n\n request = resource().list(**list_params)\n response = request.execute()\n yield response[\"items\"]\n\n res_counter += max_results\n\n print(\"\\tFinished requesting resources.\")\n return None\n\n\nclass YouTube:\n def __init__(self, api_key: str):\n api_service_name = \"youtube\"\n api_version = \"v3\"\n\n self.youtube = googleapiclient.discovery.build(\n api_service_name, api_version, developerKey=api_key\n )\n\n def get_pitems_for_pid(self, pid: str) -> List:\n print(f\"Requesting playlist items for {pid}.\")\n\n data = []\n\n for items in gen_resources(\n self.youtube.playlistItems, part=\"contentDetails\", playlistId=pid\n ):\n data += items\n\n return data\n\n def get_videos_for_pitems(self, pitems: List) -> List:\n print(\"Requesting videos for playlist items.\")\n\n vids: List[str] = [pitem[\"contentDetails\"][\"videoId\"] for pitem in pitems]\n data = []\n\n # Filter out videos we already have.\n da = DataAccess()\n vids = [vid for vid in vids if not da.have_video(vid)]\n\n for items in gen_resources_for_ids(\n self.youtube.videos, vids, part=\"snippet,statistics\",\n ):\n data += items\n\n return data\n\n def gen_comment_threads_for_videos(\n self, videos: List\n ) -> Generator[List, None, None]:\n \"\"\"\n Generates `commentThreads` for the `videos`, yielding on every video.\n :param videos:\n :return: Generator\n \"\"\"\n print(\"Requesting comment threads for videos.\")\n\n for video in videos:\n threads = self.get_comment_threads_for_video(video[\"id\"])\n\n yield threads\n\n return None\n\n def get_comment_threads_for_video(self, video_id: str) -> List:\n print(f\"Getting threads for {video_id}\")\n\n # Get all the threads for the video (paginated).\n threads = []\n for items in gen_resources(\n 
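# each yielded batch is one API page of top-level comment threads\n            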
self.youtube.commentThreads,\n part=\"snippet\",\n videoId=video_id,\n textFormat=\"plainText\",\n maxResults=100,\n ): # Allows up to 100 at a time.\n threads += items\n pass\n\n for thread in threads:\n # print(thread)\n # Then get the top-level comments' replies (paginated).\n if thread[\"snippet\"][\"totalReplyCount\"] > 0:\n top_level_comment = thread[\"snippet\"][\"topLevelComment\"]\n\n print(f\"\\tGetting replies for {thread['id']}\")\n replies = []\n for items in gen_resources(\n self.youtube.comments,\n part=\"snippet\",\n parentId=top_level_comment[\"id\"],\n textFormat=\"plainText\",\n maxResults=100,\n ): # Allows up to 100 at a time.\n replies += items\n\n # And hydrate the thread with the retrieved comments.\n thread[\"replies\"] = {\"comments\": replies}\n\n return threads\n","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"80791820","text":"from test_env.envs.Mujoco_Maze import *\n\n\nclass Basic_Humanoid_v2(Mujoco_Maze):\n def __init__(self, paths):\n Mujoco_Maze.__init__(self, paths)\n\n def _get_obs(self, x, y):\n data = self.sim.data\n return np.concatenate([self._get_laser_dist(x, y),\n self.goal_direction(x, y),\n data.qpos.flat[2:],\n data.qvel.flat,\n data.cinert.flat,\n data.cvel.flat,\n data.qfrc_actuator.flat,\n data.cfrc_ext.flat])\n\n def step(self, a):\n if not hasattr(self, 'blocks_id'):\n self.calculate_block_pos()\n xposbefore, yposbefore = mass_center(self.model, self.sim)\n self.do_simulation(a, self.frame_skip)\n xposafter, yposafter = mass_center(self.model, self.sim)\n alive_bonus = c.survival_reward\n data = self.sim.data\n distbefore = self.distance_to_goal(xposbefore, yposbefore)\n distafter = self.distance_to_goal(xposafter, yposafter)\n lin_vel_cost = 0.25 / self.model.opt.timestep * (\n distbefore - distafter) + int(\n distafter < c.success_dist) * c.goal_r\n quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\n quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\n quad_impact_cost = min(quad_impact_cost, 10)\n reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\n qpos = self.sim.data.qpos\n done = bool(\n (qpos[2] < 1) or (qpos[2] > 2.5) or (distafter < c.success_dist))\n ob = self._get_obs(xposafter, yposafter)\n return ob, reward, done, dict(\n reward_forward=lin_vel_cost,\n reward_ctrl=-quad_ctrl_cost,\n reward_survive=alive_bonus,\n reward_contact=-quad_impact_cost,\n laserdist=ob[:4],\n dist=distafter,\n pos=[xposbefore, yposbefore],\n orientation=self.sim.data.qpos.flat[3:7],\n success=int(distafter < c.success_dist))\n\n def reset_model(self):\n const = 0.01\n qpos = self.init_qpos + self.np_random.uniform(low=-const, high=const,\n size=self.model.nq)\n qvel = self.init_qvel + self.np_random.uniform(low=-const, high=const,\n size=self.model.nv)\n self.set_state(qpos, qvel)\n\n return self._get_obs(qpos[0], qpos[1])\n","sub_path":"test_env/envs/Basic_Humanoid_v2.py","file_name":"Basic_Humanoid_v2.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"457026715","text":"'''\nSet up the version of Salt\n'''\n\n# Import python libs\nimport sys\n\n\n__version_info__ = (0, 16, 0)\n__version__ = '.'.join(map(str, __version_info__))\n\nGIT_DESCRIBE_REGEX = (\n r'(?P[\\d]{1,2})\\.(?P[\\d]{1,2})(?:\\.(?P[\\d]{0,2}))?'\n r'(?:(?:.*)-(?P[\\d]+)-(?P[a-z0-9]{8}))?'\n)\n\n\ndef 
__get_version(version, version_info):\n '''\n If we can get a version provided at installation time or from Git, use\n that instead, otherwise we carry on.\n '''\n try:\n # Try to import the version information provided at install time\n from salt._version import __version__, __version_info__ # pylint: disable=E0611\n return __version__, __version_info__\n except ImportError:\n pass\n\n # This might be a 'python setup.py develop' installation type. Let's\n # discover the version information at runtime.\n import os\n import re\n import warnings\n import subprocess\n\n try:\n cwd = os.path.abspath(os.path.dirname(__file__))\n except NameError:\n # We're most likely being frozen and __file__ triggered this NameError\n # Let's work around that\n import inspect\n cwd = os.path.abspath(\n os.path.dirname(inspect.getsourcefile(__get_version))\n )\n\n try:\n kwargs = dict(\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=cwd\n )\n\n if not sys.platform.startswith('win'):\n # Let's not import `salt.utils` for the above check\n kwargs['close_fds'] = True\n\n process = subprocess.Popen(['git', 'describe', '--tags'], **kwargs)\n out, err = process.communicate()\n out = out.strip()\n err = err.strip()\n\n if not out or err:\n return version, version_info\n\n match = re.search(GIT_DESCRIBE_REGEX, out)\n if not match:\n return version, version_info\n\n parsed_version = '{0}.{1}.{2}'.format(\n match.group('major'),\n match.group('minor'),\n match.group('bugfix') or '0'\n )\n\n if match.group('noc') is not None and match.group('sha') is not None:\n # This is not the exact point where a tag was created.\n # We have the extra information. Let's add it.\n parsed_version = '{0}-{1}-{2}'.format(\n parsed_version,\n match.group('noc'),\n match.group('sha')\n )\n\n parsed_version_info = tuple([\n int(g) for g in [h or '0' for h in match.groups()[:3]]\n if g.isdigit()\n ])\n\n if parsed_version_info > version_info:\n warnings.warn(\n 'The parsed version info, `{0}`, is bigger than the one '\n 'defined in the file, `{1}`. Missing version bump?'.format(\n parsed_version_info,\n version_info\n ),\n UserWarning,\n stacklevel=2\n )\n return version, version_info\n elif parsed_version_info < version_info:\n warnings.warn(\n 'The parsed version info, `{0}`, is lower than the one '\n 'defined in the file, `{1}`.'\n 'In order to get the proper salt version with the git hash '\n 'you need to update salt\\'s local git tags. Something like: '\n '\\'git fetch --tags\\' or \\'git fetch --tags upstream\\' if '\n 'you followed salt\\'s contribute documentation. The version '\n 'string WILL NOT include the git hash.'.format(\n parsed_version_info,\n version_info\n ),\n UserWarning,\n stacklevel=2\n )\n return version, version_info\n return parsed_version, parsed_version_info\n except OSError as os_err:\n if os_err.errno != 2:\n # If the errno is not 2(The system cannot find the file\n # specified), raise the exception so it can be catch by the\n # developers\n raise\n return version, version_info\n\n\n# Get additional version information if available\n__version__, __version_info__ = __get_version(__version__, __version_info__)\n# This function has executed once, we're done with it. 
Delete it!\ndel __get_version\n\n\ndef versions_information():\n '''\n Report on all of the versions for dependent software\n '''\n libs = (\n ('Salt', None, __version__),\n ('Python', None, sys.version.rsplit('\\n')[0].strip()),\n ('Jinja2', 'jinja2', '__version__'),\n ('M2Crypto', 'M2Crypto', 'version'),\n ('msgpack-python', 'msgpack', 'version'),\n ('msgpack-pure', 'msgpack_pure', 'version'),\n ('pycrypto', 'Crypto', '__version__'),\n ('PyYAML', 'yaml', '__version__'),\n ('PyZMQ', 'zmq', '__version__'),\n ('ZMQ', 'zmq', 'zmq_version')\n )\n for name, imp, attr in libs:\n if imp is None:\n yield name, attr\n continue\n try:\n imp = __import__(imp)\n version = getattr(imp, attr)\n if callable(version):\n version = version()\n if isinstance(version, (tuple, list)):\n version = '.'.join(map(str, version))\n yield name, version\n except ImportError:\n yield name, None\n\n\ndef versions_report():\n '''\n Yield each library properly formatted for a console clean output.\n '''\n libs = list(versions_information())\n\n padding = max(len(lib[0]) for lib in libs) + 1\n\n fmt = '{0:>{pad}}: {1}'\n\n for name, version in libs:\n yield fmt.format(name, version or 'Not Installed', pad=padding)\n\n\nif __name__ == '__main__':\n print(__version__)\n","sub_path":"salt/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196596992","text":"#CopyRight no@none.not\nimport numpy as np\n\n \ndef format_dict(d):\n return '{' + ', '.join('%d: %f' % (a, d[a]) for a in d) + '}'\n\nclass AlphaEggGameNode():\n def __init__(self, egg_leftover, player_label, parent = None):\n self.parent = parent\n self.player_label = player_label\n self.avg_gain = 0.0\n self.n_visits = 0\n self.egg_leftover = egg_leftover\n self.children = {}\n self.play_prob = {}\n\n def __str__(self):\n return '{\"avg_gain\": %f, \"n_visits\": %d, \"prob\": %s, \"egg_leftover\": %d, \"player\": %d' % (self.avg_gain, self.n_visits, format_dict(self.play_prob), self.egg_leftover, self.player_label) + ', \"children\": {' + ', '.join(['\\n \"%d-c-%d\": %s'%(self.egg_leftover, a, self.children[a]) for a in self.children]) + '}}'\n \n def expand(self, game):\n if self.egg_leftover > 0 and self.children == {}:\n actions = game.feasible_actions(self.egg_leftover)\n self.children = {a: AlphaEggGameNode(self.egg_leftover - a, self.player_label * -1, self)\n for a in actions}\n \n def foward_select_PUCT(self, P):\n assert self.children != {}\n N = self.n_visits\n C = 1.0\n scores = {a: np.random.rand() * 0.01 + self.children[a].avg_gain * self.player_label +\n C * P[a - 1] * np.sqrt(self.n_visits) / (1.0 + self.children[a].n_visits)\n for a in self.children}\n a = max(scores, key = scores.get)\n return self.children[a]\n\n def backward_update(self, gain_new):\n assert self != None\n self.avg_gain = (self.avg_gain * self.n_visits + gain_new) / (1.0 + self.n_visits)\n self.n_visits += 1.0\n return self.parent\n\n def select_next(self):\n assert self.children != {}\n assert self.n_visits > 0\n N = sum([self.children[a].n_visits for a in self.children])\n self.play_prob = {a: 1.0 * self.children[a].n_visits / N for a in self.children} \n choice = np.random.choice(list(self.play_prob.keys()), 1, p = list(self.play_prob.values()))[0]\n self.children = {choice: self.children[choice]}\n return self.children[choice]\n\n\nclass DDPGEggGameNode():\n def __init__(self, egg_leftover, player_label):\n self.egg_leftover = egg_leftover\n 
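# sign is flipped each turn by select_next\n        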
self.player_label = player_label\n self.action = -1\n \n def select_next(self, game, action_posibility):\n actions = game.feasible_actions(self.egg_leftover)\n action_sum = sum([action_posibility[i - 1] for i in actions]) + len(actions)\n play_prob = {i: (action_posibility[i - 1] + 1.0) / action_sum for i in actions}\n a = np.random.choice(list(play_prob.keys()), 1, p = list(play_prob.values()))[0]\n self.action = a\n return DDPGEggGameNode(self.egg_leftover - a, self.player_label * -1)\n \n","sub_path":"game_node.py","file_name":"game_node.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"684818","text":"# evadingliability.py\n# \n# Copyright 2015 Wangolo Joel \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \nfrom fraudcategory.subsystems import falseid \n\n\nclass EvadingLiability(falseid.FalseID):\n def __init__(self,category,code=6, length=15):\n super(EvadingLiability, self).__init__(category, code=5,length=length)\n \n self.subcategory = [\"FALSE REPORT FINANCIAL DOCS LOST\", \"FALSE REPORT OF ATM MISUSE\",\n \"SIGNIFICANT MISUSE OF ACCOUNT\", \"GOODS RETURNED (REFUNDS)\",\n \"DEBIT/STOP ORDER\", \"MULTILE ENCASHMENT\", \"CHEQUE\",\"CREDIT CARD\",\n \"PERSONAL LOAN\", \"MORTAGE FINANCE\", \"CAR LOAN\", \"RETAIL CREDIT\",\n \"INTERNET BANKING\", \"INTERNET SHOPPING\", \"OTHERS\"\n ]\n","sub_path":"CRB/fraudcategory/subsystems/evadingliability.py","file_name":"evadingliability.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"12694150","text":"\"\"\"coyotevoicetoolkit URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.voice_toolkit_home, name='home'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^efc/$', views.extension_forwarding_check, name='extension_forwarding_check'), \n url(r'^eutnc/$', views.end_user_telephone_number_check, name='end_user_telephone_number_check'),\n url(r'^aua/$', views.application_user_association, name='application_user_association'),\n url(r'^aud/$', views.application_user_disassociation, name='application_user_disassociation'),\n url(r'^uonb/$', views.user_onboarding, name='user_onboarding'),\n url(r'^uoffb/$',views.user_offboarding, name='user_offboarding'),\n url(r'^dc/$', views.device_copy, name='device_copy'),\n url(r'^dl/$', views.device_lookup, name='device_lookup'),\n url(r'^ipdl/$', views.ip_device_lookup, name='ip_device_lookup'),\n url(r'^dd/$', views.device_delete, name='device_delete'),\n url(r'^upr/$', views.user_pin_reset, name='user_pin_reset'),\n url(r'^ls/$', views.ldap_sync, name='ldap_sync'),\n url(r'^nc/$', views.name_change, name='name_change'),\n url(r'^emli/$', views.em_login, name='em_login'),\n url(r'^emlo/$', views.em_logout, name='em_logout'),\n url(r'^emuq/$', views.em_user_query, name='em_user_query'),\n url(r'^emdq/$', views.em_device_query, name='em_device_query'),\n url(r'^ec/$', views.extension_change, name='extension_change'),\n url(r'^tro_fwd/$', views.trowbridge_forwarding, name='trowbridge_forwarding'),\n url(r'^ed/$', views.enduser_devices, name='enduser_devices'),\n url(r'^ei/$', views.enduser_info, name='enduser_info'),\n url(r'^login/$', auth_views.login, name='login'),\n url(r'^logout/$', auth_views.logout_then_login, name='logout'),\n]\n","sub_path":"coyotevoicetoolkit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"562795439","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# Copyright (c) 2015 Wind River Systems, Inc.\n#\n\nimport mock\nimport six\n\nfrom oslo_config import cfg\n\nfrom heat.common import exception\nfrom heat.common import template_format\nfrom heat.engine.clients.os import nova\nfrom heat.engine import resource\nfrom heat.engine import scheduler\nfrom heat.tests.common import HeatTestCase\nfrom heat.tests import utils\n\nfrom heat.engine.resources.wr import wr_scaling_policy as sp\n\nas_template = '''\n{\n 'heat_template_version': '2013-05-23',\n 'resources': {\n 'scale_up': {\n 'type': 'OS::WR::ScalingPolicy',\n 'properties': {\n 'ServerName': '5678',\n 'ScalingResource': 'cpu',\n 'ScalingDirection': 'up',\n 'Cooldown': '60',\n }\n }\n }\n}\n'''\n\n\nclass ScalingPolicyTest(HeatTestCase):\n def setUp(self):\n super(ScalingPolicyTest, self).setUp()\n cfg.CONF.set_default('heat_waitcondition_server_url',\n 'http://server.test:8000/v1/waitcondition')\n self.stub_keystoneclient()\n self.ctx = utils.dummy_context()\n\n # For unit testing purpose. 
Register resource provider\n # explicitly.\n resource._register_class(\"OS::WR::ScalingPolicy\", sp.ScalingPolicy)\n\n def _stub_nova_server_get(self, not_found=False):\n mock_server = mock.MagicMock()\n mock_server.image = {'id': 'dd619705-468a-4f7d-8a06-b84794b3561a'}\n if not_found:\n self.patchobject(nova.NovaClientPlugin, 'get_server',\n side_effect=exception.EntityNotFound(\n entity='Server',\n name='5678'))\n else:\n self.patchobject(nova.NovaClientPlugin, 'get_server',\n return_value=mock_server)\n\n def create_scaling_policy(self, t, stack, resource_name):\n rsrc = stack[resource_name]\n self.assertIsNone(rsrc.validate())\n scheduler.TaskRunner(rsrc.create)()\n self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)\n return rsrc\n\n def test_resource_mapping(self):\n mapping = sp.resource_mapping()\n self.assertEqual(1, len(mapping))\n self.assertEqual(sp.ScalingPolicy, mapping['OS::WR::ScalingPolicy'])\n\n def test_scaling_policy_constraint_validation(self):\n self._stub_nova_server_get(not_found=True)\n t = template_format.parse(as_template)\n stack = utils.parse_stack(t)\n exc = self.assertRaises(exception.StackValidationFailed,\n stack.validate)\n self.assertIn(\"The Server (5678) could not be found.\",\n six.text_type(exc))\n self.m.ReplayAll()\n self.m.VerifyAll()\n\n def test_scaling_policy_creation(self):\n t = template_format.parse(as_template)\n stack = utils.parse_stack(t)\n self._stub_nova_server_get()\n self.m.ReplayAll()\n self.create_scaling_policy(t, stack, 'scale_up')\n self.m.VerifyAll()\n\n def test_scaling_policy_signal(self):\n t = template_format.parse(as_template)\n stack = utils.parse_stack(t)\n self._stub_nova_server_get()\n self.m.ReplayAll()\n up_policy = self.create_scaling_policy(t, stack, 'scale_up')\n up_policy.handle_signal()\n self.m.VerifyAll()\n","sub_path":"heat/tests/wr/test_wr_scaling_policy.py","file_name":"test_wr_scaling_policy.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503297137","text":"#! /usr/bin/python2.7\n\n#python2.7\n\nimport socket\nimport sys\nimport time\n\nPORT = 4242\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_address = ('127.0.0.1', PORT)\ns.bind(server_address)\n\ns.listen(1)\n\nwhile True:\n print(\"Waiting for connection\")\n connection, connection_address = s.accept()\n print(\"Connection from address: \", connection_address) \n try:\n while True:\n connection.send(\"Arbituary Data\\n\")\n time.sleep(1)\n except socket.error:\n connection.close()\n \n finally:\n connection.close()\n\ns.close()\n","sub_path":"Comms/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526205870","text":"#================================================\n#Plots for the Value Function Iteration Lab\n#================================================\nimport matplotlib\nmatplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')\nimport numpy as np\nimport math\nimport scipy as sp\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits . 
mplot3d import Axes3D\ndef eatCake(beta, N, Wmax=1., T=None, finite=True, plot=False):\n \"\"\"\n Solve the finite horizon cake-eating problem using Value Function iteration.\n Inputs:\n T -- final time period\n beta -- discount factor\n N -- number of discrete intervals to break up the cake\n size -- size of the cake to begin with\n plot -- boolean indicating whether to plot value function surface and policy function\n surface\n Returns:\n values -- numpy array of shape (N, T+2) (if finite=True) or shape (N,) (if finite=False)\n giving the value function at each time period for each state\n psi -- numpy array of shape (N, T+1) (if finite=True) or shape (N,) (if finite=False)\n giving the policy at each time period for each state.\n \"\"\"\n states = np.linspace(0,Wmax,N) #state space vector\n actions = np.tile(states, N).reshape((N,N)).T\n actions = actions - actions.T\n actions[actions<0] = 0\n rewards = np.sqrt(actions)\n rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function\n n_range = np.arange(N) #this is used several times, so initialize it once\n if finite:\n values = np.zeros((N, T+2))\n psi = np.zeros((N, T+1))\n for i in xrange(T,-1,-1):\n argmaxs = np.argmax(rewards + beta*values[:,i+1].reshape(1,N), axis=1)\n values[:,i] = (rewards + beta*values[:,i+1].reshape(1,N))[n_range,argmaxs]\n psi[:,i] = states[argmaxs]\n x=np.arange(0,N)\n \n if plot:\n x=np.arange(0,N)\n y=np.arange(0,T+2)\n X,Y=np.meshgrid(x,y)\n fig1 = plt.figure()\n ax1= Axes3D(fig1)\n ax1.plot_surface(states[X],Y,sp.transpose(values), cmap=cm.coolwarm)\n plt.show ()\n \n fig2 = plt.figure() \n ax2 = Axes3D(fig2)\n y = np.arange(0,T+1)\n X,Y=np.meshgrid(x,y)\n ax2.plot_surface(states[X],Y,sp.transpose(psi), cmap = cm.coolwarm)\n plt.show()\n else:\n values = np.zeros(N)\n psi = np.zeros(N)\n delta = 1.\n while delta >= 1e-9:\n values1 = values.copy()\n argmaxs = np.argmax(rewards + beta*values1.reshape(1,N), axis=1)\n values = (rewards + beta*values.reshape(1,N))[n_range, argmaxs]\n psi = states[argmaxs]\n delta = ((values-values1)**2).sum()\n if plot:\n plt.plot(states, psi)\n plt.show()\n \n return values, psi\n\ndef plot_finite_horiz():\n #First compute solution to problem 1\n beta = 0.9;\n T = 10;\n N = 100;\n u = lambda c: sp.sqrt(c);\n W = sp.linspace(0,1,N);\n X, Y = sp.meshgrid(W,W);\n Wdiff = Y-X\n index = Wdiff <0;\n Wdiff[index] = 0;\n util_grid = u(Wdiff);\n util_grid[index] = -10**10;\n V = sp.zeros((N,T+2));\n psi = sp.zeros((N,T+1));\n \n \n for k in xrange(T,-1,-1):\n val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1));\n vt = sp.amax(val, axis = 1);\n psi_ind = sp.argmax(val,axis = 1)\n V[:,k] = vt;\n psi[:,k] = W[psi_ind];\n \n #now create plots\n #fixed time plot\n \n plt.figure()\n plt.plot(V[:,5])\n plt.title(r'Value function for $t = 5$')\n plt.ylabel(r'$V$')\n plt.xlabel(r'$W$')\n plt.savefig('fixed_time.pdf') \n \n #fixed W plot\n plt.figure()\n plt.plot(V[50,:])\n plt.title(r'Value function for $W = 0.505$')\n plt.ylabel(r'$V$')\n plt.xlabel(r'$t$')\n plt.savefig('fixed_w.pdf')\n \n#plot delta -> 0 \ndef plot_delta(): \n beta = 0.99\n N = 1000\n u = lambda c: sp.sqrt(c)\n W = sp.linspace(0,1,N)\n X, Y = sp.meshgrid(W,W)\n Wdiff = sp.transpose(X-Y)\n index = Wdiff <0\n Wdiff[index] = 0\n util_grid = u(Wdiff)\n util_grid[index] = -10**10\n \n Vprime = sp.zeros((N,1))\n delta = sp.ones(1)\n tol = 10**-9\n it = 0\n max_iter = 500\n \n while (delta[-1] >= tol) and (it < max_iter):\n V = Vprime\n it += 1;\n print(it)\n val = util_grid + beta*sp.transpose(V)\n Vprime = 
sp.amax(val, axis = 1)\n Vprime = Vprime.reshape((N,1))\n delta = sp.append(delta,sp.dot(sp.transpose(Vprime - V),Vprime-V))\n \n plt.figure()\n plt.plot(delta[1:])\n plt.ylabel(r'$\\delta_k$')\n plt.xlabel('iteration')\n plt.savefig('convergence.pdf')\n\ndef infiniteHorizon():\n \"\"\"\n Plot policy function for infinite time horizon cake eating problem.\n \"\"\"\n values, psi = eatCake(.9, 100, finite=False)\n states = np.linspace(0,1,100)\n plt.figure()\n plt.title(r'Policy Function')\n plt.ylabel(r'$\\psi$')\n plt.xlabel(r'$W$')\n plt.plot(states, psi)\n plt.savefig('infiniteHorizon.pdf')\n plt.clf()\ninfiniteHorizon()\n","sub_path":"Algorithms/ValueFunctionIter/VFI_plots.py","file_name":"VFI_plots.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28945165","text":"import math\nclass Solution:\n def addBinary(self, a: str, b: str) -> str:\n def binaryToDecimal(a):\n su = 0\n a = [int(item) for item in a]\n a.reverse()\n for i in range(len(a)):\n su += 2**i * a[i]\n return su\n \n def decimalToBinary(a):\n if a == 0:\n return '0'\n power = int(math.log(a, 2))\n li = ['0'] * (power + 1)\n while a > 0:\n power = int(math.log(a, 2)) \n li[power] = '1'\n a -= 2**power\n li.reverse()\n return ''.join(li) \n return decimalToBinary(binaryToDecimal(a) + binaryToDecimal(b))\n","sub_path":"revamp/67_add_binary/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208375813","text":"import unittest\nfrom expr import Expr, AffExpr, QuadExpr\nfrom expr import AbsExpr, HingeExpr\nfrom expr import CompExpr, EqExpr, LEqExpr\nfrom expr import BoundExpr\n\nfrom variable import Variable\n\nimport numpy as np\n\nfrom ipdb import set_trace as st\n\nfs = [(lambda x: x, lambda x: np.array([[1]]),\n lambda x: np.array([[0]])),\n (lambda x: x**2, lambda x: 2*x,\n lambda x: np.array([[2]])),\n (lambda x: x**3, lambda x: 3*x**2,\n lambda x: 6*x)]\nxs = [1., 2., -1., 0.]\nxs = [np.array([[x]]) for x in xs]\nxs_flat = [x[0] for x in xs]\n\n# adding multi-dimensional fs and xs\nf = (lambda x: np.array([[x[0,0]**2+x[1,0]**2]]),\n lambda x: np.array([[2*x[0,0], 2*x[1,0]]]),\n lambda x: 2*np.eye(2))\nfs_multi = [f]\nxs_multi = [np.array([[0.0],[0.0]]), np.array([[2.0],[-2.0]]),\n np.array([[1.0],[0.0]]), np.array([[0.0],[1.0]]),\n np.array([[-1.0],[0.0]]), np.array([[0.0],[-1.0]])]\nN = 10\nd = 10\n\ndef test_expr_val_grad(ut, e, x, y, y_prime):\n y = np.array(y)\n y_prime = np.array(y_prime)\n y_e = np.array(e.eval(x))\n ut.assertTrue(np.allclose(y_e, y))\n y_prime_e = np.array(e.grad(x))\n ut.assertTrue(np.allclose(y_prime_e, y_prime))\n\ndef test_expr_val_grad_hess(ut, e, x, y, y_prime, y_d_prime):\n y = np.array(y)\n y_prime = np.array(y_prime)\n y_e = np.array(e.eval(x))\n ut.assertTrue(np.allclose(y_e, y))\n y_prime_e = np.array(e.grad(x))\n ut.assertTrue(np.allclose(y_prime_e, y_prime))\n y_d_prime_e = np.array(e.hess(x))\n ut.assertTrue(np.allclose(y_d_prime_e, y_d_prime))\n\ndef test_expr_val_grad_hess_with_num_check(ut, e, x, y, y_prime, y_d_prime):\n y = np.array(y)\n y_prime = np.array(y_prime)\n y_e = np.array(e.eval(x))\n ut.assertTrue(np.allclose(y_e, y))\n y_prime_e = np.array(e.grad(x, num_check=True))\n ut.assertTrue(np.allclose(y_prime_e, y_prime))\n y_d_prime_e = np.array(e.hess(x, num_check=True))\n ut.assertTrue(np.allclose(y_d_prime_e, y_d_prime))\n\nclass 
TestExpr(unittest.TestCase):\n\n def test_expr_eval_grad_hess(self):\n for f, fder, fhess in fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n test_expr_val_grad_hess(self, e, x, y, y_prime, y_d_prime)\n\n def test_expr_eval_grad_hess_flat(self):\n for f, fder, fhess in fs:\n e = Expr(f)\n for x in xs_flat:\n with self.assertRaises(Exception) as cm:\n e.grad(np.array([[[x[0]]]]))\n self.assertTrue(\"Input shape not supported\" in cm.exception.message)\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n test_expr_val_grad_hess(self, e, x, y, y_prime, y_d_prime)\n\n def test_expr_eval_grad_hess_multi(self):\n for f, fder, fhess in fs_multi:\n e = Expr(f)\n for x in xs_multi:\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n test_expr_val_grad_hess(self, e, x, y, y_prime, y_d_prime)\n\n def test_expr_eval_grad_hess_w_fder_and_fhess(self):\n for f, fder, fhess in fs:\n e = Expr(f, fder, fhess)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n test_expr_val_grad_hess_with_num_check(self, e, x, y, y_prime, y_d_prime)\n\n def test_expr_eval_grad_hess_multi_w_fder_fhess_and_num_check(self):\n for f, fder, fhess in fs_multi:\n e = Expr(f, fder, fhess)\n for x in xs_multi:\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n test_expr_val_grad_hess_with_num_check(self, e, x, y, y_prime, y_d_prime)\n\n def test_expr_num_check(self):\n f, fder, fhess = fs_multi[0]\n x = xs_multi[0]\n e = Expr(f)\n with self.assertRaises(AssertionError):\n e.grad(x, num_check=True)\n with self.assertRaises(AssertionError):\n e.hess(x, num_check=True)\n\n # wrong fder and fhess\n fder = lambda x: np.array([[2*x[0,0]+1, 2*x[1,0]+1]])\n fhess = lambda x: 3*np.eye(2)\n e = Expr(f, fder, fhess)\n\n with self.assertRaises(Exception) as cm:\n e.grad(x, num_check=True)\n self.assertTrue(\"Numerical and analytical gradients aren't close\"\\\n in cm.exception.message)\n with self.assertRaises(Exception) as cm:\n e.hess(x, num_check=True)\n self.assertTrue(\"Numerical and analytical hessians aren't close\"\\\n in cm.exception.message)\n\n try:\n e.grad(x, num_check=True, atol=1.)\n e.hess(x, num_check=True, atol=1.)\n except Exception:\n self.fail(\"gradient and hessian calls should not raise exception.\")\n\n def test_convexify_deg_1(self):\n for f, fder, _ in fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n\n aff_e = e.convexify(x, degree=1)\n self.assertIsInstance(aff_e, AffExpr)\n A = aff_e.A\n b = aff_e.b\n self.assertTrue(np.allclose(A, fder(x)))\n self.assertTrue(np.allclose(b, f(x) - A.dot(x)))\n self.assertTrue(np.allclose(aff_e.eval(x), f(x)))\n\n def test_convexify_deg_2_multi_dim(self):\n x0 = np.array([[5.0],[5.0]])\n f = lambda x: np.vstack((\\\n x[0,0]**2 + x[1,0]**2 - 4, \\\n -((x[0,0]-1)**2 +(x[1,0]**2-1)**2 - 0.25), \\\n -((x[0,0]+1)**2 +(x[1,0]**2-1)**2 - 0.25), \\\n -((x[0,0])**2 + 7*(x[1,0]+1-x[0,0]**2/2)**2 - 0.8)))\n e = Expr(f)\n aff_e = e.convexify(x0)\n self.assertTrue(aff_e.A.shape[0] == aff_e.b.shape[0])\n\n def test_convexify_deg_2(self):\n for f, fder, fhess in fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n y_d_prime = fhess(x)\n y_d_prime = np.maximum(y_d_prime, np.zeros((1,1)))\n\n quad_e = e.convexify(x, degree=2)\n self.assertIsInstance(quad_e, QuadExpr)\n Q = np.maximum(quad_e.Q, np.zeros((1,1)))\n A = quad_e.A\n b = quad_e.b\n self.assertTrue(np.allclose(Q, y_d_prime))\n self.assertTrue(np.allclose(A, \\\n y_prime -np.transpose(x).dot(y_d_prime)))\n self.assertTrue(np.allclose(b, \\\n 
np.array(0.5*np.transpose(x).dot(y_d_prime)).dot(x) \\\n - y_prime.dot(x) + y))\n self.assertTrue(np.allclose(quad_e.eval(x), y))\n\n def test_convexify_deg_2_negative_hessian(self):\n f = lambda x: -x**2\n f_hess = np.array([[-2.0]])\n e = Expr(f)\n quad_e = e.convexify(np.zeros((1,1)), degree=2)\n self.assertIsInstance(quad_e, QuadExpr)\n Q = quad_e.Q\n self.assertTrue(np.allclose(Q, np.zeros((1,1))))\n\n\nclass TestAffExpr(unittest.TestCase):\n\n def test_aff_expr_eval_grad_hess(self):\n for _ in range(N):\n A = np.random.rand(d, d)\n b = np.random.rand(d, 1)\n x = np.random.rand(d, 1)\n y = A.dot(x) + b\n y_prime = A.T\n e = AffExpr(A, b)\n test_expr_val_grad(self, e, x, y, y_prime)\n\n hess = np.zeros((d,d))\n self.assertTrue(np.allclose(e.hess(b), hess))\n self.assertTrue(np.allclose(e.hess(np.ones((d,1))), hess))\n self.assertTrue(np.allclose(e.hess(np.zeros((d,1))), hess))\n self.assertTrue(np.allclose(e.hess(x), hess))\n\nclass TestQuadExpr(unittest.TestCase):\n\n def test_quad_expr_eval_grad_hess(self):\n for _ in range(N):\n A = np.random.rand(1, d)\n b = np.random.rand(1)\n Q = np.random.rand(d, d)\n x = np.random.rand(d, 1)\n y = 0.5*x.T.dot(Q.dot(x)) + A.dot(x) + b\n y_prime = 0.5*(Q.T.dot(x) + Q.dot(x)) + A.T\n e = QuadExpr(Q, A, b)\n\n test_expr_val_grad(self, e, x, y, y_prime)\n hess = Q\n self.assertTrue(np.allclose(e.hess(b), hess))\n self.assertTrue(np.allclose(e.hess(np.ones((d,1))), hess))\n self.assertTrue(np.allclose(e.hess(np.zeros((d,1))), hess))\n self.assertTrue(np.allclose(e.hess(x), hess))\n\nclass TestAbsExpr(unittest.TestCase):\n\n def test_abs_expr_eval(self):\n for _ in range(N):\n A = np.random.rand(d, d) - 0.5*np.ones((d,d))\n b = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n x = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n y = A.dot(x) + b\n y_prime = A.T\n e = AffExpr(A, b)\n abs_e = AbsExpr(e)\n self.assertTrue(np.allclose(np.absolute(e.eval(x)), abs_e.eval(x)))\n\nclass TestHingeExpr(unittest.TestCase):\n\n def test_hinge_expr_eval(self):\n for _ in range(N):\n A = np.random.rand(d, d) - 0.5*np.ones((d,d))\n b = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n x = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n y = A.dot(x) + b\n y_prime = A.T\n e = AffExpr(A, b)\n hinge_e = HingeExpr(e)\n zeros = np.zeros((1,1))\n self.assertTrue(np.allclose(np.maximum(e.eval(x), zeros), hinge_e.eval(x)))\n\nclass TestCompExpr(unittest.TestCase):\n\n def test_comp_expr(self):\n f, fder, _ = fs[0]\n e = Expr(f)\n val = np.array([0])\n comp_e = CompExpr(e, val)\n self.assertEqual(comp_e.expr, e)\n self.assertTrue(np.allclose(comp_e.val, val))\n\n # check to ensure that modifying val won't modifying the comp expr\n val[0] = 1\n self.assertTrue(not np.allclose(comp_e.val, val))\n\n with self.assertRaises(NotImplementedError) as nie:\n comp_e.eval(0)\n with self.assertRaises(NotImplementedError) as nie:\n comp_e.convexify(0)\n with self.assertRaises(Exception) as e:\n comp_e.grad(0)\n\nclass TestEqExpr(unittest.TestCase):\n\n def test_eq_expr_eval(self):\n for _ in range(N):\n A = np.random.rand(d, d) - 0.5*np.ones((d,d))\n b = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n x = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n y = A.dot(x) + b\n y_prime = A.T\n e = AffExpr(A, b)\n\n val = e.eval(x)\n eq_e = EqExpr(e, val)\n self.assertTrue(eq_e.eval(x, tol=0.0))\n self.assertTrue(eq_e.eval(x, tol=0.01))\n\n val = e.eval(x) + 0.1\n eq_e = EqExpr(e, val)\n self.assertFalse(eq_e.eval(x, tol=0.01))\n self.assertTrue(eq_e.eval(x, tol=0.1))\n\n def test_eq_expr_convexify(self):\n for f, fder, _ in 
fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n\n eq_e = EqExpr(e, np.array([1.0]))\n abs_e = eq_e.convexify(x)\n self.assertIsInstance(abs_e, AbsExpr)\n\n aff_e = abs_e.expr\n A = aff_e.A\n b = aff_e.b\n self.assertTrue(np.allclose(A, fder(x)))\n self.assertTrue(np.allclose(b, f(x) - A.dot(x) - 1.0))\n\n self.assertTrue(np.allclose(abs_e.eval(x), np.absolute(y-1.0)))\n x2 = x+1.0\n self.assertTrue(np.allclose(abs_e.eval(x2), np.absolute(A.dot(x2)+b)))\n\nclass TestLEqExpr(unittest.TestCase):\n\n def test_leq_expr_eval(self):\n for _ in range(N):\n A = np.random.rand(d, d) - 0.5*np.ones((d,d))\n b = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n x = np.random.rand(d, 1) - 0.5*np.ones((d,1))\n y = A.dot(x) + b\n y_prime = A.T\n e = AffExpr(A, b)\n\n val = e.eval(x)\n leq_e = LEqExpr(e, val)\n self.assertTrue(leq_e.eval(x, tol=0.0))\n self.assertTrue(leq_e.eval(x, tol=0.01))\n\n val = e.eval(x) + 0.1\n leq_e = LEqExpr(e, val)\n self.assertTrue(leq_e.eval(x, tol=0.01))\n self.assertTrue(leq_e.eval(x, tol=0.1))\n\n val = e.eval(x) - 0.1\n leq_e = LEqExpr(e, val)\n self.assertFalse(leq_e.eval(x, tol=0.01))\n self.assertTrue(leq_e.eval(x, tol=0.1+1e-8))\n\n def test_leq_expr_convexify(self):\n for f, fder, _ in fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n y_prime = fder(x)\n\n leq_e = LEqExpr(e, np.array([1.0]))\n hinge_e = leq_e.convexify(x)\n self.assertIsInstance(hinge_e, HingeExpr)\n\n aff_e = hinge_e.expr\n A = aff_e.A\n b = aff_e.b\n self.assertTrue(np.allclose(A, fder(x)))\n self.assertTrue(np.allclose(b, f(x) - A.dot(x) - 1.0))\n\n self.assertTrue(np.allclose(hinge_e.eval(x), np.maximum(y-1.0, np.zeros(y.shape))))\n\n x2 = x+1.0\n self.assertTrue(np.allclose(hinge_e.eval(x2), np.maximum(A.dot(x2)+b, np.zeros(y.shape))))\n\nclass TestBoundExpr(unittest.TestCase):\n\n def test_bound_expr(self):\n b_e = BoundExpr(1,2)\n self.assertEqual(b_e.expr, 1)\n self.assertEqual(b_e.var, 2)\n\n def test_bound_expr_eval_convexify(self):\n for f, fder, _ in fs:\n e = Expr(f)\n for x in xs:\n y = f(x)\n\n dummy_grb_vars = np.array([[1]])\n v = Variable(dummy_grb_vars, x)\n\n b_e = BoundExpr(e, v)\n self.assertTrue(np.allclose(b_e.eval(), e.eval(x)))\n\n cvx_b_e = b_e.convexify()\n self.assertIsInstance(cvx_b_e, BoundExpr)\n self.assertEqual(cvx_b_e.var, v)\n\n cvx_e = b_e.expr.convexify(x)\n self.assertTrue(np.allclose(cvx_e.A, cvx_b_e.expr.A))\n self.assertTrue(np.allclose(cvx_e.b, cvx_b_e.expr.b))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"sco/test/test_expr.py","file_name":"test_expr.py","file_ext":"py","file_size_in_byte":14219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"71798386","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def __repr__(self):\n return str(self.val)\n\n @classmethod\n def fromList(cls, nums):\n nodes = [cls(num) if num else None for num in nums]\n for i, node in enumerate(nodes):\n if node is None:\n continue\n li = 2 * i + 1\n ri = 2 * i + 2\n if li < len(nums):\n node.left = nodes[li]\n if ri < len(nums):\n node.right = nodes[ri]\n return nodes[0]\n\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n def helper(node, p, q):\n if not node:\n return False, False, None\n\n pf, qf = False, False\n if node == p:\n pf = True\n if node == q:\n qf = True\n\n lpf, lqf, n = helper(node.left, p, q)\n if lpf and lqf:\n return lpf, lqf, 
n\n            rpf, rqf, n = helper(node.right, p, q)\n            if rpf and rqf:\n                return rpf, rqf, n\n\n            pf = pf or lpf or rpf\n            qf = qf or lqf or rqf\n            return pf, qf, node if pf and qf else None\n\n        if p == q:\n            return p\n\n        _, _, common = helper(root, p, q)\n        return common\n\n    def lowestCommonAncestor2(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n        def helper(node, p, q):\n            if not node:\n                return None\n            elif node in (p, q):\n                return node\n\n            l = helper(node.left, p, q)\n            r = helper(node.right, p, q)\n\n            return node if l and r else l or r\n        return helper(root, p, q)\n\n\nif __name__ == '__main__':\n    s = Solution()\n    nums = [3, 5, 1, 6, 2, 0, 8, None, None, 7, 4]\n    root = TreeNode.fromList(nums)\n    r = s.lowestCommonAncestor(root, root.left, root.right)\n    print(r)\n    r = s.lowestCommonAncestor2(root, root.left, root.right)\n    print(r)\n    r = s.lowestCommonAncestor(root, root.left, root.left.left)\n    print(r)\n    r = s.lowestCommonAncestor2(root, root.left, root.left.left)\n    print(r)\n    \n\n    ","sub_path":"leetcode/medium/lowest-common-ancestor-of-a-binary-tree.py","file_name":"lowest-common-ancestor-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"365795985","text":"from urllib.parse import quote\nimport requests, json, time\n\ndef search_epmc_for_keyword(search_term, base_url='https://www.ebi.ac.uk/europepmc/webservices/rest/search'):\n    try:\n        if search_term is None or \\\n           search_term ==\"\":\n            raise ValueError('No search term found')\n        encoded_search_term = \\\n            quote(search_term)\n        formatted_url = \\\n            '{0}?query={1}&format=json&sort_date:y%20BDESC'.\\\n            format(base_url, encoded_search_term)\n        all_data = list()\n        cursor = ''\n        while True:\n            data, cursor = \\\n                get_pmc_data(\n                    url=formatted_url,\n                    cursor=cursor)\n            if len(data) > 0 or cursor !='':\n                all_data.extend(data)\n                time.sleep(2)\n            if cursor == '':\n                break\n        return all_data\n    except Exception as e:\n        raise ValueError('Failed to search ePMC, error: {0}'.format(e))\n\n\ndef get_pmc_data(url, cursor=''):\n    '''\n    A method for fetching pmc data\n\n    :param url: A formatted ePMC search url\n    :param cursor: A cursor string, default empty string\n    '''\n    try:\n        data = list()\n        url_str = \\\n            '{0}&cursorMark={1}'.\\\n            format(url, cursor)\n        response = requests.get(url_str)\n        if response.ok:\n            json_data = \\\n                json.loads(response.content.decode('utf-8'))\n            data = json_data['resultList']['result']\n            if 'nextCursorMark' in json_data and \\\n               cursor != json_data['nextCursorMark']:\n                cursor = json_data['nextCursorMark']\n            else:\n                cursor = ''\n        return data, cursor\n    except Exception as e:\n        raise ValueError(e)","sub_path":"igf_data/utils/epmc_utils.py","file_name":"epmc_utils.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"118697718","text":"import os\nimport numpy\nimport datetime\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\ndef get_microseconds(entry):\n    start = entry.index(\",\")+1\n    end = entry.index(\"+\")\n    value = entry[start:end] \n    return int(value)\n\ndef get_iso_time_str(entry):\n    end = entry.index(\"+\")\n    return entry[:end]\n\ndef get_data_amount_time(entry):\n    values = entry.split(\" \", 1)[1].split(\",\")\n    return [value.split(\":\")[1].strip() for value in values]\n\ndef create_datetime(iso_str):\n    return datetime.datetime.strptime(iso_str, \"%Y-%m-%dT%H:%M:%S,%f\")\n\ndef 
get_duration_monotonic(filename):\n durations = []\n f = open(os.path.join(__location__, \"data\", filename), \"r\")\n content = f.readlines()\n data_provider = (content[i:i+2] for i in range(0, len(content)-1, 1))\n for entry in data_provider:\n data_amount = int(get_data_amount_time(entry[1])[0])\n t1_logtime = create_datetime(get_iso_time_str(entry[1]))\n t0_logtime = create_datetime(get_iso_time_str(entry[0]))\n t1_monotonic_time = int(get_data_amount_time(entry[1])[1])\n t0_monotonic_time = int(get_data_amount_time(entry[0])[1])\n durations.append((data_amount,\n (t1_logtime - t0_logtime).microseconds, # .total_seconds()\n t1_monotonic_time - t0_monotonic_time))\n return durations\n\ndef get_duration_system_monotonic(filepath):\n durations = []\n f = open(filepath, \"r\")\n content = f.readlines()\n data_provider = (content[i:i+2] for i in range(0, len(content)-1, 1))\n for entry in data_provider:\n data_amount = int(get_data_amount_time(entry[1])[0])\n t1_logtime = create_datetime(get_iso_time_str(entry[1]))\n t0_logtime = create_datetime(get_iso_time_str(entry[0]))\n t1_system_time = int(get_data_amount_time(entry[1])[1])\n t0_system_time = int(get_data_amount_time(entry[0])[1])\n t1_monotonic_time = int(get_data_amount_time(entry[1])[2])\n t0_monotonic_time = int(get_data_amount_time(entry[0])[2])\n durations.append((data_amount,\n (t1_logtime - t0_logtime).microseconds, # .total_seconds()\n t1_system_time - t0_system_time,\n t1_monotonic_time - t0_monotonic_time))\n return durations\n\ndef get_dummy_duration(filepath):\n durations = []\n f = open(filepath, \"r\")\n content = f.readlines()\n data_provider = (content[i:i+2] for i in range(0, len(content)-1, 1))\n for entry in data_provider:\n t1 = create_datetime(get_iso_time_str(entry[1]))\n t0 = create_datetime(get_iso_time_str(entry[0]))\n duration = t1 - t0\n durations.append(duration.microseconds)\n return durations\n\ndef log_output(header, durations):\n print(header)\n print(\"num values: \", len(durations))\n print(\"mean (us): \", numpy.mean(durations))\n print(\"median (us): \", numpy.median(durations))\n print(\"std (us): \", numpy.std(durations))\n print(\"min (us): \", numpy.min(durations))\n print(\"max (us): \", numpy.max(durations))\n print(\"range of changes (us): \", numpy.max(durations) - numpy.min(durations))\n print(\"range of changes (%): \", 100 * (numpy.max(durations) - numpy.min(durations)) / float(numpy.max(durations)))\n print(\"---------------------------------\")\n\ndef test_realtime_system(folder):\n data_entries = [(folder + \"10us\", \"10 us, 10.000 values = 100.000 us\"),\n (folder + \"20us\", \"20 us, 10.000 values = 200.000 us\")]\n for entry in data_entries:\n filename = entry[0]\n header = entry[1]\n duration = get_dummy_duration(filename)\n log_output(header, duration)\n\ndef test_different_clocks(folder):\n data_filepath = os.path.join(folder, \"time_data_speed_all_clocks\")\n durations = get_duration_system_monotonic(data_filepath)\n print(\"logging: \", numpy.mean([duration[1] for duration in durations]))\n print(\"system: \", numpy.mean([duration[2] for duration in durations]) / 1000.0) # ns to us\n print(\"monotonic: \", numpy.mean([duration[3] for duration in durations]) / 1000.0)\n\nif __name__ == '__main__':\n # dmesg --time-format=iso\n folder = \"./data/\"\n test_realtime_system(folder)\n 
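# (Editor's note - an assumption about the expected log format, inferred from get_data_amount_time() above:\n    # each line should look roughly like \"<iso-timestamp>+<tz> data:<n>, system:<ns>, monotonic:<ns>\",\n    # i.e. one timestamp token followed by comma-separated key:value pairs.)\n    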
test_different_clocks(folder)\n","sub_path":"light-communication-signaling/src/light_timing/time_analysis.py","file_name":"time_analysis.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"31328983","text":"import cv2\nimport torch\nimport numpy as np\nimport random\n\nimport os\nimport mmap\nimport sys\nimport simplejpeg\n\n\ndef readImageWithMmap(path):\n    fd = os.open(path, os.O_RDONLY | os.O_DIRECT) \n    mm = mmap.mmap(fd, 0, access=mmap.ACCESS_READ) \n    os.close(fd)\n    # np.fromstring is deprecated for raw bytes; frombuffer is the supported equivalent\n    img = np.frombuffer(mm.read(), dtype=\"uint8\")\n\n    return img\n\ndef random_crop(image, crop_height, crop_width):\n    max_x = image.shape[1] - crop_width + 1\n    max_y = image.shape[0] - crop_height + 1\n    x = np.random.randint(0, max_x)\n    y = np.random.randint(0, max_y)\n    crop = image[y: y + crop_height, x: x + crop_width]\n    return crop\n\n\ndef resize(img, square=224):\n    height, width, _ = img.shape\n    if height > width:\n        height = height * square / width\n        width = square\n    else:\n        width = width * square / height\n        height = square\n    dim = (int(width), int(height))\n    img = cv2.resize(img, dim)\n    return img\n\n\ndef transform(image):\n    # image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n    try:\n        image = simplejpeg.decode_jpeg(image, colorspace=\"bgr\")\n    except Exception:\n        # fall back to OpenCV decoding if simplejpeg cannot parse the buffer\n        image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n    image = resize(image)\n    image = random_crop(image,224,224)\n\n\n    if random.random()<0.5:\n        image = cv2.flip(image, 1)\n\n    image = image / 255.0\n    image = image - (0.485, 0.456, 0.406)\n    image = image / (0.229, 0.224, 0.225)\n    image = image.transpose((2, 0, 1))\n    # print(image.shape)\n    image = torch.from_numpy(image)\n    return image","sub_path":"src/modified-pytorch/modifiedprep.py","file_name":"modifiedprep.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"564450291","text":"import pickle\n\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\nfrom .utils import noisify_sym\n\n\nclass CIFAR10(Dataset):\n    \n    train_list = [\n        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],\n        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],\n        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],\n        ['data_batch_4', '634d18415352ddfa80567beed471001a'],\n        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],\n    ]\n    \n    test_list = [\n        ['test_batch', '40351d587109b95175f43aff81a1287e'],\n    ]\n    \n    def __init__(self, train=True, transform = transforms.ToTensor(), target_transform=None, noise_rate = 0.2):\n        self.train = train\n        self.transform = transform\n        self.train_data = np.zeros((0,3,32,32))\n        self.train_labels = np.zeros((0,))\n        self.noise_rate = noise_rate \n\n        if self.train:\n            for fentry in self.train_list:\n                f = fentry[0]\n                file = './data/cifar-10-batches-py/' + f\n                fo = open(file, 'rb')\n                entry = pickle.load(fo, encoding='latin1')\n                self.train_data = np.concatenate((self.train_data, entry['data'].reshape((10000,3,32,32))))\n                self.train_labels = np.concatenate((self.train_labels, np.array(entry['labels'])))\n            self.train_labels = torch.from_numpy(noisify_sym(self.train_labels.astype('uint8'), noise_rate = self.noise_rate, random_state = 0)[0])\n        \n        \n        else:\n            f = self.test_list[0][0]\n            file = './data/cifar-10-batches-py/' + f\n            fo = open(file, 'rb')\n            entry = pickle.load(fo, encoding='latin1')\n            self.train_data = np.concatenate((self.train_data, 
entry['data'].reshape((10000,3,32,32))))\n            self.train_labels = torch.from_numpy(np.concatenate((self.train_labels, np.array(entry['labels']))))\n\n        self.train_data = self.train_data.transpose(0,2,3,1)\n        \n\n    def __getitem__(self,index):\n        \n        img, label = self.train_data[index], self.train_labels[index]\n        img = Image.fromarray(img.astype('uint8'))\n        if self.transform is not None:\n            img = self.transform(img)\n        return img, label, index\n    \n    def __len__(self):\n        \n        return self.train_data.shape[0]\n","sub_path":"data/cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"184933376","text":"from glob import glob\nimport os\nimport shutil\n\nif __name__ == '__main__':\n    label_name = list(open('label.txt'))\n    for i in range(len(label_name)):\n        dir_name = label_name[i].replace('\\n', '')\n        os.mkdir('images/{}'.format(dir_name))\n        for k in range(1, 81):\n            shutil.move('jpg/image_{}.jpg'.format(str(k+i*80).zfill(4)), 'images/{}/{}.jpg'.format(dir_name, k))\n","sub_path":"flower/recognition/data/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"611734920","text":"import datetime\nDATE = datetime.datetime.now()\n\nclass Item:\n\n    def __init__(self):\n        # each Item front-end owns the Factory that actually stores the records\n        self.factory = Factory()\n\n    def addItem(self, newItem):\n        self.factory.addItem(newItem)\n\n    def printItemList(self):\n        self.factory.printItemList()\n\n\nclass Factory:\n\n    def __init__(self):\n        self.list_of_items = []\n        self.items_on_the_list = 0\n\n    def addItem(self, item_attribute_list):\n        self.list_of_items.append(item_attribute_list)\n        self.items_on_the_list = self.items_on_the_list + 1\n\n    def printItemList(self):\n        itemPrint = 0\n        # iterate over every stored item (the previous version skipped the last one)\n        numberOfItems = self.items_on_the_list\n        while itemPrint < numberOfItems:\n            print(self.list_of_items[itemPrint][0] + \", \" + self.list_of_items[itemPrint][1] + \", \" +self.list_of_items[itemPrint][2])\n            itemPrint += 1\n\nitem = Item()\nnewItem = []\nnewItem = ['iPhone', '', 'X1Y']\nitem.addItem(newItem)\nnewItem = ['Samsung s5', '', 'X2A']\nitem.addItem(newItem)\nnewItem = ['Orange Juice', '2018, 6, 20', 'X1Y']\nitem.addItem(newItem)\nnewItem = ['Veggie Juice', '2019, 6, 20', 'Z3Z']\nitem.addItem(newItem)\nnewItem = ['School Bag', '', 'Y2A']\nitem.addItem(newItem)\nitem.printItemList()\n","sub_path":"old_versions/warehousev3.py","file_name":"warehousev3.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"50864555","text":"#abstract base class\nfrom abc import ABC\nfrom django.db import models\nfrom django.db.models.deletion import CASCADE\n\nfrom django.contrib.auth.models import AbstractUser\n\n#Room for scaling?\nclass Teams(models.Model):\n\n    team_name = models.CharField(\n        verbose_name='Name of the team', \n        blank=False, null=False,\n        max_length=200,\n        help_text='Provide a name for your team'\n    )\n\n    team_description = models.TextField(\n        verbose_name='Describe the team', \n        blank=False, null=False,\n        help_text='A short blurb about the team'\n    )\n\n    def __str__(self):\n        return self.team_name\n\n    class Meta:\n        pass\n\nclass User( AbstractUser ):\n    '''\n    Team members model \n    '''\n    \n    class Skill_level(ABC):\n        '''\n        An abstract class that helps use numerical skill level values in a more readable way.\n        The variables in this class may be used in queries instead of numerical values\n        '''\n        JUNIOR = 1\n        INTERMEDIATE = 2\n        SENIOR = 3 \n\n    
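# (Editor's illustrative note, not code from this project: per the docstring above, the constants\n    # are intended for readable ORM queries, e.g. User.objects.filter(skill_level=User.Skill_level.SENIOR)\n    # rather than filter(skill_level=3).)\n    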
#This tuple defines the choices available for the skill level field, which is a numerical value\n    skillLevelChoices = (\n\n        (Skill_level.JUNIOR, \"Junior\"),\n        (Skill_level.INTERMEDIATE, \"Intermediate\"),\n        (Skill_level.SENIOR, \"Senior\"),\n    )\n\n    #====== FIELDS ==========================\n\n    USERNAME_FIELD = 'email'\n    EMAIL_FIELD = 'email'\n    REQUIRED_FIELDS = ['first_name']\n\n    email = models.EmailField(verbose_name='email address', unique=True)\n    username = models.CharField(max_length=1, help_text='Not used', default='', unique=False, null=True, blank=True)\n\n\n    skill_level = models.PositiveIntegerField(\n        choices=skillLevelChoices, \n        default=Skill_level.JUNIOR\n    )\n\n    #Room for scaling?\n    # team = models.ForeignKey(\n    #     Teams,\n    #     verbose_name=\"Name of team\",\n    #     help_text=\"Which team does this member belong to?\",\n    #     blank=False, \n    #     null=False,\n    #     on_delete=CASCADE\n    # )\n\n    #===== END OF FIELDS ====================\n\n    def __str__(self):\n        return __class__.__name__\n\n    class Meta:\n        app_label = 'teamaker'\n        ordering = []\n        verbose_name = \"Team member\"\n        verbose_name_plural = \"Team members\"","sub_path":"backend/teamaker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"582521472","text":"import socket\nimport thread\nimport time\n\ndef handle(s):\n    print(repr(s.recv(4096)))\n    # reply on the accepted connection passed in as 's' (was 'sock.send', an undefined name)\n    s.send(r'''HTTP/1.0 200 OK\n    Content-Type: text/plain\n\n    Hello, world!\n\n    ''')\n    # s.send('''\n    # HTTP/1.1 101 Web Socket Protocol Handshake\\r\n    # Upgrade: WebSocket\\r\n    # Connection: Upgrade\\r\n    # WebSocket-Origin: http://localhost:8888\\r\n    # WebSocket-Location: ws://localhost:9876/\\r\n    # WebSocket-Protocol: sample\n    # '''.strip() + '\\r\\n\\r\\n')\n    time.sleep(1)\n    s.send('hello')\n    time.sleep(1)\n    s.send('world')\n    s.close()\n\ns = socket.socket()\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('', 9876));\ns.listen(1);\nwhile 1:\n    t,_ = s.accept();\n    thread.start_new_thread(handle,(t,))\n    # threading.Thread(target = handle, args = (t,)).start()\n\nt.close()\n","sub_path":"server/server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"324506940","text":"'''\n# Task 1:\n# Extend the implementation of the program from the example examples/5_with_args.py\n# by adding implementations of the following commands (passed as arguments):\n# cp - creates a copy of the specified file\n# rm - deletes the specified file (ask for confirmation of the operation)\n# cd - changes the current directory to the specified one\n# ls - displays the full path of the current directory\n# a path is treated as absolute (full_path) -\n# in Linux it starts with /, in Windows with a drive name,\n# all other paths are treated as relative.\n\n# Important! All operations must be performed in the directory you are currently in.\n# The initial directory is the one the script was started from.\n\n# P.S.
 If possible, make a cross-platform implementation.\n'''\n\nimport os\nfrom os.path import splitext as split\nimport sys\nfrom shutil import copyfile as copy\n\n\nprint('sys.argv = ', sys.argv)\n\n\ndef print_help():\n    print(\"help - show this help\")\n    print(\"cp - creates a copy of the specified file\")\n    print(\"rm - deletes the specified file (asks for confirmation of the operation)\")\n    print(\"cd - changes the current directory to the specified one\")\n    print(\"ls - displays the full path of the current directory\")\n\ndef cp_def():\n    print(\"cp - creates a copy of the specified file\")\n    try:\n        copy(dir_name, split(dir_name)[0] + \"(copy)\" + split(dir_name)[1])\n        print(\"Created a copy of file {} under the name {}\".format(dir_name, split(dir_name)[0] + \"(copy)\" + split(dir_name)[1]))\n    except FileNotFoundError:\n        print(\"No file with that name was found\")\n    except TypeError:\n        print(\"No file name was given\")\n\ndef rm_def():\n    print(\"rm - deletes the specified file\")\n    q = input(\"Are you sure you want to delete {}? y/n:\".format(dir_name))\n    while q != \"y\" and q != \"n\":\n        print(\"Invalid value entered.\")\n        q = input(\"Are you sure you want to delete {}? y/n:\".format(dir_name))\n    if q == \"y\":\n        try:\n            os.remove(dir_name)\n            print(\"File {} deleted\".format(dir_name))\n        except FileNotFoundError:\n            print(\"No file with that name was found\")\n    else:\n        print(\"Operation cancelled by the user\")\n        return\n\ndef cd_def():\n    print(\"cd - changes the current directory to the specified one\")\n    try:\n        os.chdir(dir_name)\n        print(\"The current directory is now {}:\".format(dir_name))\n        print(\"os.getcwd() =\", os.getcwd())\n    except FileNotFoundError:\n        q = input(\"Directory {} does not exist. Create it? y/n:\".format(dir_name))\n        while q != \"y\" and q != \"n\":\n            print(\"Invalid value entered.\")\n            q = input(\"Create directory {}?
y/n:\".format(dir_name))\n if q == \"y\":\n os.mkdir(dir_name)\n print(\"Создана директория\", dir_name)\n os.chdir(dir_name)\n print(\"Текущая директория (os.getcwd()):\", os.getcwd())\n else:\n print(\"Операция отменена пользователем\")\n return\n\ndef ls_def():\n print(\"ls - отображение полного пути текущей директории\")\n print(\"Текущая директория:\", os.getcwd())\n\ndo = {\n \"help\": print_help,\n \"cp\": cp_def,\n \"rm\": rm_def,\n \"cd\": cd_def,\n \"ls\": ls_def,\n}\n\ntry:\n dir_name = sys.argv[2]\nexcept IndexError:\n dir_name = None\n\ntry:\n key = sys.argv[1]\nexcept IndexError:\n key = None\n\n\nif key:\n if do.get(key):\n do[key]()\n else:\n print(\"Задан неверный ключ\")\n print(\"Укажите ключ help для получения справки\")","sub_path":"les5-hard1.py","file_name":"les5-hard1.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"151498887","text":"#!/usr/bin/python\n\ndef primeFactors(x):\n\n # define index to start at 2 to not get infinite loop in while loop\n i = 2\n # create empty list\n factors = list()\n\n # iterate index to x\n while i <= x:\n # while i is a factor of x\n while x % i == 0:\n # add it to the list\n factors.append(i)\n # and perform division\n x /= i\n # interate i\n i += 1\n # once complete return\n return factors\n","sub_path":"primefactors.py","file_name":"primefactors.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"24318288","text":"\n# This script creates the input files for a set of options\n\nfrom string import Template\nfrom shutil import copyfile\nimport numpy as np\nimport sys\n\nif len(sys.argv)<5:\n print(\"Usage: python {} tag selection_data_tag selection_random_tag apply_dataselection_to_random\")\n sys.exit(0)\n\nprint(\"tag: {}\".format(sys.argv[1]))\nprint(\"selection_data_tag: {}\".format(sys.argv[2]))\nprint(\"selection_random_tag: {}\".format(sys.argv[3]))\nprint(\"apply_dataselection_to_random: {}\".format(sys.argv[4]))\n\nf=open('Templates/input_22pin_template.py',mode='r')\nmyinput = Template( f.read() )\nf.close\n\nif sys.argv[2]=='None':\n mySDT=None\nelse:\n mySDT=\"'\"+sys.argv[2]+\"'\"\n\nif sys.argv[3]=='None':\n mySRT=None\nelse:\n mySRT=\"'\"+sys.argv[3]+\"'\"\n\nf=open('input_22pin{}.py'.format(sys.argv[1]),mode='w')\nf.write(myinput.substitute(SDT = mySDT, SRT = mySRT, ADTR = sys.argv[4]))\nf.close()\n\nprint(\"Written file input_22pin{}.py\".format(sys.argv[1]))\n","sub_path":"Pipeline/Example/createInput_22pin.py","file_name":"createInput_22pin.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209547490","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport datetime\nimport hamster.client\nimport reports\nimport argparse\nimport pdfkit\nimport gettext\ngettext.install('brainz', '../datas/translations/')\n\n# custom settings:\n\nreportTitle = \"My Activities Report\"\nactivityFilter = \"unfiled\"\n\ndef valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\").date()\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\n# find dates:\n\ntoday = datetime.date.today()\nfirst = today.replace(day=1)\npreviousLast = first - datetime.timedelta(days=1)\npreviousFirst = previousLast.replace(day=1)\n\n# assign arguments:\n\nparser = 
argparse.ArgumentParser(description=\"export the hamster database to pdf\")\nparser.add_argument(\"--thismonth\", action=\"store_true\", help=\"export this month's records\")\nparser.add_argument(\"--lastmonth\", action=\"store_true\", help=\"export last month's records\")\nparser.add_argument(\"-s\", dest=\"startDate\", default=today, help=\"start date (default: today)\", type=valid_date)\nparser.add_argument(\"-e\", dest=\"endDate\", default=today, help=\"end date (default: today)\", type=valid_date)\nparser.add_argument(\"-o\", dest=\"reportFile\", default=\"report.pdf\", help=\"output file (default: report.pdf)\")\n\n# parse arguments:\n\nargs = parser.parse_args()\n\nif args.thismonth:\n\targs.startDate = first\n\targs.endDate = today\n\nif args.lastmonth:\n\targs.startDate = previousFirst\n\targs.endDate = previousLast\n\n# prepare filenames:\n\nhtmlFilename = os.path.splitext(args.reportFile)[0]+\".html\"\npdfFilename = os.path.splitext(args.reportFile)[0]+\".pdf\"\n\nstorage = hamster.client.Storage()\nfacts = storage.get_facts(args.startDate, args.endDate)\n\n# generate report\n\nreports.simple(facts, args.startDate, args.endDate, htmlFilename)\n\n# convert .html to .pdf file:\n\npdfkit.from_file(htmlFilename, pdfFilename)\n","sub_path":"hamster2pdf.py","file_name":"hamster2pdf.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638181654","text":"############################################################\n# -*- coding: utf-8 -*-\n#\n# Python-based Tool for interaction with the 10micron mounts\n# GUI with PyQT5 for python\n# Python v3.5\n#\n# Michael Würtenberger\n# (c) 2016, 2017\n#\n# Licence APL2.0\n#\n############################################################\nimport datetime\nimport json\nimport logging\nimport logging.handlers\n# numerics\nimport math\nimport os\nimport platform\nimport sys\n\nimport numpy\n\nif platform.system() == 'Windows':\n # application handling\n from winreg import *\n# commands to threads\nfrom queue import Queue\n# import for the PyQt5 Framework\nimport PyQt5\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n# matplotlib\nfrom matplotlib import use\nuse('Qt5Agg')\nfrom matplotlib import pyplot as plt\nfrom matplotlib import figure as figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n# import the UI part, which is done via QT Designer and exported\nfrom baseclasses import widget\nfrom widgets import modelplotWindow\nfrom widgets import imageWindow\nfrom widgets import analyseWindow\nfrom gui import wizzard_main_ui\n\n# modeling\nfrom modeling import modelThread\n# import mount functions classes\nfrom mount import mountThread\nfrom relays import relays\nfrom remote import remoteThread\nfrom dome import ascomDomeThread\nfrom indi import indi_client\nfrom environment import ascomEnvirThread\n\nif platform.system() == 'Windows':\n from automation import uploadThread\nfrom wakeonlan import wol\n\n\nclass ShowModel(FigureCanvas):\n\n def __init__(self, parent=None):\n self.fig = figure.Figure(dpi=75, facecolor=(25/256, 25/256, 25/256))\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n FigureCanvas.updateGeometry(self)\n\n\nclass MountWizzardApp(widget.MwWidget):\n logger = logging.getLogger(__name__)\n\n def __init__(self):\n super(MountWizzardApp, self).__init__()\n self.setObjectName(\"Main\")\n # setting up communication queues for 
inter thread communication\n self.mountCommandQueue = Queue()\n self.modelLogQueue = Queue()\n self.modelCommandQueue = Queue()\n self.messageQueue = Queue()\n self.imageQueue = Queue()\n self.INDISendCommandQueue = Queue()\n self.INDIDataQueue = Queue()\n # loading config data\n self.config = self.loadConfigData()\n # initializing the gui\n self.ui = wizzard_main_ui.Ui_MainWindow()\n self.ui.setupUi(self)\n # special setups for gui including box for matplotlib. margins to 0\n self.initUI()\n self.setWindowTitle('MountWizzard ' + BUILD_NO)\n self.ui.le_mwWorkingDir.setText(os.getcwd())\n helper = QVBoxLayout(self.ui.model)\n helper.setContentsMargins(0, 0, 0, 0)\n self.modelWidget = ShowModel(self.ui.model)\n # noinspection PyArgumentList\n helper.addWidget(self.modelWidget)\n # instantiating all subclasses and connecting thread signals\n self.relays = relays.Relays(self)\n self.mount = mountThread.Mount(self)\n self.mount.setObjectName(\"Mount\")\n self.mount.signalMountConnected.connect(self.setMountStatus)\n self.INDIworker = indi_client.INDIClient(self)\n self.INDIthread = QThread()\n self.INDIthread.setObjectName(\"INDI\")\n self.INDIworker.moveToThread(self.INDIthread)\n # noinspection PyUnresolvedReferences\n self.INDIthread.started.connect(self.INDIworker.run)\n self.INDIworker.status.connect(self.setINDIStatus)\n # threading for ascom environment data\n if platform.system() == 'Windows':\n self.workerAscomEnvironment = ascomEnvirThread.AscomEnvironment(self)\n self.threadAscomEnvironment = PyQt5.QtCore.QThread()\n self.threadAscomEnvironment.setObjectName(\"Environ\")\n self.workerAscomEnvironment.moveToThread(self.threadAscomEnvironment)\n # noinspection PyUnresolvedReferences\n self.threadAscomEnvironment.started.connect(self.workerAscomEnvironment.run)\n self.workerAscomEnvironment.finished.connect(self.workerAscomEnvironmentStop)\n self.workerAscomEnvironment.signalAscomEnvironmentConnected.connect(self.setEnvironmentStatus)\n self.threadAscomEnvironment.start()\n # threading for ascom dome data\n if platform.system() == 'Windows':\n self.workerAscomDome = ascomDomeThread.AscomDome(self)\n self.threadAscomDome = PyQt5.QtCore.QThread()\n self.threadAscomDome.setObjectName(\"Dome\")\n self.workerAscomDome.moveToThread(self.threadAscomDome)\n # noinspection PyUnresolvedReferences\n self.threadAscomDome.started.connect(self.workerAscomDome.run)\n self.workerAscomDome.finished.connect(self.workerAscomDomeStop)\n self.workerAscomDome.signalAscomDomeConnected.connect(self.setDomeStatus)\n self.threadAscomDome.start()\n # threading for remote shutdown\n self.workerRemote = remoteThread.Remote(self)\n self.threadRemote = PyQt5.QtCore.QThread()\n self.threadRemote.setObjectName(\"Remote\")\n self.workerRemote.moveToThread(self.threadRemote)\n # noinspection PyUnresolvedReferences\n self.threadRemote.started.connect(self.workerRemote.run)\n self.workerRemote.finished.connect(self.workerRemoteStop)\n # thread start will be done when enabled\n # self.threadRemote.start()\n self.workerRemote.signalRemoteShutdown.connect(self.saveConfigQuit)\n # threading for updater automation\n if platform.system() == 'Windows':\n self.workerUpload = uploadThread.UpdaterAuto(self)\n self.threadUpload = PyQt5.QtCore.QThread()\n self.threadUpload.setObjectName(\"Upload\")\n self.workerUpload.moveToThread(self.threadUpload)\n # noinspection PyUnresolvedReferences\n self.threadUpload.started.connect(self.workerUpload.run)\n self.workerUpload.finished.connect(self.workerUploadStop)\n self.threadUpload.start()\n 
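# (Editor's summary sketch with placeholder names (Worker, stopSlot), not code from this file:\n        # every worker/thread pair in this constructor is wired with the same PyQt5 pattern:\n        #     worker = Worker(); thread = QThread()\n        #     worker.moveToThread(thread)\n        #     thread.started.connect(worker.run)\n        #     worker.finished.connect(stopSlot)  # stopSlot calls thread.quit() and thread.wait()\n        #     thread.start()\n        # as done above and below for the INDI, environment, dome, remote, upload and modeling workers.)\n        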
self.workerModeling = modelThread.Modeling(self)\n self.threadModeling = PyQt5.QtCore.QThread()\n self.threadModeling.setObjectName(\"Model\")\n self.workerModeling.moveToThread(self.threadModeling)\n # noinspection PyUnresolvedReferences\n self.threadModeling.started.connect(self.workerModeling.run)\n self.workerModeling.finished.connect(self.workerModelingStop)\n self.workerModeling.signalModelConnected.connect(self.setCameraPlateStatus)\n # thread start will be done when enabled\n self.threadModeling.start()\n self.analyseWindow = analyseWindow.AnalyseWindow(self)\n self.modelWindow = modelplotWindow.ModelPlotWindow(self)\n self.imageWindow = imageWindow.ImagesWindow(self)\n # starting the threads\n self.mount.start()\n if platform.system() == 'Windows':\n self.checkASCOM()\n self.enableDisableRemoteAccess()\n self.enableDisableINDI()\n self.initConfig()\n self.mappingFunctions()\n self.checkPlatformDependableMenus()\n # print('main app', PyQt5.QtCore.QObject.thread(self), int(PyQt5.QtCore.QThread.currentThreadId()))\n # starting loop for cyclic data to gui from threads\n self.mainLoop()\n\n def workerAscomEnvironmentStop(self):\n self.threadAscomEnvironment.quit()\n self.threadAscomEnvironment.wait()\n\n def workerAscomEnvironmentSetup(self):\n # first stopping the thread for environment, than setting up, than starting the thread\n if self.workerAscomEnvironment.isRunning:\n self.workerAscomEnvironment.stop()\n self.workerAscomEnvironment.setupDriver()\n self.ui.le_ascomEnvironmentDriverName.setText(self.workerAscomEnvironment.driverName)\n self.threadAscomEnvironment.start()\n\n def setEnvironmentStatus(self, status):\n if status == 0:\n self.ui.btn_environmentConnected.setStyleSheet('QPushButton {background-color: gray;}')\n elif status == 1:\n self.ui.btn_environmentConnected.setStyleSheet('QPushButton {background-color: red;}')\n elif status == 2:\n self.ui.btn_environmentConnected.setStyleSheet('QPushButton {background-color: yellow;}')\n elif status == 3:\n self.ui.btn_environmentConnected.setStyleSheet('QPushButton {background-color: green;}')\n else:\n self.ui.btn_environmentConnected.setStyleSheet('QPushButton {background-color: black;}')\n\n def fillEnvironmentData(self):\n for valueName in self.workerAscomEnvironment.data:\n if valueName == 'DewPoint':\n self.ui.le_dewPoint.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'Temperature':\n self.ui.le_temperature.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'Humidity':\n self.ui.le_humidity.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'Pressure':\n self.ui.le_pressure.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'CloudCover':\n self.ui.le_cloudCover.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'RainRate':\n self.ui.le_rainRate.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'WindSpeed':\n self.ui.le_windSpeed.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'WindDirection':\n self.ui.le_windDirection.setText('{0:4.1f}'.format(self.workerAscomEnvironment.data[valueName]))\n elif valueName == 'SQR':\n self.ui.le_SQR.setText('{0:4.2f}'.format(self.workerAscomEnvironment.data[valueName]))\n self.modelWindow.ui.le_SQR.setText('{0:4.2f}'.format(self.workerAscomEnvironment.data[valueName]))\n\n def 
workerAscomDomeStop(self):\n self.threadAscomDome.quit()\n self.threadAscomDome.wait()\n\n def workerAscomDomeSetup(self):\n # first stopping the thread for environment, than setting up, than starting the thread\n if self.workerAscomDome.isRunning:\n self.workerAscomDome.stop()\n self.workerAscomDome.setupDriver()\n self.ui.le_ascomDomeDriverName.setText(self.workerAscomDome.driverName)\n self.threadAscomDome.start()\n\n def setDomeStatus(self, status):\n if status == 0:\n self.ui.btn_domeConnected.setStyleSheet('QPushButton {background-color: gray;}')\n elif status == 1:\n self.ui.btn_domeConnected.setStyleSheet('QPushButton {background-color: red;}')\n elif status == 2:\n self.ui.btn_domeConnected.setStyleSheet('QPushButton {background-color: yellow;}')\n elif status == 3:\n self.ui.btn_domeConnected.setStyleSheet('QPushButton {background-color: green;}')\n else:\n self.ui.btn_domeConnected.setStyleSheet('QPushButton {background-color: black;}')\n\n def workerRemoteStop(self):\n self.threadRemote.quit()\n self.threadRemote.wait()\n\n def workerUploadStop(self):\n self.threadUpload.quit()\n self.threadUpload.wait()\n\n def workerModelingStop(self):\n self.threadModeling.quit()\n self.threadModeling.wait()\n\n def enableDisableRemoteAccess(self):\n if self.ui.checkEnableRemoteAccess.isChecked():\n self.messageQueue.put('Remote Access enabled')\n self.threadRemote.start()\n else:\n self.messageQueue.put('Remote Access disabled')\n if self.workerRemote.isRunning:\n self.workerRemote.stop()\n\n # noinspection PyArgumentList\n def mappingFunctions(self):\n self.ui.btn_mountQuit.clicked.connect(self.saveConfigQuit)\n self.ui.btn_mountSave.clicked.connect(self.saveConfig)\n self.ui.btn_mountBoot.clicked.connect(self.mountBoot)\n self.ui.btn_mountShutdown.clicked.connect(self.mountShutdown)\n self.ui.btn_mountPark.clicked.connect(lambda: self.mountCommandQueue.put('hP'))\n self.ui.btn_mountUnpark.clicked.connect(lambda: self.mountCommandQueue.put('PO'))\n self.ui.btn_startTracking.clicked.connect(lambda: self.mountCommandQueue.put('AP'))\n self.ui.btn_stopTracking.clicked.connect(lambda: self.mountCommandQueue.put('RT9'))\n self.ui.btn_setTrackingLunar.clicked.connect(lambda: self.mountCommandQueue.put('RT0'))\n self.ui.btn_setTrackingSolar.clicked.connect(lambda: self.mountCommandQueue.put('RT1'))\n self.ui.btn_setTrackingSideral.clicked.connect(lambda: self.mountCommandQueue.put('RT2'))\n self.ui.btn_stop.clicked.connect(lambda: self.mountCommandQueue.put('STOP'))\n self.ui.btn_mountPos1.clicked.connect(self.mountPosition1)\n self.ui.btn_mountPos2.clicked.connect(self.mountPosition2)\n self.ui.btn_mountPos3.clicked.connect(self.mountPosition3)\n self.ui.btn_mountPos4.clicked.connect(self.mountPosition4)\n self.ui.btn_mountPos5.clicked.connect(self.mountPosition5)\n self.ui.btn_mountPos6.clicked.connect(self.mountPosition6)\n self.ui.le_parkPos1Text.textChanged.connect(lambda: self.ui.btn_mountPos1.setText(self.ui.le_parkPos1Text.text()))\n self.ui.le_parkPos2Text.textChanged.connect(lambda: self.ui.btn_mountPos2.setText(self.ui.le_parkPos2Text.text()))\n self.ui.le_parkPos3Text.textChanged.connect(lambda: self.ui.btn_mountPos3.setText(self.ui.le_parkPos3Text.text()))\n self.ui.le_parkPos4Text.textChanged.connect(lambda: self.ui.btn_mountPos4.setText(self.ui.le_parkPos4Text.text()))\n self.ui.le_parkPos5Text.textChanged.connect(lambda: self.ui.btn_mountPos5.setText(self.ui.le_parkPos5Text.text()))\n self.ui.le_parkPos6Text.textChanged.connect(lambda: 
self.ui.btn_mountPos6.setText(self.ui.le_parkPos6Text.text()))\n self.ui.btn_setHorizonLimitHigh.clicked.connect(self.setHorizonLimitHigh)\n self.ui.btn_setHorizonLimitLow.clicked.connect(self.setHorizonLimitLow)\n self.ui.btn_setSlewRate.clicked.connect(self.setSlewRate)\n self.ui.btn_setDualTracking.clicked.connect(self.setDualTracking)\n self.ui.btn_setUnattendedFlip.clicked.connect(self.setUnattendedFlip)\n if platform.system() == 'Windows':\n self.ui.btn_setupMountDriver.clicked.connect(self.mount.MountAscom.setupDriver)\n self.ui.btn_setupDomeDriver.clicked.connect(self.workerAscomDomeSetup)\n self.ui.btn_setupAscomEnvironmentDriver.clicked.connect(self.workerAscomEnvironmentSetup)\n self.ui.btn_setRefractionParameters.clicked.connect(lambda: self.mountCommandQueue.put('SetRefractionParameter'))\n # setting lambda make the signal / slot a dedicated call. So if you press cancel without lambda, the thread affinity is to modeling,\n # because the signal is passed to the event queue of modeling and handled there. If you press cancel with lambda, the thread\n # affinity is in main, because you don't transfer it to the other event queue, but you leave it to gui event queue.\n self.ui.btn_cancelModel1.clicked.connect(lambda: self.workerModeling.cancelModeling())\n self.ui.btn_cancelModel2.clicked.connect(lambda: self.workerModeling.cancelModeling())\n self.ui.btn_cancelAnalyseModel.clicked.connect(lambda: self.workerModeling.cancelAnalyseModeling())\n self.ui.le_horizonPointsFileName.doubleClicked.connect(self.modelWindow.selectHorizonPointsFileName)\n self.ui.le_modelPointsFileName.doubleClicked.connect(self.selectModelPointsFileName)\n self.ui.checkUseMinimumHorizonLine.stateChanged.connect(self.modelWindow.selectHorizonPointsMode)\n self.ui.checkUseFileHorizonLine.stateChanged.connect(self.modelWindow.selectHorizonPointsMode)\n self.ui.altitudeMinimumHorizon.valueChanged.connect(self.modelWindow.selectHorizonPointsMode)\n self.ui.le_analyseFileName.doubleClicked.connect(self.selectAnalyseFileName)\n self.ui.btn_showActualModel.clicked.connect(lambda: self.mountCommandQueue.put('ShowAlignmentModel'))\n self.ui.checkPolarPlot.clicked.connect(self.setShowAlignmentModelMode)\n self.ui.btn_setRefractionCorrection.clicked.connect(self.setRefractionCorrection)\n self.ui.btn_runTargetRMSAlignment.clicked.connect(lambda: self.mountCommandQueue.put('RunTargetRMSAlignment'))\n self.ui.btn_deleteWorstPoint.clicked.connect(lambda: self.mountCommandQueue.put('DeleteWorstPoint'))\n self.ui.btn_flipMount.clicked.connect(lambda: self.mountCommandQueue.put('FLIP'))\n self.ui.btn_saveBackupModel.clicked.connect(lambda: self.mountCommandQueue.put('SaveBackupModel'))\n self.ui.btn_loadBackupModel.clicked.connect(lambda: self.mountCommandQueue.put('LoadBackupModel'))\n self.ui.btn_saveSimpleModel.clicked.connect(lambda: self.mountCommandQueue.put('SaveSimpleModel'))\n self.ui.btn_loadSimpleModel.clicked.connect(lambda: self.mountCommandQueue.put('LoadSimpleModel'))\n self.ui.btn_saveRefinementModel.clicked.connect(lambda: self.mountCommandQueue.put('SaveRefinementModel'))\n self.ui.btn_loadRefinementModel.clicked.connect(lambda: self.mountCommandQueue.put('LoadRefinementModel'))\n self.ui.btn_saveBaseModel.clicked.connect(lambda: self.mountCommandQueue.put('SaveBaseModel'))\n self.ui.btn_loadBaseModel.clicked.connect(lambda: self.mountCommandQueue.put('LoadBaseModel'))\n self.ui.btn_saveDSO1Model.clicked.connect(lambda: self.mountCommandQueue.put('SaveDSO1Model'))\n 
self.ui.btn_loadDSO1Model.clicked.connect(lambda: self.mountCommandQueue.put('LoadDSO1Model'))\n self.ui.btn_saveDSO2Model.clicked.connect(lambda: self.mountCommandQueue.put('SaveDSO2Model'))\n self.ui.btn_loadDSO2Model.clicked.connect(lambda: self.mountCommandQueue.put('LoadDSO2Model'))\n self.ui.btn_openAnalyseWindow.clicked.connect(self.analyseWindow.showAnalyseWindow)\n self.ui.btn_openModelingPlotWindow.clicked.connect(self.modelWindow.showModelingPlotWindow)\n self.ui.btn_openImageWindow.clicked.connect(self.imageWindow.showImageWindow)\n self.ui.checkEnableRemoteAccess.stateChanged.connect(self.enableDisableRemoteAccess)\n self.ui.checkEnableINDI.stateChanged.connect(self.enableDisableINDI)\n\n def enableDisableINDI(self):\n # todo: enable INDI Subsystem as soon as INDI is tested\n if self.ui.checkEnableINDI.isChecked():\n self.INDIthread.start()\n else:\n self.INDIworker.stop()\n self.INDIthread.quit()\n self.INDIthread.wait()\n\n def mountBoot(self):\n wol.send_magic_packet(self.ui.le_mountMAC.text().strip())\n self.messageQueue.put('Send WOL and boot mount !')\n self.logger.debug('Send WOL packet and boot Mount')\n\n def mountShutdown(self):\n self.mountCommandQueue.put('Shutdown')\n\n def showModelErrorPolar(self):\n if not self.workerModeling.modelData:\n return\n data = dict()\n for i in range(0, len(self.workerModeling.modelData)):\n for (keyData, valueData) in self.workerModeling.modelData[i].items():\n if keyData == 'azimuth':\n return\n if keyData in data:\n data[keyData].append(valueData)\n else:\n data[keyData] = [valueData]\n self.modelWidget.fig.clf()\n self.modelWidget.axes = self.modelWidget.fig.add_subplot(1, 1, 1, polar=True)\n self.modelWidget.axes.grid(True, color='gray')\n self.modelWidget.fig.subplots_adjust(left=0.025, right=0.975, bottom=0.075, top=0.925)\n self.modelWidget.axes.set_facecolor((32/256, 32/256, 32/256))\n self.modelWidget.axes.tick_params(axis='x', colors='white')\n self.modelWidget.axes.tick_params(axis='y', colors='white')\n self.modelWidget.axes.set_theta_zero_location('N')\n self.modelWidget.axes.set_theta_direction(-1)\n self.modelWidget.axes.set_yticks(range(0, 90, 10))\n yLabel = ['', '80', '', '60', '', '40', '', '20', '', '0']\n self.modelWidget.axes.set_yticklabels(yLabel, color='white')\n azimuth = numpy.asarray(data['Azimuth'])\n altitude = numpy.asarray(data['Altitude'])\n # self.modelWidget.axes.plot(azimuth / 180.0 * math.pi, 90 - altitude, color='black')\n cm = plt.cm.get_cmap('RdYlGn_r')\n colors = numpy.asarray(data['ModelError'])\n # noinspection PyTypeChecker\n scaleError = int(max(colors) / 4 + 1) * 4\n area = [125 if x >= max(colors) else 50 for x in data['ModelError']]\n theta = azimuth / 180.0 * math.pi\n r = 90 - altitude\n scatter = self.modelWidget.axes.scatter(theta, r, c=colors, vmin=0, vmax=scaleError, s=area, cmap=cm)\n scatter.set_alpha(0.75)\n colorbar = self.modelWidget.fig.colorbar(scatter)\n colorbar.set_label('Error [arcsec]', color='white')\n plt.setp(plt.getp(colorbar.ax.axes, 'yticklabels'), color='white')\n self.modelWidget.axes.set_rmax(90)\n self.modelWidget.axes.set_rmin(0)\n self.modelWidget.draw()\n\n def checkASCOM(self):\n appAvailable, appName, appInstallPath = self.checkRegistrationKeys('ASCOM Platform')\n if appAvailable:\n self.messageQueue.put('Found: {0}'.format(appName))\n self.logger.info('Name: {0}, Path: {1}'.format(appName, appInstallPath))\n else:\n self.logger.warning('Application ASCOM not found on computer')\n\n def checkRegistrationKeys(self, appSearchName):\n if 
platform.machine().endswith('64'):\n regPath = 'SOFTWARE\\\\Wow6432Node\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall' # regpath for 64 bit windows\n else:\n regPath = 'SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall' # regpath for 32 bit windows\n appInstallPath = ''\n appInstalled = False\n appName = ''\n try:\n key = OpenKey(HKEY_LOCAL_MACHINE, regPath) # open registry\n for i in range(0, QueryInfoKey(key)[0]): # run through all registry application\n name = EnumKey(key, i) # get registry names of applications\n subkey = OpenKey(key, name) # open subkeys of applications\n for j in range(0, QueryInfoKey(subkey)[1]): # run through all subkeys\n values = EnumValue(subkey, j)\n if values[0] == 'DisplayName':\n appName = values[1]\n if values[0] == 'InstallLocation':\n appInstallPath = values[1]\n if appSearchName in appName:\n appInstalled = True\n CloseKey(subkey)\n break\n else:\n CloseKey(subkey) # closing the subkey for later usage\n CloseKey(key) # closing main key for later usage\n if not appInstalled:\n appInstallPath = ''\n appName = ''\n except Exception as e:\n self.logger.debug('Name: {0}, Path: {1}, error: {2}'.format(appName, appInstallPath, e))\n finally:\n return appInstalled, appName, appInstallPath\n\n def checkPlatformDependableMenus(self):\n if platform.system() != 'Windows':\n self.ui.settingsTabWidget.removeTab(5)\n self.ui.settingsTabWidget.removeTab(2)\n\n def initConfig(self):\n try:\n if 'ParkPosText1' in self.config:\n self.ui.le_parkPos1Text.setText(self.config['ParkPosText1'])\n self.ui.btn_mountPos1.setText(self.ui.le_parkPos1Text.text())\n if 'ParkPosAlt1' in self.config:\n self.ui.le_altParkPos1.setText(self.config['ParkPosAlt1'])\n if 'ParkPosAz1' in self.config:\n self.ui.le_azParkPos1.setText(self.config['ParkPosAz1'])\n if 'ParkPosText2' in self.config:\n self.ui.le_parkPos2Text.setText(self.config['ParkPosText2'])\n self.ui.btn_mountPos2.setText(self.ui.le_parkPos2Text.text())\n if 'ParkPosAlt2' in self.config:\n self.ui.le_altParkPos2.setText(self.config['ParkPosAlt2'])\n if 'ParkPosAz2' in self.config:\n self.ui.le_azParkPos2.setText(self.config['ParkPosAz2'])\n if 'ParkPosText3' in self.config:\n self.ui.le_parkPos3Text.setText(self.config['ParkPosText3'])\n self.ui.btn_mountPos3.setText(self.ui.le_parkPos3Text.text())\n if 'ParkPosAlt3' in self.config:\n self.ui.le_altParkPos3.setText(self.config['ParkPosAlt3'])\n if 'ParkPosAz3' in self.config:\n self.ui.le_azParkPos3.setText(self.config['ParkPosAz3'])\n if 'ParkPosText4' in self.config:\n self.ui.le_parkPos4Text.setText(self.config['ParkPosText4'])\n self.ui.btn_mountPos4.setText(self.ui.le_parkPos4Text.text())\n if 'ParkPosAlt4' in self.config:\n self.ui.le_altParkPos4.setText(self.config['ParkPosAlt4'])\n if 'ParkPosAz4' in self.config:\n self.ui.le_azParkPos4.setText(self.config['ParkPosAz4'])\n if 'ParkPosText5' in self.config:\n self.ui.le_parkPos5Text.setText(self.config['ParkPosText5'])\n self.ui.btn_mountPos5.setText(self.ui.le_parkPos5Text.text())\n if 'ParkPosAlt5' in self.config:\n self.ui.le_altParkPos5.setText(self.config['ParkPosAlt5'])\n if 'ParkPosAz5' in self.config:\n self.ui.le_azParkPos5.setText(self.config['ParkPosAz5'])\n if 'ParkPosText6' in self.config:\n self.ui.le_parkPos6Text.setText(self.config['ParkPosText6'])\n self.ui.btn_mountPos6.setText(self.ui.le_parkPos6Text.text())\n if 'ParkPosAlt6' in self.config:\n self.ui.le_altParkPos6.setText(self.config['ParkPosAlt6'])\n if 'ParkPosAz6' in self.config:\n 
self.ui.le_azParkPos6.setText(self.config['ParkPosAz6'])\n if 'ModelPointsFileName' in self.config:\n self.ui.le_modelPointsFileName.setText(self.config['ModelPointsFileName'])\n if 'CameraBin' in self.config:\n self.ui.cameraBin.setValue(self.config['CameraBin'])\n if 'CameraExposure' in self.config:\n self.ui.cameraExposure.setValue(self.config['CameraExposure'])\n if 'ISOSetting' in self.config:\n self.ui.isoSetting.setValue(self.config['ISOSetting'])\n if 'CheckFastDownload' in self.config:\n self.ui.checkFastDownload.setChecked(self.config['CheckFastDownload'])\n if 'SettlingTime' in self.config:\n self.ui.settlingTime.setValue(self.config['SettlingTime'])\n if 'CheckUseBlindSolve' in self.config:\n self.ui.checkUseBlindSolve.setChecked(self.config['CheckUseBlindSolve'])\n if 'TargetRMS' in self.config:\n self.ui.targetRMS.setValue(self.config['TargetRMS'])\n if 'PixelSize' in self.config:\n self.ui.pixelSize.setValue(self.config['PixelSize'])\n if 'FocalLength' in self.config:\n self.ui.focalLength.setValue(self.config['FocalLength'])\n if 'ScaleSubframe' in self.config:\n self.ui.scaleSubframe.setValue(self.config['ScaleSubframe'])\n if 'CheckDoSubframe' in self.config:\n self.ui.checkDoSubframe.setChecked(self.config['CheckDoSubframe'])\n if 'CheckKeepImages' in self.config:\n self.ui.checkKeepImages.setChecked(self.config['CheckKeepImages'])\n if 'CheckRunTrackingWidget' in self.config:\n self.modelWindow.ui.checkRunTrackingWidget.setChecked(self.config['CheckRunTrackingWidget'])\n if 'CheckClearModelFirst' in self.config:\n self.ui.checkClearModelFirst.setChecked(self.config['CheckClearModelFirst'])\n if 'CheckKeepRefinement' in self.config:\n self.ui.checkKeepRefinement.setChecked(self.config['CheckKeepRefinement'])\n if 'AltitudeBase' in self.config:\n self.ui.altitudeBase.setValue(self.config['AltitudeBase'])\n if 'AzimuthBase' in self.config:\n self.ui.azimuthBase.setValue(self.config['AzimuthBase'])\n if 'NumberGridPointsCol' in self.config:\n self.ui.numberGridPointsCol.setValue(self.config['NumberGridPointsCol'])\n if 'NumberGridPointsRow' in self.config:\n self.ui.numberGridPointsRow.setValue(self.config['NumberGridPointsRow'])\n if 'AltitudeMin' in self.config:\n self.ui.altitudeMin.setValue(self.config['AltitudeMin'])\n if 'AltitudeMax' in self.config:\n self.ui.altitudeMax.setValue(self.config['AltitudeMax'])\n if 'NumberPointsDSO' in self.config:\n self.ui.numberPointsDSO.setValue(self.config['NumberPointsDSO'])\n if 'NumberHoursDSO' in self.config:\n self.ui.numberHoursDSO.setValue(self.config['NumberHoursDSO'])\n if 'AnalyseFileName' in self.config:\n self.ui.le_analyseFileName.setText(self.config['AnalyseFileName'])\n if 'AltitudeTimeChange' in self.config:\n self.ui.altitudeTimeChange.setValue(self.config['AltitudeTimeChange'])\n if 'AzimuthTimeChange' in self.config:\n self.ui.azimuthTimeChange.setValue(self.config['AzimuthTimeChange'])\n if 'NumberRunsTimeChange' in self.config:\n self.ui.numberRunsTimeChange.setValue(self.config['NumberRunsTimeChange'])\n if 'DelayTimeTimeChange' in self.config:\n self.ui.delayTimeTimeChange.setValue(self.config['DelayTimeTimeChange'])\n if 'AltitudeHysterese1' in self.config:\n self.ui.altitudeHysterese1.setValue(self.config['AltitudeHysterese1'])\n if 'AltitudeHysterese2' in self.config:\n self.ui.altitudeHysterese2.setValue(self.config['AltitudeHysterese2'])\n if 'AzimuthHysterese1' in self.config:\n self.ui.azimuthHysterese1.setValue(self.config['AzimuthHysterese1'])\n if 'AzimuthHysterese2' in self.config:\n 
self.ui.azimuthHysterese2.setValue(self.config['AzimuthHysterese2'])\n            if 'NumberRunsHysterese' in self.config:\n                self.ui.numberRunsHysterese.setValue(self.config['NumberRunsHysterese'])\n            if 'DelayTimeHysterese' in self.config:\n                self.ui.delayTimeHysterese.setValue(self.config['DelayTimeHysterese'])\n            if 'WindowPositionX' in self.config:\n                self.move(self.config['WindowPositionX'], self.config['WindowPositionY'])\n        except Exception as e:\n            self.logger.error('Item in config.cfg could not be initialized, error: {0}'.format(e))\n        finally:\n            pass\n\n    def storeConfig(self):\n        self.config['ParkPosText1'] = self.ui.le_parkPos1Text.text()\n        self.config['ParkPosAlt1'] = self.ui.le_altParkPos1.text()\n        self.config['ParkPosAz1'] = self.ui.le_azParkPos1.text()\n        self.config['ParkPosText2'] = self.ui.le_parkPos2Text.text()\n        self.config['ParkPosAlt2'] = self.ui.le_altParkPos2.text()\n        self.config['ParkPosAz2'] = self.ui.le_azParkPos2.text()\n        self.config['ParkPosText3'] = self.ui.le_parkPos3Text.text()\n        self.config['ParkPosAlt3'] = self.ui.le_altParkPos3.text()\n        self.config['ParkPosAz3'] = self.ui.le_azParkPos3.text()\n        self.config['ParkPosText4'] = self.ui.le_parkPos4Text.text()\n        self.config['ParkPosAlt4'] = self.ui.le_altParkPos4.text()\n        self.config['ParkPosAz4'] = self.ui.le_azParkPos4.text()\n        self.config['ParkPosText5'] = self.ui.le_parkPos5Text.text()\n        self.config['ParkPosAlt5'] = self.ui.le_altParkPos5.text()\n        self.config['ParkPosAz5'] = self.ui.le_azParkPos5.text()\n        self.config['ParkPosText6'] = self.ui.le_parkPos6Text.text()\n        self.config['ParkPosAlt6'] = self.ui.le_altParkPos6.text()\n        self.config['ParkPosAz6'] = self.ui.le_azParkPos6.text()\n        self.config['ModelPointsFileName'] = self.ui.le_modelPointsFileName.text()\n        self.config['CameraBin'] = self.ui.cameraBin.value()\n        self.config['CameraExposure'] = self.ui.cameraExposure.value()\n        self.config['CheckFastDownload'] = self.ui.checkFastDownload.isChecked()\n        self.config['ISOSetting'] = self.ui.isoSetting.value()\n        self.config['SettlingTime'] = self.ui.settlingTime.value()\n        self.config['CheckUseBlindSolve'] = self.ui.checkUseBlindSolve.isChecked()\n        self.config['TargetRMS'] = self.ui.targetRMS.value()\n        self.config['PixelSize'] = self.ui.pixelSize.value()\n        self.config['FocalLength'] = self.ui.focalLength.value()\n        self.config['ScaleSubframe'] = self.ui.scaleSubframe.value()\n        self.config['CheckDoSubframe'] = self.ui.checkDoSubframe.isChecked()\n        self.config['CheckKeepImages'] = self.ui.checkKeepImages.isChecked()\n        self.config['CheckRunTrackingWidget'] = self.modelWindow.ui.checkRunTrackingWidget.isChecked()\n        self.config['AltitudeBase'] = self.ui.altitudeBase.value()\n        self.config['AzimuthBase'] = self.ui.azimuthBase.value()\n        self.config['NumberGridPointsRow'] = self.ui.numberGridPointsRow.value()\n        self.config['NumberGridPointsCol'] = self.ui.numberGridPointsCol.value()\n        self.config['AltitudeMin'] = self.ui.altitudeMin.value()\n        self.config['AltitudeMax'] = self.ui.altitudeMax.value()\n        self.config['NumberPointsDSO'] = self.ui.numberPointsDSO.value()\n        self.config['NumberHoursDSO'] = self.ui.numberHoursDSO.value()\n        self.config['WindowPositionX'] = self.pos().x()\n        self.config['WindowPositionY'] = self.pos().y()\n        self.config['AnalyseFileName'] = self.ui.le_analyseFileName.text()\n        self.config['AltitudeTimeChange'] = self.ui.altitudeTimeChange.value()\n        self.config['AzimuthTimeChange'] = self.ui.azimuthTimeChange.value()\n        self.config['NumberRunsTimeChange'] = self.ui.numberRunsTimeChange.value()\n        self.config['DelayTimeTimeChange'] = 
self.ui.delayTimeTimeChange.value()\n        self.config['AltitudeHysterese1'] = self.ui.altitudeHysterese1.value()\n        self.config['AltitudeHysterese2'] = self.ui.altitudeHysterese2.value()\n        self.config['AzimuthHysterese1'] = self.ui.azimuthHysterese1.value()\n        self.config['AzimuthHysterese2'] = self.ui.azimuthHysterese2.value()\n        self.config['NumberRunsHysterese'] = self.ui.numberRunsHysterese.value()\n        self.config['DelayTimeHysterese'] = self.ui.delayTimeHysterese.value()\n        self.config['CheckClearModelFirst'] = self.ui.checkClearModelFirst.isChecked()\n        self.config['CheckKeepRefinement'] = self.ui.checkKeepRefinement.isChecked()\n\n    def loadConfigData(self):\n        try:\n            with open('config/config.cfg', 'r') as data_file:\n                return json.load(data_file)\n        except Exception as e:\n            self.messageQueue.put('Config.cfg could not be loaded!')\n            self.logger.error('Item in config.cfg not loaded, error: {0}'.format(e))\n            return {}\n\n    def saveConfigData(self):\n        self.storeConfig()\n        self.mount.storeConfig()\n        self.workerModeling.storeConfig()\n        if platform.system() == 'Windows':\n            self.workerAscomEnvironment.storeConfig()\n            self.workerAscomDome.storeConfig()\n            self.workerUpload.storeConfig()\n        self.modelWindow.storeConfig()\n        self.imageWindow.storeConfig()\n        self.analyseWindow.storeConfig()\n        self.relays.storeConfig()\n        self.INDIworker.storeConfig()\n        try:\n            if not os.path.isdir(os.getcwd() + '/config'):  # if the config dir doesn't exist, make it\n                os.makedirs(os.getcwd() + '/config')  # if the path doesn't exist, create it\n            with open('config/config.cfg', 'w') as outfile:\n                json.dump(self.config, outfile)\n        except Exception as e:\n            self.messageQueue.put('Config.cfg could not be saved!')\n            self.logger.error('Item in config.cfg not saved, error: {0}'.format(e))\n            return\n        self.mount.saveActualModel()  # save the currently loaded model from the mount\n\n    def saveConfigQuit(self):\n        self.saveConfigData()\n        # noinspection PyArgumentList\n        QCoreApplication.instance().quit()\n\n    def saveConfig(self):\n        self.saveConfigData()\n        self.messageQueue.put('Configuration saved.')\n\n    def selectModelPointsFileName(self):\n        dlg = QFileDialog()\n        dlg.setViewMode(QFileDialog.List)\n        dlg.setNameFilter(\"Text files (*.txt)\")\n        dlg.setFileMode(QFileDialog.ExistingFile)\n        # noinspection PyArgumentList\n        a = dlg.getOpenFileName(self, 'Open file', os.getcwd()+'/config', 'Text files (*.txt)')\n        if a[0] != '':\n            self.ui.le_modelPointsFileName.setText(os.path.basename(a[0]))\n        else:\n            self.logger.warning('no file selected')\n\n    def selectAnalyseFileName(self):\n        dlg = QFileDialog()\n        dlg.setViewMode(QFileDialog.List)\n        dlg.setNameFilter(\"Data Files (*.dat)\")\n        dlg.setFileMode(QFileDialog.AnyFile)\n        # noinspection PyArgumentList\n        a = dlg.getOpenFileName(self, 'Open file', os.getcwd()+'/analysedata', 'Data Files (*.dat)')\n        if a[0] != '':\n            self.ui.le_analyseFileName.setText(os.path.basename(a[0]))\n        else:\n            self.logger.warning('no file selected')\n\n    def setHorizonLimitHigh(self):\n        _value = int(self.ui.le_horizonLimitHigh.text())\n        if _value < 0:\n            _value = 0\n        elif _value > 90:\n            _value = 90\n        self.mountCommandQueue.put('Sh+{0:02d}'.format(_value))\n\n    def setHorizonLimitLow(self):\n        _value = int(self.ui.le_horizonLimitLow.text())\n        if _value < 0:\n            _value = 0\n        elif _value > 90:\n            _value = 90\n        self.mountCommandQueue.put('So+{0:02d}'.format(_value))\n\n    def setDualTracking(self):\n        _value = self.ui.le_telescopeDualTrack.text()\n        if _value == 'ON':\n            _value = 0\n            self.ui.le_telescopeDualTrack.setText('OFF')\n        else:\n            _value = 1\n            
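# '1' enables dual axis tracking on the mount\n            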
self.ui.le_telescopeDualTrack.setText('ON')\n        self.mountCommandQueue.put('Sdat{0:01d}'.format(_value))\n\n    def setUnattendedFlip(self):\n        _value = self.ui.le_telescopeUnattendedFlip.text()\n        if _value == 'ON':\n            _value = 0\n            self.ui.le_telescopeUnattendedFlip.setText('OFF')\n        else:\n            _value = 1\n            self.ui.le_telescopeUnattendedFlip.setText('ON')\n        self.mountCommandQueue.put('Suaf{0:01d}'.format(_value))\n\n    def setSlewRate(self):\n        _value = int(self.ui.le_slewRate.text())\n        if _value < 1:\n            _value = 1\n        elif _value > 15:\n            _value = 15\n        self.mountCommandQueue.put('Sw{0:02d}'.format(_value))\n\n    def setRefractionCorrection(self):\n        _value = self.ui.le_refractionStatus.text()\n        if _value == 'ON':\n            _value = 0\n            self.ui.le_refractionStatus.setText('OFF')\n        else:\n            _value = 1\n            self.ui.le_refractionStatus.setText('ON')\n        self.mountCommandQueue.put('SREF{0:01d}'.format(_value))\n\n    def mountPosition1(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos1.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos1.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    def mountPosition2(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos2.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos2.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    def mountPosition3(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos3.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos3.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    def mountPosition4(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos4.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos4.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    def mountPosition5(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos5.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos5.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    def mountPosition6(self):\n        self.mountCommandQueue.put('PO')  # unpark first\n        self.mountCommandQueue.put('Sz{0:03d}*00'.format(int(self.ui.le_azParkPos6.text())))  # set az\n        self.mountCommandQueue.put('Sa+{0:02d}*00'.format(int(self.ui.le_altParkPos6.text())))  # set alt\n        self.mountCommandQueue.put('MA')  # start Slewing\n\n    @QtCore.Slot(int)\n    def setINDIStatus(self, status):\n        if status == 0:\n            self.ui.le_INDIStatus.setText('UnconnectedState')\n        elif status == 1:\n            self.ui.le_INDIStatus.setText('HostLookupState')\n        elif status == 2:\n            self.ui.le_INDIStatus.setText('ConnectingState')\n        elif status == 3:\n            self.ui.le_INDIStatus.setText('ConnectedState')\n        elif status == 6:\n            self.ui.le_INDIStatus.setText('ClosingState')\n        else:\n            self.ui.le_INDIStatus.setText('Error')\n\n    @QtCore.Slot(dict)\n    def fillINDIData(self, data):\n        if data['Name'] == 'Telescope':\n            self.ui.le_INDITelescope.setText(data['value'])\n        elif data['Name'] == 'CCD':\n            self.ui.le_INDICCD.setText(data['value'])\n        elif data['Name'] == 'WEATHER':\n            
self.ui.le_INDIWeather.setText(data['value'])\n elif data['Name'] == 'CameraStatus':\n self.imageWindow.ui.le_INDICameraStatus.setText(data['value'])\n\n def setShowAlignmentModelMode(self):\n if self.ui.checkPolarPlot.isChecked():\n self.ui.alignErrorStars.setVisible(False)\n else:\n self.ui.alignErrorStars.setVisible(True)\n\n @QtCore.Slot(bool)\n def setMountStatus(self, status):\n if status:\n self.ui.btn_driverMountConnected.setStyleSheet('QPushButton {background-color: green;}')\n else:\n self.ui.btn_driverMountConnected.setStyleSheet('QPushButton {background-color: red;}')\n\n @QtCore.Slot(dict)\n def fillMountData(self):\n for valueName in self.mount.data:\n if valueName == 'Reply':\n pass\n if valueName == 'DualAxisTracking':\n if self.mount.data[valueName] == '1':\n self.ui.le_telescopeDualTrack.setText('ON')\n else:\n self.ui.le_telescopeDualTrack.setText('OFF')\n if valueName == 'NumberAlignmentStars':\n self.ui.le_alignNumberStars.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelRMSError':\n self.ui.le_alignErrorRMS.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelErrorPosAngle':\n self.ui.le_alignErrorPosAngle.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelPolarError':\n self.ui.le_alignErrorPolar.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelOrthoError':\n self.ui.le_alignErrorOrtho.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelTerms':\n self.ui.le_alignNumberTerms.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelKnobTurnAz':\n self.ui.le_alignKnobTurnAz.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelKnobTurnAlt':\n self.ui.le_alignKnobTurnAlt.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelErrorAz':\n self.ui.le_alignErrorAz.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelErrorAlt':\n self.ui.le_alignErrorAlt.setText(str(self.mount.data[valueName]))\n if valueName == 'ModelStarError':\n self.ui.alignErrorStars.setText(self.mount.data[valueName])\n if valueName == 'CurrentHorizonLimitLow':\n self.ui.le_horizonLimitLow.setText(str(self.mount.data[valueName]))\n if valueName == 'CurrentHorizonLimitHigh':\n self.ui.le_horizonLimitHigh.setText(str(self.mount.data[valueName]))\n if valueName == 'CurrentSiteLongitude':\n self.ui.le_siteLongitude.setText(str(self.mount.data[valueName]))\n if valueName == 'CurrentSiteLatitude':\n self.ui.le_siteLatitude.setText(str(self.mount.data[valueName]))\n if valueName == 'CurrentSiteElevation':\n self.ui.le_siteElevation.setText(str(self.mount.data[valueName]))\n if valueName == 'JulianDate':\n self.ui.le_JulianDate.setText(str(self.mount.data[valueName]))\n if valueName == 'LocalSiderealTime':\n self.ui.le_localSiderealTime.setText(str(self.mount.data[valueName]))\n if valueName == 'TelescopeTempDEC':\n self.ui.le_telescopeTempDECMotor.setText(str(self.mount.data[valueName]))\n if valueName == 'RefractionTemperature':\n self.ui.le_refractionTemperature.setText(str(self.mount.data[valueName]))\n if valueName == 'RefractionPressure':\n self.ui.le_refractionPressure.setText(str(self.mount.data[valueName]))\n if valueName == 'RefractionStatus':\n if self.mount.data[valueName] == '1':\n self.ui.le_refractionStatus.setText('ON')\n else:\n self.ui.le_refractionStatus.setText('OFF')\n if valueName == 'MountStatus':\n self.ui.le_mountStatus.setText(str(self.mount.statusReference[self.mount.data[valueName]]))\n if valueName == 'TelescopeDEC':\n 
self.ui.le_telescopeDEC.setText(self.mount.data[valueName])\n if valueName == 'TelescopeRA':\n self.ui.le_telescopeRA.setText(str(self.mount.data[valueName]))\n if valueName == 'TelescopeAltitude':\n self.ui.le_telescopeAltitude.setText(str(self.mount.data[valueName]))\n self.modelWindow.ui.le_telescopeAltitude.setText(str(self.mount.data[valueName]))\n if valueName == 'TelescopeAzimuth':\n self.ui.le_telescopeAzimut.setText(str(self.mount.data[valueName]))\n self.modelWindow.ui.le_telescopeAzimut.setText(str(self.mount.data[valueName]))\n if valueName == 'SlewRate':\n self.ui.le_slewRate.setText(str(self.mount.data[valueName]))\n if valueName == 'MeridianLimitTrack':\n self.ui.le_meridianLimitTrack.setText(str(self.mount.data[valueName]))\n if valueName == 'MeridianLimitSlew':\n self.ui.le_meridianLimitSlew.setText(str(self.mount.data[valueName]))\n if valueName == 'UnattendedFlip':\n if self.mount.data[valueName] == '1':\n self.ui.le_telescopeUnattendedFlip.setText('ON')\n else:\n self.ui.le_telescopeUnattendedFlip.setText('OFF')\n if valueName == 'TimeToFlip':\n self.ui.le_timeToFlip.setText(str(self.mount.data[valueName]))\n if valueName == 'TimeToMeridian':\n self.ui.le_timeToMeridian.setText(str(self.mount.data[valueName]))\n if valueName == 'FirmwareProductName':\n self.ui.le_firmwareProductName.setText(str(self.mount.data[valueName]))\n if valueName == 'FirmwareNumber':\n self.ui.le_firmwareNumber.setText(str(self.mount.data[valueName]))\n if valueName == 'FirmwareDate':\n self.ui.le_firmwareDate.setText(str(self.mount.data[valueName]))\n if valueName == 'FirmwareTime':\n self.ui.le_firmwareTime.setText(str(self.mount.data[valueName]))\n if valueName == 'HardwareVersion':\n self.ui.le_hardwareVersion.setText(str(self.mount.data[valueName]))\n if valueName == 'TelescopePierSide':\n self.ui.le_telescopePierSide.setText(str(self.mount.data[valueName]))\n if valueName == 'UTCDataValid':\n if self.mount.data[valueName] == 'V':\n self.ui.le_UTCDataValid.setText('VALID')\n elif self.mount.data[valueName] == 'E':\n self.ui.le_UTCDataValid.setText('EXPIRED')\n else:\n self.ui.le_UTCDataValid.setText('INVALID')\n if valueName == 'UTCDataExpirationDate':\n self.ui.le_UTCDataExpirationDate.setText(str(self.mount.data[valueName]))\n\n @QtCore.Slot(int)\n def setCameraPlateStatus(self, status):\n if status == 3:\n self.ui.btn_camPlateConnected.setStyleSheet('QPushButton {background-color: green;}')\n elif status == 2:\n self.ui.btn_camPlateConnected.setStyleSheet('QPushButton {background-color: yellow;}')\n elif status == 1:\n self.ui.btn_camPlateConnected.setStyleSheet('QPushButton {background-color: red;}')\n else:\n self.ui.btn_camPlateConnected.setStyleSheet('QPushButton {background-color: gray;}')\n\n def mainLoop(self):\n self.fillMountData()\n self.fillEnvironmentData()\n while not self.INDIDataQueue.empty():\n data = self.INDIDataQueue.get()\n self.fillINDIData(data)\n while not self.messageQueue.empty():\n text = self.messageQueue.get()\n self.ui.errorStatus.setText(self.ui.errorStatus.toPlainText() + text + '\\n')\n self.messageQueue.task_done()\n self.ui.errorStatus.moveCursor(QTextCursor.End)\n while not self.imageQueue.empty():\n filename = self.imageQueue.get()\n if self.imageWindow.showStatus:\n self.imageWindow.showFitsImage(filename)\n while not self.modelLogQueue.empty():\n text = self.modelLogQueue.get()\n if text == 'delete':\n self.modelWindow.ui.modellingLog.clear()\n elif text == 'backspace':\n for i in range(0, 6):\n 
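# a 'backspace' message deletes the six most recently written characters from the modelling log\n                    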
self.modelWindow.ui.modellingLog.textCursor().deletePreviousChar()\n elif text.startswith('status'):\n self.modelWindow.ui.le_modelingStatus.setText(text[6:])\n elif text.startswith('percent'):\n self.modelWindow.ui.bar_modelingStatusPercent.setValue(int(1000 * float(text[7:])))\n elif text.startswith('timeleft'):\n self.modelWindow.ui.le_modelingStatusTime.setText(text[8:])\n elif text.startswith('#BW'):\n self.modelWindow.ui.modellingLog.setTextColor(self.COLOR_WHITE)\n self.modelWindow.ui.modellingLog.setFontWeight(QFont.Bold)\n self.modelWindow.ui.modellingLog.insertPlainText(text[3:])\n elif text.startswith('#BG'):\n self.modelWindow.ui.modellingLog.setTextColor(self.COLOR_GREEN)\n self.modelWindow.ui.modellingLog.setFontWeight(QFont.Bold)\n self.modelWindow.ui.modellingLog.insertPlainText(text[3:])\n elif text.startswith('#BY'):\n self.modelWindow.ui.modellingLog.setTextColor(self.COLOR_YELLOW)\n self.modelWindow.ui.modellingLog.setFontWeight(QFont.Bold)\n self.modelWindow.ui.modellingLog.insertPlainText(text[3:])\n else:\n self.modelWindow.ui.modellingLog.setTextColor(self.COLOR_ASTRO)\n self.modelWindow.ui.modellingLog.setFontWeight(QFont.Normal)\n self.modelWindow.ui.modellingLog.insertPlainText(text)\n self.modelWindow.ui.modellingLog.moveCursor(QTextCursor.End)\n self.modelLogQueue.task_done()\n # noinspection PyCallByClass,PyTypeChecker\n QTimer.singleShot(500, self.mainLoop)\n\n\nif __name__ == \"__main__\":\n import traceback\n import warnings\n\n def except_hook(typeException, valueException, tbackException): # manage unhandled exception here\n logging.error(traceback.format_exception(typeException, valueException, tbackException))\n sys.__excepthook__(typeException, valueException, tbackException) # then call the default handler\n\n BUILD_NO = '2.7.1 beta'\n\n warnings.filterwarnings(\"ignore\")\n name = 'mount.{0}.log'.format(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n handler = logging.handlers.RotatingFileHandler(name, backupCount=3)\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s [%(levelname)7s][%(filename)20s][%(lineno)5s][%(funcName)20s][%(threadName)10s] - %(message)s',\n handlers=[handler], datefmt='%Y-%m-%d %H:%M:%S')\n\n if not os.path.isdir(os.getcwd() + '/analysedata'):\n os.makedirs(os.getcwd() + '/analysedata')\n if not os.path.isdir(os.getcwd() + '/images'):\n os.makedirs(os.getcwd() + '/images')\n if not os.path.isdir(os.getcwd() + '/config'):\n os.makedirs(os.getcwd() + '/config')\n\n logging.info('-----------------------------------------')\n logging.info('MountWizzard v ' + BUILD_NO + ' started !')\n logging.info('-----------------------------------------')\n logging.info('Platform: ' + platform.system())\n logging.info('Release: ' + platform.release())\n logging.info('Version: ' + platform.version())\n logging.info('Machine: ' + platform.machine())\n\n logging.info('working directory: {0}'.format(os.getcwd()))\n if not os.access(os.getcwd(), os.W_OK):\n logging.error('no write access to workdir')\n if not os.access(os.getcwd() + '/images', os.W_OK):\n logging.error('no write access to /images')\n if not os.access(os.getcwd() + '/config', os.W_OK):\n logging.error('no write access to /config')\n if not os.access(os.getcwd() + '/analysedata', os.W_OK):\n logging.error('no write access to /analysedata')\n\n app = QApplication(sys.argv)\n\n sys.excepthook = except_hook\n # noinspection PyCallByClass,PyTypeChecker,PyArgumentList\n app.setStyle(QStyleFactory.create('Fusion'))\n app.setWindowIcon(QIcon('mw.ico'))\n\n mountApp = 
MountWizzardApp()\n    if mountApp.modelWindow.showStatus:\n        mountApp.modelWindow.redrawModelingWindow()\n        mountApp.modelWindow.showModelingPlotWindow()\n    if mountApp.imageWindow.showStatus:\n        mountApp.imageWindow.showImageWindow()\n    if mountApp.analyseWindow.showStatus:\n        mountApp.analyseWindow.showAnalyseWindow()\n    mountApp.show()\n    sys.exit(app.exec_())\n","sub_path":"mountwizzard/mountwizzard.py","file_name":"mountwizzard.py","file_ext":"py","file_size_in_byte":58287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"271958946","text":"import csv\nfrom datetime import datetime\n\nfrom dtk.generic.demographics import distribution_types\nfrom dtk.tools.demographics.Node import Node, nodeid_from_lat_lon\n\nfrom simtools.Utilities.General import init_logging\nlogger = init_logging('DemographicsGenerator')\n\nclass InvalidResolution(BaseException):\n    pass\n\nclass DemographicsGenerator:\n    \"\"\"\n    Generates demographics file based on population input file.\n    The population input file is csv with structure::\n\n        node_label*, lat, lon, pop*\n\n    *-ed columns are optional\n    \"\"\"\n\n    # mapping of requested arcsecond resolution -> demographic metadata arcsecond resolution.\n    # All Hash values must be integers.\n    CUSTOM_RESOLUTION = 'custom'\n    DEFAULT_RESOLUTION = 30\n    VALID_RESOLUTIONS = {\n        30: 30,\n        250: 250,\n        CUSTOM_RESOLUTION: 30\n    }\n\n    def __init__(self, cb, nodes, demographics_type='static', res_in_arcsec=DEFAULT_RESOLUTION,\n                 update_demographics=None, default_pop=1000):\n        \"\"\"\n        Initialize the DemographicsGenerator\n\n        :param cb: config builder reference, updated after the demographics file is generated.\n        :param demographics_type: could be 'static', 'growing' or a different type; currently only static is implemented in generate_nodes(self)\n        :param res_in_arcsec: sim grid resolution\n        :param update_demographics: provide the user with a chance to update the demographics file before it's written via a user-defined function; (e.g. 
scale larval habitats based on initial population per node in the demographics file) see generate_demographics(self)\n\n        :return:\n        \"\"\"\n        self.nodes = nodes\n\n        self.cb = cb\n        self.demographics_type = demographics_type\n        self.set_resolution(res_in_arcsec)\n        self.update_demographics = update_demographics\n\n        # demographics data dictionary (working DTK demographics file when dumped as json)\n        self.demographics = None\n        self.default_pop = default_pop\n\n    @staticmethod\n    def arcsec_to_deg(arcsec):\n        return arcsec / 3600.0\n\n    @classmethod\n    def from_file(cls, cb, population_input_file, demographics_type='static', res_in_arcsec=DEFAULT_RESOLUTION,\n                  update_demographics=None, default_pop=1000):\n        nodes_list = list()\n        with open(population_input_file, 'r') as pop_csv:\n            reader = csv.DictReader(pop_csv)\n            for row in reader:\n                # Latitude\n                if 'lat' not in row: raise ValueError('Column lat is required in input population file.')\n                lat = float(row['lat'])\n\n                # Longitude\n                if 'lon' not in row: raise ValueError('Column lon is required in input population file.')\n                lon = float(row['lon'])\n\n                # Node label\n                res_in_deg = cls.arcsec_to_deg(res_in_arcsec)\n                node_label = row['node_label'] if 'node_label' in row else nodeid_from_lat_lon(lat, lon, res_in_deg)\n\n                # Population\n                pop = int(float(row['pop'])) if 'pop' in row else default_pop\n\n                # Append the newly created node to the list\n                nodes_list.append(Node(lat, lon, pop, node_label))\n\n        return cls(cb, nodes_list, demographics_type, res_in_arcsec, update_demographics, default_pop)\n\n    def set_demographics_type(self, demographics_type):\n        self.demographics_type = demographics_type\n\n    def set_update_demographics(self, update_demographics):\n        self.update_demographics = update_demographics  # callback function\n\n    @classmethod\n    def validate_res_in_arcsec(cls, res_in_arcsec):\n        try:\n            cls.VALID_RESOLUTIONS[res_in_arcsec]\n        except KeyError:\n            raise InvalidResolution(\"%s is not a valid arcsecond resolution. Must be one of: %s\" %\n                                    (res_in_arcsec, cls.VALID_RESOLUTIONS.keys()))\n\n    def set_resolution(self, res_in_arcsec):\n        \"\"\"\n        The canonical way to set arcsecond/degree resolutions on a DemographicsGenerator object. Verifies everything\n        is set properly.\n\n        :param res_in_arcsec: The requested resolution. e.g. 30, 250, 'custom'\n\n        :return: No return value.\n        \"\"\"\n        self.validate_res_in_arcsec(res_in_arcsec)\n        self.res_in_arcsec = self.VALID_RESOLUTIONS[res_in_arcsec]\n        self.custom_resolution = True if res_in_arcsec == self.CUSTOM_RESOLUTION else False\n        self.res_in_degrees = self.arcsec_to_deg(self.res_in_arcsec)\n        logger.debug(\"Setting resolution to %s arcseconds (%s deg.) 
from selection: %s\" %\n (self.res_in_arcsec, self.res_in_degrees, res_in_arcsec))\n\n def generate_defaults(self):\n \"\"\"\n Generate the defaults section of the demographics file\n\n all of the below can be taken care of by a generic Demographics class\n (see note about refactor in dtk.generic.demographics)\n \"\"\"\n # Currently support only static population; after demographics related refactor this whole method will likely disappear anyway\n if self.demographics_type == 'static':\n self.cb.set_param(\"Birth_Rate_Dependence\", \"FIXED_BIRTH_RATE\")\n else:\n raise ValueError(\"Demographics type \" + str(self.demographics_type) + \" is not implemented!\")\n\n exponential_age_param = 0.0001068 # Corresponds to Kayin state age dist\n population_removal_rate = 23 # Based on live births & population in 2014 Kayin state census\n\n mod_mortality = {\n \"NumDistributionAxes\": 2,\n \"AxisNames\": [\"gender\", \"age\"],\n \"AxisUnits\": [\"male=0,female=1\", \"years\"],\n \"AxisScaleFactors\": [1, 365],\n \"NumPopulationGroups\": [2, 1],\n \"PopulationGroups\": [\n [0, 1],\n [0]\n ],\n \"ResultUnits\": \"annual deaths per 1000 individuals\",\n \"ResultScaleFactor\": 2.74e-06,\n \"ResultValues\": [\n [population_removal_rate],\n [population_removal_rate]\n ]\n }\n\n individual_attributes = {\n \"MortalityDistribution\": mod_mortality,\n \"AgeDistributionFlag\": distribution_types[\"EXPONENTIAL_DISTRIBUTION\"],\n \"AgeDistribution1\": exponential_age_param,\n \"RiskDistribution1\": 1,\n \"PrevalenceDistributionFlag\": 1,\n \"AgeDistribution2\": 0,\n \"PrevalenceDistribution1\": 0.13,\n \"PrevalenceDistribution2\": 0.15,\n \"RiskDistributionFlag\": 0,\n \"RiskDistribution2\": 0,\n \"MigrationHeterogeneityDistribution1\": 1,\n \"SusceptibilityDistributionFlag\": 0,\n \"MigrationHeterogeneityDistributionFlag\": 0,\n \"SusceptibilityDistribution1\": 1,\n \"MigrationHeterogeneityDistribution2\": 0,\n \"AgeDistributionFlag\": 3,\n \"SusceptibilityDistribution2\": 0\n }\n\n node_attributes = {\n \"Urban\": 0,\n \"AbovePoverty\": 0.5,\n \"Region\": 1,\n \"Seaport\": 0,\n \"Airport\": 0,\n \"Altitude\": 0\n }\n\n if self.default_pop:\n node_attributes.update({\"InitialPopulation\": self.default_pop})\n\n defaults = {\n 'IndividualAttributes': individual_attributes,\n 'NodeAttributes': node_attributes,\n }\n\n return defaults\n\n def generate_nodes(self):\n \"\"\"\n this function is currently replicated to a large extent in dtk.tools.demographics.node.nodes_for_DTK() but perhaps should not belong there\n it probably belongs to a generic Demographics class (also see one-liner note about refactor in dtk.generic.demographics)\n \"\"\"\n\n nodes = []\n for i, node in enumerate(self.nodes):\n # if res_in_degrees is custom assume node_ids are generated for a household-like setup and not based on lat/lon\n if self.custom_resolution:\n node_id = i + 1\n else:\n node_id = nodeid_from_lat_lon(float(node.lat), float(node.lon), self.res_in_degrees)\n node_attributes = node.to_dict()\n\n if self.demographics_type == 'static':\n # value correspond to a population removal rate of 45: 45/365\n birth_rate = (float(node.pop) / (1000 + 0.0)) * 0.12329\n node_attributes.update({'BirthRate': birth_rate})\n else:\n # perhaps similarly to the DTK we should have error logging modes and good generic types exception raising/handling\n # to avoid code redundancy\n print(self.demographics_type)\n raise ValueError(\"Demographics type \" + str(self.demographics_type) + \" is not implemented!\")\n\n nodes.append({'NodeID': node_id, 
'NodeAttributes': node_attributes})\n\n        return nodes\n\n    def generate_metadata(self):\n        \"\"\"\n        generate demographics file metadata\n        \"\"\"\n\n        metadata = {\n            \"Author\": \"idm\",\n            \"Tool\": \"dtk-tools\",\n            \"IdReference\": \"Gridded world grump%darcsec\" % self.res_in_arcsec,\n            \"DateCreated\": str(datetime.now()),\n            \"NodeCount\": len(self.nodes),\n            \"Resolution\": int(self.res_in_arcsec)\n        }\n\n        return metadata\n\n    def generate_demographics(self):\n        \"\"\"\n        return all demographics file components in a single dictionary; a valid DTK demographics file when dumped as json\n        \"\"\"\n        self.demographics = {'Nodes': self.generate_nodes(),\n                             'Defaults': self.generate_defaults(),\n                             'Metadata': self.generate_metadata()}\n\n        if self.update_demographics:\n            # update demographics before dict is written to file, via a user defined function and arguments\n            # self.update_demographics is a partial object (see python docs functools.partial) and self.update_demographics.func references the user's function\n            # the only requirement for the user defined function is that it needs to take a keyword argument demographics\n            self.update_demographics(demographics=self.demographics)\n\n        return self.demographics","sub_path":"dtk/tools/spatialworkflow/DemographicsGenerator.py","file_name":"DemographicsGenerator.py","file_ext":"py","file_size_in_byte":10276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"359841401","text":"\"\"\"\nRouter installation (공유기 설치) https://www.acmicpc.net/problem/2110\npypy3 : 292ms\npython3 : 5412ms\n\"\"\"\n\n\ndef fn(param):\n    count = 1\n    gong = 0\n    for i in range(1, n):\n        if data[i] - data[gong] >= param:\n            count += 1\n            gong = i\n    return count >= c\n\n\ndef search():\n    lo = -1\n    hi = data[-1] + 1\n    while lo + 1 < hi:\n        mid = (lo + hi) // 2  # mid is a candidate spacing between routers, not one of the house coordinates in data\n        if fn(mid):\n            lo = mid\n        else:\n            hi = mid\n    return lo\n\n\nn, c = map(int, input().split())\ndata = [int(input()) for _ in range(n)]\ndata.sort()\n\nprint(search())\n","sub_path":"8주차 이분탐색,그래프/공유기설치/하현준.py","file_name":"하현준.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"311468748","text":"# All imports here\nimport random\n\nclass QGenerator:\n    '''\n    A class that assists in auto-generating queries for the template generator\n    '''\n\n    # Number of queries that need to be generated\n    num = 0\n\n    def __init__(self, num):\n        '''\n        Parameterized constructor for taking the number of queries to be generated as input\n        :param num: Number of queries to be generated\n        '''\n        self.num = num\n\n    # Types of queries that can be generated for the choice of products\n    Queries = {\n        0: \"SELECT\",\n        1: \"INSERT\",\n        # 2: \"CREATE\",\n        # 2: \"DROP\",\n        # 3: \"UPDATE\",\n        # 4: \"DELETE\",\n        # 5: \"TRUNCATE\"\n    }\n\n    # List of Dummy Names that can be used with their\n    Names = {\n        0: \"Pavan\",\n        1: \"Yogeesh\",\n        2: \"Alankar\",\n        3: \"Tim\",\n        4: \"Andy\",\n        5: \"Robert\",\n        6: \"Anna\",\n        7: \"Cindy\",\n        8: \"Nona\",\n        9: \"Vandy\",\n        10: \"Dilan\"\n    }\n\n    # A list of addresses a user can belong to\n    UserAddress = {\n        0: \"California\",\n        1: \"New York\",\n        2: \"Arizona\",\n        3: \"Ohio\",\n        4: \"Washington\",\n        5: \"Texas\",\n        6: \"Philadelphia\",\n        7: \"New Jersey\",\n        8: \"Georgia\",\n        9: \"Florida\",\n        10: \"Vermont\",\n        11: \"Massachusetts\"\n    }\n\n    # A list of categories a user can choose from\n    Category = {\n        0: \"Mobile Phone\",\n        1: \"Laptop\",\n        2: \"Computer Mouse\",\n        3: \"Keyboard\",\n        4: 
\"Kindle\",\n 5: \"Book\",\n 6: \"CD\",\n 7: \"Headphones\",\n 8: \"Bag\",\n 9: \"Watch\",\n 10: \"Shoe\"\n }\n\n # A user's id which can be used as a foreign key\n userid = 1000\n\n def generateQueries(self):\n '''\n A method that generates queries randomly.\n :return: None\n '''\n # increase the range to generate however many queries are required to be generated\n for id in range(self.num):\n # generates a random query\n query = int(random.uniform(0, 2)) # Changes to be made here\n\n if query == 0:\n print('SELECT * from products where category = \"' + self.Category[int(random.uniform(0, 10))] \\\n + '\" and id = ' + str(self.userid) + ';', end=\" \\n\")\n self.userid += 1\n elif query == 1:\n print('INSERT INTO products(id, name, price, category) VALUES(' + str(self.userid) \\\n + \", '\" + (self.Names[int(random.uniform(0, 10))] + str(id)) \\\n + \"', \" + str(round(random.uniform(0, 1000), 2)) + ', \\'' \\\n + ', \\'' + self.Category[int(random.uniform(0, 10))] + '\\');', end=\" \\n\")\n self.userid += 1\n elif query == 2:\n print('SELECT * from products where category = \"' + self.Category[int(random.uniform(0, 10))] + '\"', end=\" \")\n elif query == 3:\n print('SELECT * from products where category = \"' + self.Category[int(random.uniform(0, 10))] + '\"', end=\" \")\n elif query == 4:\n print('SELECT * from products where category = \"' + self.Category[int(random.uniform(0, 10))] + '\"', end=\" \")\n else:\n print('SELECT * from products where category = \"' + self.Category[int(random.uniform(0, 10))] + '\"', end=\" \")\n\ndef main():\n '''\n The main program\n :return: None\n '''\n while True:\n num = input('Please enter the number of queries that need to be generated: (Eg: 10)')\n if num.isdigit():\n break\n\n num = int(num)\n # Object of class QGenerator\n qGen = QGenerator(num)\n\n # Starts the random query generation\n qGen.generateQueries()\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/QGenerator.py","file_name":"QGenerator.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"316183548","text":"#\n# Copyright (c) 2016 NORDUnet A/S\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or\n# without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# 3. Neither the name of the NORDUnet nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport os, binascii\nimport collections\nfrom time import time\nfrom eduid_common.session.session import SessionManager\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef manage(action):\n '''\n Decorator which causes a cookie to be set when a session method\n is called.\n\n :param action: Whether the session data has been changed or just accessed.\n When it has been changed, the call to session.commit()\n implies setting the ttl on the backend, so there is no need\n to set it explicitly.\n :type action: str ('accessed'|'changed')\n '''\n def outer(wrapped):\n def accessed(session, *arg, **kw):\n renew_backend = action == 'accessed'\n session.renew_ttl(renew_backend=renew_backend)\n return wrapped(session, *arg, **kw)\n accessed.__doc__ = wrapped.__doc__\n return accessed\n return outer\n\n\nclass SessionFactory(object):\n '''\n Session factory implementing the pyramid.interfaces.ISessionFactory\n interface.\n It uses the SessionManager defined in eduid_common.session.session\n to create sessions backed by redis.\n '''\n\n def __init__(self, settings):\n '''\n SessionFactory constructor.\n\n :param settings: the pyramid settings\n :type settings: dict\n '''\n cookie_max_age = int(settings.get('session.cookie_max_age'))\n # make sure that the data in redis outlives the session cookie\n session_ttl = 2 * cookie_max_age\n secret = settings.get('session.secret')\n self.manager = SessionManager(settings, ttl=session_ttl, secret=secret)\n\n def __call__(self, request):\n '''\n Create a session object for the given request.\n\n :param request: the request\n :type request: pyramid.request.Request\n\n :return: the session\n :rtype: Session\n '''\n raise NotImplementedError()\n\n\nclass Session(collections.MutableMapping):\n '''\n Session implementing the pyramid.interfaces.ISession interface.\n It uses the Session defined in eduid_common.session.session\n to store the session data in redis.\n '''\n\n def __init__(self, request, base_session, new=False):\n '''\n :param request: the request\n :type request: pyramid.request.Request\n :param base_session: The underlying session object\n :type base_session: eduid_common.session.session.Session\n :param new: whether the session is new or not.\n :type new: bool\n '''\n self.request = request\n self._session = base_session\n self._created = time()\n self._new = new\n self._ttl_reset = False\n\n @manage('accessed')\n def __getitem__(self, key, default=None):\n return self._session.__getitem__(key, default=None)\n\n @manage('changed')\n def __setitem__(self, key, value):\n self._session[key] = value\n self._session.commit()\n\n @manage('changed')\n def __delitem__(self, key):\n del self._session[key]\n self._session.commit()\n\n @manage('accessed')\n def __iter__(self):\n return self._session.__iter__()\n\n @manage('accessed')\n def __len__(self):\n return len(self._session)\n\n @manage('accessed')\n def __contains__(self, key):\n return self._session.__contains__(key)\n\n @property\n def created(self):\n 
'''\n See pyramid.interfaces.ISession\n '''\n return self._created\n\n @property\n def new(self):\n '''\n See pyramid.interfaces.ISession\n '''\n return self._new\n\n def invalidate(self):\n '''\n See pyramid.interfaces.ISession\n '''\n self._session.clear()\n name = self.request.registry.settings.get('session.key')\n domain = self.request.registry.settings.get('session.cookie_domain')\n path = self.request.registry.settings.get('session.cookie_path')\n\n def rm_cookie_callback(request, response):\n response.set_cookie(\n name=name,\n value=None,\n domain=domain,\n path=path,\n max_age=0\n )\n return True\n\n self.request.add_response_callback(rm_cookie_callback)\n\n def changed(self):\n '''\n See pyramid.interfaces.ISession\n '''\n self._session.commit()\n\n @manage('changed')\n def flash(self, msg, queue='', allow_duplicate=True):\n '''\n See pyramid.interfaces.ISession\n '''\n if not queue:\n queue = 'default'\n if 'flash_messages' not in self._session:\n self._session['flash_messages'] = {'default': []}\n if queue not in self._session['flash_messages']:\n self._session['flash_messages'][queue] = []\n if not allow_duplicate:\n if msg in self._session['flash_messages'][queue]:\n return\n self._session['flash_messages'][queue].append(msg)\n self._session.commit()\n\n @manage('changed')\n def pop_flash(self, queue=''):\n '''\n See pyramid.interfaces.ISession\n '''\n if not queue:\n queue = 'default'\n if 'flash_messages' not in self._session:\n self._session['flash_messages'] = {'default': []}\n if queue in self._session['flash_messages']:\n msgs = self._session['flash_messages'].pop(queue)\n self._session.commit()\n return msgs\n return []\n\n @manage('accessed')\n def peek_flash(self, queue=''):\n '''\n See pyramid.interfaces.ISession\n '''\n if not queue:\n queue = 'default'\n if 'flash_messages' not in self._session:\n self._session['flash_messages'] = {'default': []}\n return self._session['flash_messages'].get(queue, [])\n\n @manage('changed')\n def new_csrf_token(self):\n '''\n See pyramid.interfaces.ISession\n '''\n token = binascii.hexlify(os.urandom(20))\n self['_csrft_'] = token\n self._session.commit()\n return token\n\n @manage('accessed')\n def get_csrf_token(self):\n '''\n See pyramid.interfaces.ISession\n '''\n token = self.get('_csrft_', None)\n if token is None:\n token = self.new_csrf_token()\n return token\n\n def persist(self):\n '''\n Store the session data in the redis backend,\n and renew the ttl for it.\n '''\n self._session.commit()\n\n def renew_ttl(self, renew_backend):\n '''\n Reset the ttl for the session, both in the cookie and\n (if `renew_backend==True`) in the redis backend.\n\n :param renew_backend: whether to renew the ttl in the redis backend\n :type renew_backend: bool\n '''\n if not self._ttl_reset:\n self.set_cookie()\n if renew_backend:\n self._session.renew_ttl()\n self._ttl_reset = True\n\n def set_cookie(self):\n '''\n Set the session cookie with the token\n '''\n token = self._session.token\n settings = self.request.registry.settings\n session_name = settings.get('session.key')\n domain = settings.get('session.cookie_domain')\n path = settings.get('session.cookie_path')\n secure = settings.get('session.cookie_secure')\n httponly = settings.get('session.cookie_httponly')\n max_age = settings.get('session.cookie_max_age')\n\n def set_cookie_callback(request, response):\n response.set_cookie(\n name=session_name,\n value=token,\n domain=domain,\n path=path,\n secure=secure,\n httponly=httponly,\n max_age=max_age\n )\n return True\n\n 
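# register the callback so the session cookie is written onto the outgoing response\n        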
self.request.add_response_callback(set_cookie_callback)\n\n def delete(self):\n '''\n alias for invalidate\n '''\n self.invalidate()\n","sub_path":"src/eduid_common/session/pyramid_session.py","file_name":"pyramid_session.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"411313139","text":"from tkinter import *\n\n\ndef create_text(App, key, fontsize, background_color, given_text):\n setattr(\n App,\n key + '_text',\n Label(App.master,\n bg=background_color,\n text=given_text,\n font=(\"Fixedsys\", fontsize)\n )\n )\n\n\ndef create_variable(App, key, variable, fontsize, background_color):\n setattr(\n App,\n key + '_variable',\n IntVar()\n )\n getattr(App, key + '_variable').set(variable)\n\n setattr(\n App,\n key + '_value',\n Label(App.master,\n bg=background_color,\n textvariable=getattr(App, key + '_variable'),\n font=(\"Fixedsys\", fontsize)\n )\n )\n\n\ndef create_button(App, key):\n path = 'pictures/{}.png'.format(key)\n setattr(\n App,\n key + '_image',\n PhotoImage(file=path)\n )\n setattr(\n App,\n key + '_button',\n Label(App.master, borderwidth=0, image=getattr(App, key + '_image'))\n )\n getattr(\n App,\n key + '_button'\n ).bind('', lambda event: App.update(key))\n\n\ndef create_buttons_and_labels(App):\n \"\"\"Creation of the buttons\"\"\"\n\n # Background\n App.background_image = PhotoImage(file=\"pictures/background.png\")\n App.background_label = Label(App.master, image=App.background_image)\n App.first_background_color = '#663399'\n\n # Main info\n create_text(App, 'current_clicks', 30, App.first_background_color, \"YOUR RESPECT:\")\n create_variable(App, 'current_clicks', App.current_clicks, 30, App.first_background_color)\n App.current_clicks_value.config(width=13)\n\n # CLICKS\n create_text(App, 'click', 20, App.first_background_color, \"Power of F:\")\n create_variable(App, 'click', App.one_click, 20, App.first_background_color)\n\n # AUTO_CLICKS\n\n create_text(App, 'auto_click', 20, App.first_background_color, \"Respect per sec:\")\n create_variable(App, 'auto_click', App.auto_click, 20, App.first_background_color)\n\n # CLICK BUTTON\n App.click_button = Button(App.master,\n bg='#660066',\n activebackground='#993399',\n borderwidth=5,\n text='F',\n command=lambda: App.update(\"CLICK\"),\n font=(\"Fixedsys\", 50)\n )\n App.master.bind(\"\", lambda event: App.update(\"CLICK\"))\n\n # Auto-click buttons\n for name in App.names:\n create_button(App, name)\n create_variable(App, name + '_price', getattr(App, name + '_price'), 20, '#333399')\n\n create_button(App, 'one_click')\n create_variable(App, 'one_click_price', getattr(App, 'one_click_price'), 20, '#333399')\n","sub_path":"kayman233/review_1/src/buttons_and_labels.py","file_name":"buttons_and_labels.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651707450","text":"#\n#\n#\npokemon = {'Rattata': 19, 'Machop': 66, 'Seel': 86, 'Volbeat': 86, 'Solrock': 126}\np_names = []\np_number = []\nfor name, number in pokemon.items():\n p_names.append(name)\n p_number.append(number)\nprint('p_names =', p_names)\nprint('p_number =', p_number)\n\n#\n#\n#\n\ntrack_medal_counts = {'shot put': 1, 'long jump': 3, '100 meters': 2, '400 meters': 2, '100 meter hurdles': 3, 'triple jump': 3, 'steeplechase': 2, '1500 meters': 1, '5K': 0, '10K': 0, 'marathon': 0, '200 meters': 0, '400 meter hurdles': 0, 'high jump': 1}\ntrack_events = 
[]\nfor event in track_medal_counts.items():\n track_events.append(event[0])\nprint('track_events =', track_events)\n","sub_path":"c2/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"648655685","text":"import os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nextra = {}\nif sys.version_info >= (3, 0):\n extra.update(\n use_2to3=True,\n )\n\nreadme = os.path.join(os.path.dirname(__file__), 'README.rst')\n\nsetup(name='nbt2yaml',\n version=\"0.3.1\",\n description=\"Read and write Minecraft NBT files using Yaml.\",\n long_description=open(readme).read(),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n keywords='minecraft',\n author='Mike Bayer',\n author_email='mike_mp@zzzcomputing.com',\n url='http://bitbucket.org/zzzeek/nbt2yaml',\n license='BSD',\n packages=find_packages(exclude=['ez_setup', 'tests']),\n zip_safe=False,\n install_requires=['PyYAML'],\n scripts=[\n 'scripts/nbtedit',\n 'scripts/nbt2yaml',\n 'scripts/yaml2nbt'\n ],\n test_suite='nose.collector',\n tests_require=['nose'],\n **extra\n)\n","sub_path":"pypi_install_script/nbt2yaml-0.3.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"256105953","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'gocardless'\nurlpatterns = [\n url(\n r'^redirectflow/create/?$',\n views.RedirectFlowCreateView.as_view(),\n name='redirectflow-create'\n ),\n url(\n r'^redirectflow/success/?$',\n views.RedirectFlowSuccessView.as_view(),\n name='redirectflow-success'\n ),\n\n url(\n r'^webhook/?$',\n views.WebhookView.as_view(),\n name='webhook'\n ),\n url(\n r'^webhook/app/?$',\n views.WebhookView.as_view(),\n name='webhook-app',\n kwargs={\n 'is_app': True\n }\n ),\n\n url(\n r'^redirect/?$',\n views.OAuthRedirectView.as_view(),\n name='redirect'\n ),\n]\n","sub_path":"lunchbreak/django_gocardless/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"32389016","text":"import io\nimport json\nimport platform\nimport stat\nimport subprocess\nimport tempfile\nimport zipfile\nfrom pathlib import Path\n\nimport requests\n\nPLATFORM_TYPE = platform.system()\nBASE_DIR = Path.home() / \".antx\"\n\n\ndef get_bin_metadata():\n \"\"\"Return platfrom_type and binary_name.\"\"\"\n if \"Windows\" in PLATFORM_TYPE:\n return \"windows\", \"dmp.exe\"\n elif \"Darwin\" in PLATFORM_TYPE:\n return \"macos\", \"dmp\"\n else:\n return \"linux\", \"dmp\"\n\n\ndef get_dmp_bin_url(platform_type):\n response = requests.get(\"https://api.github.com/repos/Esukhia/node-dmp-cli/releases/latest\")\n version = response.json()[\"tag_name\"]\n return (\n f\"https://github.com/Esukhia/node-dmp-cli/releases/download/{version}/{platform_type}.zip\",\n version,\n )\n\n\ndef get_dmp_exe_path():\n out_dir = BASE_DIR / \"bin\"\n out_dir.mkdir(exist_ok=True, parents=True)\n\n platform_type, binary_name = get_bin_metadata()\n binary_path = out_dir / binary_name\n if binary_path.is_file():\n return binary_path\n\n url, version = get_dmp_bin_url(platform_type)\n print(f\"[INFO] Downloading node-dmp-cli-{version} ...\")\n r = requests.get(url, 
stream=True, timeout=50)\n\n # attempt 50 times to download the zip\n check = zipfile.is_zipfile(io.BytesIO(r.content))\n attempts = 0\n while not check and attempts < 50:\n r = requests.get(url, stream=True, timeout=50)\n check = zipfile.is_zipfile(io.BytesIO(r.content))\n attempts += 1\n\n if not check:\n raise IOError(\"the .zip file couldn't be downloaded.\")\n else:\n # extract the zip in the current folder\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path=str(out_dir))\n\n print(f\"[INFO] Download completed!\")\n\n # make the binary executable\n binary_path.chmod(binary_path.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n return str(binary_path)\n\n\nclass optimized_diff_match_patch:\n def __init__(self):\n self.binary_path = get_dmp_exe_path()\n\n @staticmethod\n def _save_text(text1, text2):\n tmpdir = Path(tempfile.gettempdir())\n text1_path = tmpdir / \"text1.txt\"\n text2_path = tmpdir / \"text2.txt\"\n text1_path.write_text(text1, encoding=\"utf-8\")\n text2_path.write_text(text2, encoding=\"utf-8\")\n return str(text1_path), str(text2_path)\n\n @staticmethod\n def _delete_text(text1_path, text2_path):\n Path(text1_path).unlink()\n Path(text2_path).unlink()\n\n @staticmethod\n def _unescape_lr(diffs):\n \"\"\"Unescape the line-return.\"\"\"\n for diff_type, diff_text in diffs:\n if \"Windows\" in PLATFORM_TYPE:\n yield (diff_type, diff_text.replace(\"\\r\\\\n\", \"\\n\"))\n else:\n yield (diff_type, diff_text.replace(\"\\\\n\", \"\\n\"))\n\n def diff_main(self, text1, text2):\n text1_path, text2_path = self._save_text(text1, text2)\n process = subprocess.Popen(\n [str(self.binary_path), \"diff\", text1_path, text2_path], stdout=subprocess.PIPE\n )\n stdout = process.communicate()[0]\n diffs = json.loads(stdout, encoding=\"utf-8\")\n diffs = self._unescape_lr(diffs)\n self._delete_text(text1_path, text2_path)\n return diffs\n","sub_path":".env/lib/python3.8/site-packages/antx/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"44042577","text":"#!/usr/bin/env python3\n\nfrom lxml import etree\nimport sys, os\nimport shlex\nfrom multiprocessing import Pool\nimport subprocess\nimport time\nimport shutil\n\ndef main():\n\n try:\n ipi_input = sys.argv[1]\n ipi_exe = sys.argv[2]\n driver_com = sys.argv[3]\n max_beads = int(sys.argv[4])\n max_drivers = int(sys.argv[5])\n title = sys.argv[6]\n except:\n print('uffa')\n die()\n\n if not os.path.isfile(ipi_input):\n sys.stderr.write('File {} is not a valid ipi input\\n'.format(ipi_input))\n die()\n \n # if not isexe(ipi_exe):\n # die()\n results = []\n driver_com = shlex.split(driver_com)\n\n drivers_test = [1,2,4,8]\n nbeads_test = [1,2,8]\n \n for nbeads in nbeads_test:\n for drivers in drivers_test:\n\n if drivers <= nbeads:\n print('\\n\\n@@@@@@@@@@@@@ DRIVERS: {:3d} NBEADS: {:3d} @@@@@@@@@@@@@'.format(drivers, nbeads))\n last = run(drivers, nbeads, ipi_input, ipi_exe, driver_com)\n results.append([nbeads, drivers, last])\n print('#{:7s} {:7s} {:12s}\\n'.format('nbeads', 'drivers', 'time'))\n for res in results:\n print(' {:7d} {:7d} {:12.6f}\\n'.format(res[0], res[1], res[2]))\n\n msg = '#{:7s} {:7s} {:12s}\\n'.format('nbeads', 'drivers', 'time')\n for res in results:\n msg += ' {:7d} {:7d} {:12.6f}\\n'.format(res[0], res[1], res[2])\n\n print(msg)\n with open(title + '-results.dat', 'w') as f:\n f.write(msg)\n \n \ndef run(drivers, nbeads, ipi_input, ipi_exe, driver_com):\n 
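# Write a temporary i-PI input with the requested bead count, launch i-PI and the\n    # driver pool, and return the wall-clock time until all drivers have finished.\n    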
with open('timing_ipi_input.xml', 'w') as tmp_file:\n tmp_file.write(make_input(ipi_input, nbeads))\n\n ipi_com = shlex.split(ipi_exe + ' timing_ipi_input.xml')\n\n # Start ipi\n ipi_pid = subprocess.Popen(ipi_com)\n time.sleep(2)\n\n starting_time = time.time()\n\n # Start the pool of the drivers\n pool = Pool(processes=drivers)\n \n out = [pool.apply_async(run_driver,(driver_com, 'run_'+str(idriver))) for idriver in range(drivers)]\n# out = [pool.apply_async(time.sleep,(1,)) for idriver in range(drivers)]\n # pool.terminate()\n pool.close()\n pool.join()\n os.wait()\n finished_at = time.time()\n\n return finished_at - starting_time\n\n\ndef run_driver(com,run_dir):\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n cwd = os.getcwd()\n os.chdir(run_dir)\n shutil.copy('../dftb_in.hsd', '.')\n try:\n dr = subprocess.Popen(com, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n # dr = subprocess.check_output(com)\n except:\n pass\n os.chdir(cwd)\n\ndef make_input(ipi_input_path, nbeads):\n with open(ipi_input_path) as inf:\n ipi_input = etree.parse(inf).getroot()\n initialization = ipi_input.find('.//initialize')\n initialization.set('nbeads', str(nbeads))\n return str(etree.tostring(ipi_input, pretty_print=True).decode('ascii'))\n\n\ndef die():\n print('Usage: '+os.path.basename(sys.argv[0])+' ')\n print('Error!')\n sys.exit()\n\ndef isexe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\nif __name__ == '__main__':\n main()\n","sub_path":"timing_ipi.py","file_name":"timing_ipi.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"319741894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 30 16:10:34 2020\n\n@author: lcawh\n\"\"\"\ndef clean(filename,var,varshort):\n '''Cleans the weirdly formatted climate data from Ouagadougou, return pandas\n df with just the date and variable.\n Arguments:\n ==========\n filename (str): CSV file in original-data folder\n var(str): variable in English we're looking at\n varshort (str): short version of the var in Fr which is in cell F2 (or\n whatever) to target cleaning'''\n \n import pandas as pd\n import numpy as np\n \n #load data and ditch na and extra rows\n names = ('day','01','02','03','04','05','06','07','08','09','10','11','12')\n df = pd.read_csv(f'../../02-data/original-data/climate/{filename}',names=names)\n df = df.dropna(how='all')\n df = df[(df.day!='DEC')&(df.day!='MOIS')&(df.day!='TOTAL')& \\\n (df.day!='DATE')&(df.day!='JRS')&(df['05']!=varshort)& \\\n (df['04']!='OUAG')]\n \n #add year column (all months have 31 days in this wretched sheet)\n years = []\n for i in range(1961,2005):\n yr = [i]*31\n years = years + yr\n df['year'] = years\n \n #rearrange columns\n cols = df.columns.tolist()\n cols2 = cols[-1:] + cols[:-1]\n df=df[cols2]\n \n #melt into long form and get rid of nans\n value_vars = ['01','02','03','04','05','06','07','08','09','10','11','12']\n df = pd.melt(df,id_vars=['year','day'],\n value_vars=value_vars,\n var_name='month',value_name=var)\n df=df.dropna(how='any')\n \n #add a datetime column for full date\n df['date'] = pd.to_datetime(df[['year','month','day']],dayfirst=True)\n \n #replace random nans bits and pieces\n df = df.replace(to_replace='**',value=np.nan)\n df = df.replace(to_replace='.',value=0)\n df = df.replace(to_replace='TR',value=0)\n df[var] = df[var].astype('float64')\n \n #final ouput just date and var\n final = df[['date',var]]\n \n return 
final\n\n#################\n# TEST FUNCTION #\n#################\n\nall_rain = clean('rainfall-19612003.csv','rainfall','PLUVI')\nall_rain","sub_path":"01-scripts/data-cleaning/clean_ouaga_data.py","file_name":"clean_ouaga_data.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271796033","text":"from fbchat import Client\nclass Command:\n\n def __init__(self, parameters, client:Client):\n self.user_params = parameters['user']\n self.author_id = parameters['author_id']\n self.message_object = parameters['message_object']\n self.thread_id = parameters['thread_id']\n self.thread_type = parameters['thread_type']\n self.client = client\n self.author = self.client.fetchUserInfo(self.author_id)[self.author_id]\n\n client.markAsDelivered(self.thread_id, self.message_object.uid)\n client.markAsRead(self.thread_id)\n\n self.run()\n\n def run(self):\n print(\"Running abstract command...\")\n return\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"103855193","text":"import base64\nimport hashlib\nimport hmac\nimport logging\nimport os\nimport time\n\nfrom http.cookies import SimpleCookie\n\nfrom cryptography.exceptions import InvalidTag\nfrom cryptography.hazmat.primitives.ciphers.aead import AESGCM\n\nfrom cryptojwt import as_bytes\nfrom cryptojwt import as_unicode\nfrom cryptojwt import safe_str_cmp\nfrom cryptojwt.jwe import JWEException\nfrom cryptojwt.jwe import split_ctx_and_tag\nfrom oidcendpoint.exception import InvalidCookieSign\nfrom oidcmsg.time_util import in_a_while\n\nfrom oidcservice import rndstr\nfrom oidcmsg import time_util\n\n__author__ = 'Roland Hedberg'\n\nlogger = logging.getLogger(__name__)\n\n\nCORS_HEADERS = [\n (\"Access-Control-Allow-Origin\", \"*\"),\n (\"Access-Control-Allow-Methods\", \"GET\"),\n (\"Access-Control-Allow-Headers\", \"Authorization\")\n]\n\n\ndef _expiration(timeout, time_format=None):\n \"\"\"\n Return an expiration time\n\n :param timeout: When\n :param time_format: The format of the returned value\n :return: A timeout date\n \"\"\"\n if timeout == \"now\":\n return time_util.instant(time_format)\n else:\n # validity time should match lifetime of assertions\n return time_util.in_a_while(minutes=timeout, time_format=time_format)\n\n\ndef cookie_signature(key, *parts):\n \"\"\"Generates a cookie signature.\n\n :param key: The HMAC key to use.\n :type key: bytes\n :param parts: List of parts to include in the MAC\n :type parts: list of bytes or strings\n :returns: hexdigest of the HMAC\n \"\"\"\n\n sha1 = hmac.new(as_bytes(key), digestmod=hashlib.sha1)\n for part in parts:\n if part:\n sha1.update(as_bytes(part))\n return str(sha1.hexdigest())\n\n\ndef verify_cookie_signature(sig, key, *parts):\n \"\"\"Constant time verifier for signatures\n\n :param sig: The signature hexdigest to check\n :type sig: str\n :param key: The HMAC key to use.\n :type key: bytes\n :param parts: List of parts to include in the MAC\n :type parts: list of bytes or strings\n :raises: `InvalidCookieSign` when the signature is wrong\n \"\"\"\n return safe_str_cmp(as_unicode(sig), cookie_signature(key, *parts))\n\n\ndef _make_hashed_key(parts, hashfunc='sha256'):\n \"\"\"\n Construct a key via hashing the parts\n\n If the parts do not have enough entropy of their\n own, this doesn't help.\n\n The size of the hash digest determines the size.\n 
\"\"\"\n h = hashlib.new(hashfunc)\n for part in parts:\n if part:\n h.update(as_bytes(part))\n return h.digest()\n\n\ndef make_cookie(name, load, seed, domain=\"\", path=\"\", timestamp=\"\",\n enc_key=None, max_age=0):\n \"\"\"\n Create and return a cookie\n\n The cookie is secured against tampering.\n\n If you only provide a `seed`, a HMAC gets added to the cookies value\n and this is checked, when the cookie is parsed again.\n\n If you provide both `seed` and `enc_key`, the cookie gets protected\n by using AEAD encryption. This provides both a MAC over the whole cookie\n and encrypts the `load` in a single step.\n\n The `seed` and `enc_key` parameters should be byte strings of at least\n 16 bytes length each. Those are used as cryptographic keys.\n\n :param name: Cookie name\n :type name: text\n :param load: Cookie load\n :type load: text\n :param seed: A seed key for the HMAC function\n :type seed: byte string\n :param domain: The domain of the cookie\n :param path: The path specification for the cookie\n :param timestamp: A time stamp\n :type timestamp: text\n :param enc_key: The key to use for cookie encryption.\n :type enc_key: byte string\n :param max_age: The time in seconds for when a cookie will be deleted\n :type max_age: int\n :return: A SimpleCookie instance\n \"\"\"\n cookie = SimpleCookie()\n if not timestamp:\n timestamp = str(int(time.time()))\n\n bytes_load = load.encode(\"utf-8\")\n bytes_timestamp = timestamp.encode(\"utf-8\")\n\n if enc_key:\n # Make sure the key is 256-bit long, for AES-128-SIV\n #\n # This should go away once we push the keysize requirements up\n # to the top level APIs.\n key = _make_hashed_key((enc_key, seed))\n\n # key = AESGCM.generate_key(bit_length=128)\n aesgcm = AESGCM(key)\n iv = os.urandom(12)\n\n # timestamp does not need to be encrypted, just MAC'ed,\n # so we add it to 'Associated Data' only.\n ct = split_ctx_and_tag(aesgcm.encrypt(iv, bytes_load, bytes_timestamp))\n\n ciphertext, tag = ct\n cookie_payload = [bytes_timestamp,\n base64.b64encode(iv),\n base64.b64encode(ciphertext),\n base64.b64encode(tag)]\n else:\n cookie_payload = [\n bytes_load, bytes_timestamp,\n cookie_signature(seed, load, timestamp).encode('utf-8')]\n\n cookie[name] = (b\"|\".join(cookie_payload)).decode('utf-8')\n if path:\n cookie[name][\"path\"] = path\n if domain:\n cookie[name][\"domain\"] = domain\n\n if max_age:\n cookie[name][\"expires\"] = in_a_while(seconds=max_age)\n\n return cookie\n\n\ndef parse_cookie(name, seed, kaka, enc_key=None):\n \"\"\"Parses and verifies a cookie value\n\n Parses a cookie created by `make_cookie` and verifies\n it has not been tampered with.\n\n You need to provide the same `seed` and `enc_key`\n used when creating the cookie, otherwise the verification\n fails. 
See `make_cookie` for details about the verification.\n\n    :param seed: A seed key used for the HMAC signature\n    :type seed: bytes\n    :param kaka: The cookie\n    :param enc_key: The encryption key used.\n    :type enc_key: bytes or None\n    :raises InvalidCookieSign: When verification fails.\n    :return: A tuple consisting of (payload, timestamp) or None if parsing fails\n    \"\"\"\n    if not kaka:\n        return None\n\n    seed = as_unicode(seed)\n\n    parts = cookie_parts(name, kaka)\n    if parts is None:\n        return None\n    elif len(parts) == 3:\n        # verify the cookie signature\n        clear_text, timestamp, sig = parts\n        if not verify_cookie_signature(sig, seed, clear_text, timestamp):\n            raise InvalidCookieSign()\n        return clear_text, timestamp\n    elif len(parts) == 4:\n        # encrypted and signed\n        timestamp = parts[0]\n        iv = base64.b64decode(parts[1])\n        ciphertext = base64.b64decode(parts[2])\n        tag = base64.b64decode(parts[3])\n        ct = ciphertext + tag\n\n        # Make sure the key is 32-Bytes long\n        key = _make_hashed_key((enc_key, seed))\n        aesgcm = AESGCM(key)\n\n        # timestamp does not need to be encrypted, just MAC'ed,\n        # so we add it to 'Associated Data' only.\n        aad = timestamp.encode('utf-8')\n        try:\n            cleartext = aesgcm.decrypt(iv, ct, aad)\n        except (JWEException, InvalidTag) as err:\n            raise InvalidCookieSign('{}'.format(err))\n        return cleartext.decode('utf-8'), timestamp\n    return None\n\n\ndef cookie_parts(name, kaka):\n    \"\"\"\n    Give me the parts of the cookie payload\n\n    :param name: A name of a cookie object\n    :param kaka: The cookie\n    :return: A list of parts or None if there is no cookie object with the\n        given name\n    \"\"\"\n    cookie_obj = SimpleCookie(as_unicode(kaka))\n    morsel = cookie_obj.get(name)\n    if morsel:\n        return morsel.value.split(\"|\")\n    else:\n        return None\n\n\nclass CookieDealer(object):\n    \"\"\"\n    Functionality that an entity that deals with cookies needs to have\n    access to.\n    \"\"\"\n    def __init__(self, symkey='', seed_file='seed.txt', cookie=None):\n        self.symkey = as_bytes(symkey)\n\n        for attr, default in {'path':'', 'domain':'', 'max_age':0}.items():\n            if attr not in cookie:\n                cookie[attr] = default\n\n        self.cookie = cookie\n\n        # Need to be able to restart the OP and still use the same seed\n        if os.path.isfile(seed_file):\n            _seed = open(seed_file).read()\n        else:\n            _seed = rndstr(48)\n            with open(seed_file, \"w\") as f:\n                f.write(_seed)\n\n        self.seed = as_bytes(_seed)\n\n    def delete_cookie(self, cookie_name=None):\n        \"\"\"\n        Create a cookie that will immediately expire when it hits the other\n        side.\n\n        :param cookie_name: Name of the cookie\n        :return: A tuple to be added to headers\n        \"\"\"\n        if cookie_name is None:\n            cookie_name = self.cookie['name']\n        \n        return self.create_cookie(\"\", \"\", cookie_name=cookie_name, ttl=-1,\n                                  kill=True)\n\n    def create_cookie(self, value, typ, cookie_name=None, ttl=-1, kill=False):\n        \"\"\"\n\n        :param value: Part of the cookie payload\n        :param typ: Type of cookie\n        :param cookie_name:\n        :param ttl: Number of minutes before this cookie goes stale\n        :param kill: Whether the cookie should expire on arrival\n        :return: A tuple to be added to headers\n        \"\"\"\n        if kill:\n            ttl = -1\n        elif ttl < 0:\n            ttl = self.cookie['max_age']\n\n        if cookie_name is None:\n            cookie_name = self.cookie['name']\n\n        try:\n            srvdomain = self.cookie['domain']\n            cookie_domain = \"\" if not srvdomain else srvdomain\n        except AttributeError:\n            cookie_domain = \"\"\n\n        try:\n            srvpath = self.cookie['path']\n            cookie_path = \"\" if not srvpath else srvpath\n        except AttributeError:\n            cookie_path = \"\"\n\n        # 
now\n timestamp = str(int(time.time()))\n\n # create cookie payload\n try:\n cookie_payload = \"::\".join([value, timestamp, typ])\n except TypeError:\n cookie_payload = \"::\".join([value[0], timestamp, typ])\n\n cookie = make_cookie(\n cookie_name, cookie_payload, self.seed,\n domain=cookie_domain, path=cookie_path, timestamp=timestamp,\n enc_key=self.symkey, max_age=ttl)\n\n return cookie\n\n def get_cookie_value(self, cookie=None, cookie_name=None):\n \"\"\"\n Return information stored in a Cookie\n\n :param cookie: A cookie instance\n :param cookie_name: The name of the cookie I'm looking for\n :return: tuple (value, timestamp, type)\n \"\"\"\n if cookie_name is None:\n cookie_name = self.cookie['name']\n\n if cookie is None or cookie_name is None:\n return None\n else:\n try:\n info, timestamp = parse_cookie(cookie_name, self.seed, cookie,\n self.symkey)\n except (TypeError, AssertionError):\n return None\n else:\n value, _ts, typ = info.split(\"::\")\n if timestamp == _ts:\n return value, _ts, typ\n return None\n","sub_path":"src/oidcop/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":10902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"521431889","text":"\n\nimport os\nimport argparse\nimport logging\n\nimport cv2\nimport numpy\n\n\ndef fix_image_size(image, expected_pixels=2E6):\n ratio = float(expected_pixels) / float(image.shape[0] * image.shape[1])\n return cv2.resize(image, (0, 0), fx=ratio, fy=ratio)\n\n\ndef estimate_blur(image, threshold=100):\n if image.ndim == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n blur_map = cv2.Laplacian(image, cv2.CV_64F)\n score = numpy.var(blur_map)\n return blur_map, score, bool(score < threshold)\n\n\ndef pretty_blur_map(blur_map, sigma=5):\n abs_image = numpy.log(numpy.abs(blur_map).astype(numpy.float32))\n cv2.blur(abs_image, (sigma, sigma))\n return cv2.medianBlur(abs_image, sigma)\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='run blur detection on a single image')\n parser.add_argument('-i', '--input_image', dest=\"input_image\", type=str, required=True, help=\"directory of images\")\n # parameters\n parser.add_argument(\"-t\", \"--threshold\", dest='threshold', type=float, default=100.0, help=\"blurry threshold\")\n parser.add_argument(\"-f\", \"--fix_size\", dest=\"fix_size\", help=\"fix the image size\", action=\"store_true\")\n # options\n parser.add_argument(\"-v\", \"--verbose\", dest='verbose', help='set logging level to debug', action=\"store_true\")\n parser.add_argument(\"-d\", \"--display\", dest='display', help='display images', action=\"store_true\")\n\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n assert os.path.exists(args.input_image)\n\n input_image = cv2.imread(args.input_image)\n\n if args.fix_size:\n input_image = fix_image_size(input_image)\n\n blur_map, score, blurry = estimate_blur(input_image)\n\n logging.info(\"score: {0}, blurry: {1}\".format(score, blurry))\n\n if args.display:\n cv2.imshow(\"input\", input_image)\n cv2.imshow(\"result\", pretty_blur_map(blur_map))\n cv2.waitKey(0)\n","sub_path":"single.py","file_name":"single.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"374120217","text":"from django.shortcuts import render\n\n#\nfrom django.http import HttpResponse, JsonResponse, 
HttpResponseRedirect\nfrom .models import EdinaiaTabliza, SpisokIzmeneniiEdinaiaTabliza\nfrom .serializers import EdinaiaTablizaSerializer, SpisokIzmeneniiEdinaiaTablizaSerializer\nfrom .forms import EdinaiaTablizaFormOUF, EdinaiaTablizaFormAll, EdinaiaTablizaFormOTK, EdinaiaTablizaFormSDO, \\\n EdinaiaTablizaFormUristi, EdinaiaTablizaFormBux\n\n\n# from rest_framework.parsers import JSONParser\n# from django.views.decorators.csrf import csrf_exempt\n#\n#\n# import datetime\n# import os\n# import uuid\n# from django.db.models import Q\n#\n# import xlrd\n# from django.shortcuts import render\n#\n# from django.http import HttpResponse, JsonResponse, HttpResponseRedirect\n# from rest_framework.parsers import JSONParser\n# from django.views.decorators.csrf import csrf_exempt\n#\n# from openpyxl import load_workbook\n# import pandas as pd\n\n\n# @csrf_exempt\n# def novaia_edinaia_tabliza_list(request):\n# if request.method == 'GET':\n# content = EdinaiaTabliza.objects.all()\n#\n# serializer = EdinaiaTablizaSerializer(content, many=True)\n# return JsonResponse({'data': serializer.data}, safe=False)\n#\n#\n\n#\n#\n# @csrf_exempt\n# def novaia_edinaia_tabliza_istoria_izmenenia(request, id):\n# try:\n# content = SpisokIzmeneniiEdinaiaTabliza.objects.filter(novaia_edinaia_tabliza_id=id)\n# except SpisokIzmeneniiEdinaiaTablizaSerializer.DoesNotExist:\n# return HttpResponse(status=404)\n#\n# if request.method == 'GET':\n# serializer = SpisokIzmeneniiEdinaiaTabliza(content, many=True)\n# return JsonResponse(serializer.data, safe=False)\n#\n#\n\n\ndef render_edinaia_tabliza(request):\n forma_redaktirovania_all = EdinaiaTablizaFormAll()\n forma_redaktirovania_ouf = EdinaiaTablizaFormOUF()\n forma_redaktirovania_otk = EdinaiaTablizaFormOTK()\n forma_redaktirovania_sdo = EdinaiaTablizaFormSDO()\n forma_redaktirovania_uristi = EdinaiaTablizaFormUristi()\n forma_redaktirovania_bux = EdinaiaTablizaFormBux()\n return render(request, 'edinaia_tabliza/views/edinaia_tabliza.html', {\n 'forma_redaktirovania_all': forma_redaktirovania_all,\n 'forma_redaktirovania_ouf': forma_redaktirovania_ouf,\n 'forma_redaktirovania_otk': forma_redaktirovania_otk,\n 'forma_redaktirovania_sdo': forma_redaktirovania_sdo,\n 'forma_redaktirovania_uristi': forma_redaktirovania_uristi,\n 'forma_redaktirovania_bux': forma_redaktirovania_bux,\n })\n\n\n#####\n# API\n#####\n\ndef edinaia_tabliza_lv(request):\n if request.method == 'GET':\n content = EdinaiaTabliza.objects.all()\n serializer = EdinaiaTablizaSerializer(content, many=True)\n return JsonResponse({'data': serializer.data}, safe=False)\n\n\n\ndef edinaia_tabliza_dv(request, id):\n try:\n content = EdinaiaTabliza.objects.get(pk=id)\n except EdinaiaTabliza.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = EdinaiaTablizaSerializer(content)\n istoria_izmenenia = SpisokIzmeneniiEdinaiaTabliza.objects.filter(edinaia_tabliza_id=id).order_by('data_redaktirovania')\n istoria_izmenenia = SpisokIzmeneniiEdinaiaTablizaSerializer(istoria_izmenenia, many=True)\n\n return JsonResponse({'data': serializer.data, 'istoria_izmenenia': istoria_izmenenia.data})\n\n # elif request.method == 'PUT':\n # data = JSONParser().parse(request)\n # if data['oldvalue'] == None:\n # data['oldvalue'] = ''\n #\n # if str(data['oldvalue']) == str(data['newvalue']):\n # return HttpResponse(\"Нет изменений\")\n #\n # EdinaiaTabliza.objects.filter(id=id).update(**{data['dataField']: data[\"newvalue\"]})\n # 
SpisokIzmeneniiEdinaiaTabliza.objects.create(debitori_nasele_id=id, stolbez=data['dataField'],\n # staroe_znachenie=data['oldvalue'], data_redaktirovania=datetime.datetime.now(),\n # novoe_znachenie=data['newvalue'], user=request.user)\n # return HttpResponse(\"Данные обновлены\")\n #\n # elif request.method == 'DELETE':\n # content.delete()\n # return HttpResponse(status=204)","sub_path":"apps/edinaia_tabliza/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13061496","text":"\"\"\"\n1.\tПользователь вводит данные о количестве предприятий, их наименования и прибыль\nза 4 квартала (т.е. 4 отдельных числа) для каждого предприятия.\nПрограмма должна определить среднюю прибыль (за год для всех предприятий)\nи вывести наименования предприятий, чья прибыль выше среднего и отдельно\nвывести наименования предприятий, чья прибыль ниже среднего.\n\nПодсказка:\nДля решения задачи обязательно примените какую-нибудь коллекцию из модуля collections\nДля лучшее освоения материала можете даже сделать несколько решений этого задания,\nприменив несколько коллекций из модуля collections\n\nПример:\nВведите количество предприятий для расчета прибыли: 2\nВведите название предприятия: Рога\nчерез пробел введите прибыль данного предприятия\nза каждый квартал(Всего 4 квартала): 235 345634 55 235\n\nВведите название предприятия: Копыта\nчерез пробел введите прибыль данного предприятия\nза каждый квартал(Всего 4 квартала): 345 34 543 34\n\nСредняя годовая прибыль всех предприятий: 173557.5\nПредприятия, с прибылью выше среднего значения: Рога\n\nПредприятия, с прибылью ниже среднего значения: Копыта\n\"\"\"\nfrom collections import namedtuple, defaultdict\n\n\"\"\"Решение с использованием namedtuple\"\"\"\n\nfirms = namedtuple('Firm', 'id name q_1 q_2 q_3 q_4 summ')\nfirms_list = []\n\nnum = int(input('Введите количество предприятий для расчета прибыли: '))\nfor i in range(1, num+1):\n id = i\n name = input(f'Введите название предприятия # {i}: ')\n q_1, q_2, q_3, q_4 = input('Через пробел введите прибыль данного предприятия за каждый квартал(всего 4 квартала)').split()\n summ = float(q_1) + float(q_2) + float(q_3) + float(q_4)\n firms_list.append(firms._make((id, name, q_1, q_2, q_3, q_4, summ)))\nprint(firms_list)\n\navg_profit = (sum([i.summ for i in firms_list]) / len(firms_list))\nprint(f'Средняя годовая прибыль всех предприятий: {avg_profit}')\nlist_above_avg = [i.name for i in firms_list if i.summ >= avg_profit]\nprint(f\"Предприятия, с прибылью выше или равно среднего значения: {';'.join(list_above_avg)}\")\nlist_below_avg = [i.name for i in firms_list if i.summ < avg_profit]\nprint(f\"Предприятия, с прибылью ниже среднего значения: {';'.join(list_below_avg)}\")\n\n\n\"\"\"Решение с использованием defaultdict\"\"\"\n\nfirms = defaultdict(list)\n\nnum = int(input('Введите количество предприятий для расчета прибыли: '))\nfor i in range(1, num+1):\n name = input(f'Введите название предприятия # {i}: ')\n profit = [float(i) for i in input('Через пробел введите прибыль данного предприятия за каждый квартал(всего 4 квартала): ').split()]\n firms[name] = profit\nprint(firms)\n\navg_profit = (sum([sum(i) for i in firms.values()]) / len(firms))\nprint(f'Средняя годовая прибыль всех предприятий: {avg_profit}')\nlist_above_avg = [key for key, value in firms.items() if sum(value) >= avg_profit]\nprint(f\"Предприятия, с прибылью выше или равно среднего значения: 
{';'.join(list_above_avg)}\")\nlist_below_avg = [key for key, value in firms.items() if sum(value) < avg_profit]\nprint(f\"Предприятия, с прибылью ниже среднего значения: {';'.join(list_below_avg)}\")\n\n","sub_path":"Урок 5. Практическое задание/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"147346973","text":"import tensorflow as tf\nimport time\nimport numpy as np\n\nimage_feature_description = {\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.float32),\n}\n\ndef parse_record(tfrecord):\n proto = tf.parse_single_example(tfrecord, image_feature_description)\n\n image_decoded = tf.image.decode_jpeg(proto['image_raw'], channels=3)\n image_float = tf.image.convert_image_dtype(image_decoded, tf.float32)\n\n return image_float, proto['label']\n\ndef load_dataset(filename):\n raw_dataset = tf.data.TFRecordDataset(filename)\n\n dataset = raw_dataset.map(parse_record)\n dataset = dataset.batch(10)\n return dataset\n\ntraining_dataset = load_dataset(\"D:\\\\speedchallenge\\\\temporal\\\\train.tfrecords\")\nvalidation_dataset = load_dataset(\"D:\\\\speedchallenge\\\\temporal\\\\validation.tfrecords\")\n\niterator = tf.data.Iterator.from_structure(training_dataset.output_types,\n training_dataset.output_shapes)\n\ntraining_init_op = iterator.make_initializer(training_dataset)\nvalidation_init_op = iterator.make_initializer(validation_dataset)\n\n# frame is 640 x 480 pixels, speed is a float\nframe, speed = iterator.get_next()\n\ndef residual_block(input, channels, downsample):\n shortcut = input\n strides = (1, 1)\n if downsample:\n strides = (2, 2)\n shortcut = tf.layers.conv2d(input, channels, (1, 1), strides=strides, padding='same')\n shortcut = tf.layers.batch_normalization(shortcut, training=True)\n \n conv1 = tf.layers.conv2d(input, channels, (3, 3), strides=(1, 1), padding='same')\n conv1 = tf.layers.batch_normalization(conv1, training=True)\n conv1 = tf.nn.relu(conv1)\n\n conv2 = tf.layers.conv2d(conv1, channels, (3, 3), strides=strides, padding='same')\n conv2 = tf.layers.batch_normalization(conv2, training=True)\n\n\n conv2 += shortcut\n output = tf.nn.relu(conv2)\n\n return output\n\n\nout = frame\n\n# 640x480x3 -> 20x15x64\nfor i in range(5):\n out = residual_block(out, 64, True)\n\nout = tf.reshape(out, (-1, 20*15*64))\nout = tf.layers.dense(out, 1)\n\nspeed = tf.expand_dims(speed, 1)\nloss = tf.losses.mean_squared_error(speed, out)\n\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(1, 31):\n sess.run(training_init_op)\n i = 0\n previous_step_time = time.time()\n while True:\n try:\n i += 1\n l, _ = sess.run((loss, train_step))\n if i % 100 == 0:\n current_step_time = time.time()\n time_elapsed = current_step_time - previous_step_time\n print('epoch {:d} - step {:d} - time {:.2f}s : loss {:.4f}'.format(epoch, i, time_elapsed, l))\n previous_step_time = current_step_time\n except tf.errors.OutOfRangeError:\n break\n\n sess.run(validation_init_op)\n validation_losses = []\n while True:\n try:\n validation_loss = sess.run(loss)\n validation_losses.append(validation_loss)\n except tf.errors.OutOfRangeError:\n break\n print('\\n\\nmse after {} epochs: {:.4f}\\n\\n'.format(epoch, 
np.mean(validation_losses)))\n","sub_path":"single_frame_predictor.py","file_name":"single_frame_predictor.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"202082306","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 27 12:03:26 2020\n\n@author: sw1906\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# ER Values Vs Control Up and down Expression\n\n\n\n# 7158 IDs (Before NAs removed)\n# 2015 Overexpressed,2478 Underexpressed\n\nER0 = pd.read_csv('GSE45584_ER0_Control.txt', sep = '\\t', header = 0)\nER0 = ER0.drop_duplicates(subset=['Gene.symbol'], keep='first')\n\nER0_OverExp = ER0.loc[ER0['logFC'] > 0]\nER0_OverExp = ER0_OverExp.dropna()\n\n\nER0_UnderExp = ER0.loc[ER0['logFC'] < 0]\nER0_UnderExp = ER0_UnderExp.dropna()\n\n\nER0_Over = ER0_OverExp['Gene.symbol'].tolist()\nER0_Under = ER0_UnderExp['Gene.symbol'].tolist()\n\n\n# 7073 IDs (Before NAs removed)\n# 2188 Overexpressed, 2207 Underexpressed\nER4 = pd.read_csv('GSE45584_ER4_Control.txt', sep = '\\t', header = 0)\nER4 = ER4.drop_duplicates(subset=['Gene.symbol'], keep='first')\n\nER4_OverExp = ER4.loc[ER4['logFC'] > 0]\nER4_OverExp = ER4_OverExp.dropna()\n\n\nER4_UnderExp = ER4.loc[ER4['logFC'] < 0]\nER4_UnderExp = ER4_UnderExp.dropna()\n\n\nER4_Over = ER4_OverExp['Gene.symbol'].tolist()\nER4_Under = ER4_UnderExp['Gene.symbol'].tolist()\n\n# 7126 IDs (Before NAs removed)\n# 2002 Overexpressed, 2309 Underexpressed\nER5 = pd.read_csv('GSE45584_ER5_Control.txt', sep = '\\t', header = 0)\nER5 = ER5.drop_duplicates(subset=['Gene.symbol'], keep='first')\n\nER5_OverExp = ER5.loc[ER5['logFC'] > 0]\nER5_OverExp = ER5_OverExp.dropna()\n\n\nER5_UnderExp = ER5.loc[ER5['logFC'] < 0]\nER5_UnderExp = ER5_UnderExp.dropna()\n\n\nER5_Over = ER5_OverExp['Gene.symbol'].tolist()\nER5_Under = ER5_UnderExp['Gene.symbol'].tolist()\n\n\n# 1041 overexpressed in all ER values\nSharedOverExp = set(ER0_Over) & set(ER4_Over) & set(ER5_Over)\n# 1090 underexpressed in all ER values\nSharedUnderExp = set(ER0_Under) & set(ER4_Under) & set(ER5_Under)\n\n#Look for genes which are underexpressed in some ER values and overexpressed in others\n\n# 19 genes under in ER0, over in ER5\nUnderER0_OverER5 = set(ER0_Under) & set(ER5_Over)\n# 14 genes over in ER5, under in ER0\nUnderER5_OverER0 = set(ER0_Over) & set(ER5_Under)\n\n# 10 genes under in ER0, over in ER4\nUnderER0_OverER4 = set(ER0_Under) & set(ER4_Over)\n# 13 genes over in ER4, under in ER0\nUnderER4_OverER0 = set(ER0_Over) & set(ER4_Under)\n\n# 11 genes under in ER5, over in ER4\nUnderER5_OverER4 = set(ER5_Under) & set(ER4_Over)\n# 8 genes over in ER4, under in ER5\nUnderER4_OverER5 = set(ER5_Over) & set(ER4_Under)\n\n# pairings of two groups showing same expression\n# 6 genes\nUnderER04_OverER5 = set(ER0_Under) & set(ER5_Over) & set(ER4_Under)\n# 6 genes\nOverER04_UnderER5 = set(ER5_Under) & set(ER4_Over) & set(ER0_Over)\n# 5 genes\nUnderER45_overER0 = set(ER5_Under) & set(ER4_Under) & set(ER0_Over)\n# 4 genes\nOverER45_underER0 = set(ER0_Under) & set(ER4_Over) & set(ER5_Over)\n# Weird middle ground jumps\n# 3 Genes\nUnderER05_OverER4 = set(ER0_Under) & set(ER4_Over) & set(ER5_Under)\n# 1 Gene\nOverER05_UnderER4 = set(ER4_Under) & set(ER5_Over) & set(ER0_Over)\n\n\n\n# 50 gene overall when comparing up/down expression between two of the three groups\nsetofall = list(set(list(UnderER0_OverER5) + list(UnderER5_OverER0) + 
list(UnderER0_OverER4) + list(UnderER4_OverER0) + list(UnderER5_OverER4) + list(UnderER4_OverER5)))\n\n# Take genes of interest and their logchanges into a single df\nER0.rename(columns={'logFC': 'ER0_logFC'}, inplace=True)\nER0 = ER0.dropna()\nER0 = ER0[['Gene.symbol', 'ER0_logFC']]\n\nER4.rename(columns={'logFC': 'ER4_logFC'}, inplace=True)\nER4 = ER4.dropna()\nER4 = ER4[['Gene.symbol', 'ER4_logFC']]\n\nER5.rename(columns={'logFC': 'ER5_logFC'}, inplace=True)\nER5 = ER5.dropna()\nER5 = ER5[['Gene.symbol', 'ER5_logFC']]\n\nER045 = pd.merge(ER0, ER4, on='Gene.symbol', how='inner')\nER045 = pd.merge(ER045, ER5, on='Gene.symbol', how='inner')\nER045.rename(columns={'Gene.symbol': 'Gene'}, inplace=True)\n\n\nER0ER5 = pd.merge(ER0, ER5, on='Gene.symbol', how='inner')\nER0ER5.rename(columns={'Gene.symbol': 'Gene'}, inplace=True)\nER0ER5[\"Control\"] = 0\n\n\n\n\n\n# Values for under/over ER0 and ER5\n#goi = list(set( list(UnderER0_OverER5) + list(UnderER5_OverER0) ))\n#ERgoi = ER0ER5[ER0ER5.Gene.isin(goi)]\n\n# Values if evalutating ER 4 as well\ngoi = list(setofall)\nERgoi = ER045[ER045.Gene.isin(goi)]\n\n\n# Lets visualise these genes\n\n\n# take log values and convert to array\nlogvalues = ERgoi[['ER0_logFC', 'ER4_logFC', 'ER5_logFC']]\nlogvalues = logvalues.to_numpy()\n\n\ngenes = ERgoi.Gene.tolist()\nloggroups = ['ER0_logFC', 'ER4_logFC','ER5_logFC']\n\nsns.set(rc={'figure.figsize':(11.7,8.27)})\nax = sns.heatmap(logvalues, xticklabels=loggroups, yticklabels=genes, cmap = 'PiYG')\n\n\n\n\n\n\n\n\n","sub_path":"DiffExpAnalysis.py","file_name":"DiffExpAnalysis.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"84558249","text":"#The model on the basis of which data analysis is performed: https://uk.wikipedia.org/wiki/%D0%92%D0%B5%D0%BB%D0%B8%D0%BA%D0%B0_%D0%BF%27%D1%8F%D1%82%D1%96%D1%80%D0%BA%D0%B0_(%D0%BF%D1%81%D0%B8%D1%85%D0%BE%D0%BB%D0%BE%D0%B3%D1%96%D1%8F)\r\n#Data source: https://www.kaggle.com/tunguz/big-five-personality-test\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport pycountry_convert\r\n\r\ndf = pd.read_csv('Datasets/bf_data.csv', sep='\\t')\r\n\r\ncols = df.columns.values[:50]\r\ndf[cols].dropna().astype(int)\r\n\r\n#New columns are created to merge the data (according to the test rules, some values ​​are negative)\r\ndf[\"EXT\"] = df[\"EXT1\"] - df[\"EXT2\"] + df[\"EXT3\"] - df[\"EXT4\"] + df[\"EXT5\"] - df[\"EXT6\"] + df[\"EXT7\"] - df[\"EXT8\"] + df[\"EXT9\"] - df[\"EXT10\"]\r\ndf[\"EST\"] = df[\"EST1\"] - df[\"EST2\"] + df[\"EST3\"] - df[\"EST4\"] + df[\"EST5\"] + df[\"EST6\"] + df[\"EST7\"] + df[\"EST8\"] + df[\"EST9\"] + df[\"EST10\"]\r\ndf[\"AGR\"] = - df[\"AGR1\"] + df[\"AGR2\"] - df[\"AGR3\"] + df[\"AGR4\"] - df[\"AGR5\"] + df[\"AGR6\"] - df[\"AGR7\"] + df[\"AGR8\"] + df[\"AGR9\"] + df[\"AGR10\"]\r\ndf[\"CSN\"] = df[\"CSN1\"] - df[\"CSN2\"] + df[\"CSN3\"] - df[\"CSN4\"] + df[\"CSN5\"] - df[\"CSN6\"] + df[\"CSN7\"] - df[\"CSN8\"] + df[\"CSN9\"] + df[\"CSN10\"]\r\ndf[\"OPN\"] = df[\"OPN1\"] - df[\"OPN2\"] + df[\"OPN3\"] - df[\"OPN4\"] + df[\"OPN5\"] - df[\"OPN6\"] + df[\"OPN7\"] - df[\"OPN8\"] + df[\"OPN9\"] + df[\"OPN10\"]\r\n\r\n#Correlation\r\nsns.heatmap(df[[\"EXT\", \"AGR\", \"CSN\", \"EST\", \"OPN\"]].corr(), annot=True).set_title(\"Кореляція 5 якостей людини\")\r\n\r\n#Distribution (full range of data)\r\nfig, axs = plt.subplots(ncols = 2, nrows = 3)\r\nsns.distplot(df[\"EXT\"], bins = 50, kde = False, ax = axs[0, 
0]).set_title(\"Екстраверсія\")\r\nsns.distplot(df[\"EST\"], bins = 50, kde = False, ax = axs[0, 1]).set_title(\"Нейротизм\")\r\nsns.distplot(df[\"AGR\"], bins = 50, kde = False, ax = axs[1, 0]).set_title(\"Доброзичливість\")\r\nsns.distplot(df[\"CSN\"], bins = 50, kde = False, ax = axs[1, 1]).set_title(\"Сумлінність\")\r\nsns.distplot(df[\"OPN\"], bins = 50, kde = False, ax = axs[2, 0]).set_title(\"Відкритість\")\r\nfig.delaxes(axs[2, 1])\r\n\r\n#Distribution (Ukraine)\r\n\r\n#1\r\nfig, ax = plt.subplots()\r\ntraits = [\"EXT\", \"AGR\", \"CSN\", \"EST\", \"OPN\"]\r\nbf_ua = df.loc[df['country'] == 'UA', [\"EXT\", \"AGR\", \"CSN\", \"EST\", \"OPN\"]].dropna().astype(int)\r\nfor t in traits:\r\n\tsns.distplot(df[df['country'] == 'UA'][t], hist=False, label = t)\r\nplt.legend()\r\n\r\n\r\n#2\r\nfig, axs = plt.subplots(ncols = 2, nrows = 3)\r\nsns.distplot(bf_ua[\"EXT\"], bins = 50, kde = False, ax = axs[0, 0]).set_title(\"Екстраверсія\")\r\nsns.distplot(bf_ua[\"EST\"], bins = 50, kde = False, ax = axs[0, 1]).set_title(\"Нейротизм\")\r\nsns.distplot(bf_ua[\"AGR\"], bins = 50, kde = False, ax = axs[1, 0]).set_title(\"Доброзичливість\")\r\nsns.distplot(bf_ua[\"CSN\"], bins = 50, kde = False, ax = axs[1, 1]).set_title(\"Сумлінність\")\r\nsns.distplot(bf_ua[\"OPN\"], bins = 50, kde = False, ax = axs[2, 0]).set_title(\"Відкритість\")\r\nfig.delaxes(axs[2, 1])\r\n\r\n\r\nplt.show()\r\n","sub_path":"Seaborn/big_five.py","file_name":"big_five.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325618681","text":"#coordinates in OpenCV format! => (y, x)\n\nimport draw_func\nfrom PIL import Image\n\nsize = 50\n\ndef calculate(func):\n _maxsize = (100 * 2)+ 2\n #write into file to execute string\n with open(\"./func.py\", \"w\") as f:\n f.write(\"import math\\n\\ndef f(x):\\n return {}\".format(func))\n f.close()\n\n #import and run\n try:\n import func\n except:\n print(\"Error importing custom function! Missing file or syntax error?\")\n result = []\n for x in range(-size, size+1):\n try:\n y = func.f(x)\n except :\n print(\"Syntax error! 
Remember to use correct Python 3 math syntax!\")\n result.append((y, x))\n if(y > _maxsize):\n _maxsize = (y*2) + 2\n \n return(result, _maxsize)\n\nresult, maxsize = calculate(input(\"f(x)=\"))\ndraw_func.draw(result, maxsize)\n\nif(True):\n Image.open(\"./func_graph.png\").show()\n","sub_path":"calc_func.py","file_name":"calc_func.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262992033","text":"\r\n\"\"\"\r\nCreated on Sun Nov 29 16:16:28 2020\r\n\r\n@author: Guney Doruk\r\n\"\"\"\r\n\r\nimport cv2 \r\n\r\ndirectory = r'C:\\Captured images'\r\n \r\nvideo_capture = cv2.VideoCapture(0) \r\n\r\nsuccess = video_capture.isOpened()\r\n\r\nif(success == False):\r\n video_capture.open(0)\r\n success = True\r\n \r\ni = 0\r\nfps = video_capture.get(cv2.CAP_PROP_FPS)\r\nprint(fps)\r\n\r\nwhile success: \r\n \r\n ret, frame = video_capture.read() \r\n \r\n cv2.imshow('frame', frame) \r\n \r\n if cv2.waitKey(1) & 0xFF == ord('q'): \r\n break\r\n \r\n if cv2.waitKey(100) & 0xFF == ord('f'): \r\n name = \"\\deneme_\"+str(i)+ \".jpeg\"\r\n path = directory + name\r\n cv2.imwrite(path,frame)\r\n i += 1\r\n \r\nvideo_capture.release() \r\ncv2.destroyAllWindows() \r\n","sub_path":"Capture.py","file_name":"Capture.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"86141920","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views import generic\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nimport stripe\n\nfrom djvideomem.content.models import Pricing\n\nUser = get_user_model()\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\n@csrf_exempt\ndef webhook(request):\n # You can use webhooks to receive information about asynchronous payment events.\n # For more about our webhook events check out https://stripe.com/docs/webhooks.\n webhook_secret = settings.STRIPE_WEBHOOK_SECRET\n payload = request.body\n \n # Retrieve the event by verifying the signature using the raw body and secret if webhook signing is configured.\n signature = request.META[\"HTTP_STRIPE_SIGNATURE\"]\n try:\n event = stripe.Webhook.construct_event(\n payload=payload, sig_header=signature, secret=webhook_secret)\n data = event['data']\n except Exception as e:\n return e\n # Get the type of webhook event sent - used to check the status of PaymentIntents.\n event_type = event['type']\n data_object = data['object']\n\n if event_type == 'invoice.paid':\n # Used to provision services after the trial has ended.\n # The status of the invoice will show up as paid. 
Store the status in your\n # database to reference when a user accesses your service to avoid hitting rate\n # limits.\n # TODO: change the users subscription and pricing\n print(data)\n\n webhook_object = data[\"object\"]\n stripe_customer_id = webhook_object[\"customer\"]\n\n stripe_sub = stripe.Subscription.retrieve(webhook_object[\"subscription\"])\n print(stripe_sub)\n stripe_price_id = stripe_sub[\"plan\"][\"id\"]\n\n pricing = Pricing.objects.get(stripe_price_id=stripe_price_id) # pylint: disable=maybe-no-member\n\n user = User.objects.get(stripe_customer_id=stripe_customer_id)\n user.subscription.status = stripe_sub[\"status\"]\n user.subscription.stripe_subscription_id = webhook_object[\"subscription\"]\n user.subscription.pricing = pricing\n user.subscription.save()\n\n if event_type == 'invoice.payment_failed':\n # If the payment fails or the customer does not have a valid payment method,\n # an invoice.payment_failed event is sent, the subscription becomes past_due.\n # Use this webhook to notify your user that their payment has\n # failed and to retrieve new card details.\n print(data)\n\n if event_type == 'invoice.finalized':\n # If you want to manually send out invoices to your customers\n # or store them locally to reference to avoid hitting Stripe rate limits.\n print(data)\n\n if event_type == 'customer.subscription.deleted':\n # handle subscription cancelled automatically based\n # upon your subscription settings. Or if the user cancels it.\n webhook_object = data[\"object\"]\n stripe_customer_id = webhook_object[\"customer\"]\n stripe_sub = stripe.Subscription.retrieve(webhook_object[\"id\"])\n user = User.objects.get(stripe_customer_id=stripe_customer_id)\n user.subscription.status = stripe_sub[\"status\"]\n user.subscription.save()\n\n if event_type == 'customer.subscription.trial_will_end':\n # Send notification to your user that the trial will end\n print(data)\n\n return HttpResponse()\n\n\nclass EnrollView(generic.TemplateView):\n template_name = \"payment/enroll.html\"\n\n\nclass PaymentView(generic.TemplateView):\n template_name = \"payment/checkout.html\"\n\n def get_context_data(self, **kwargs):\n context = super(PaymentView, self).get_context_data(**kwargs)\n pricing = get_object_or_404(Pricing, slug=kwargs[\"slug\"])\n context.update({\n \"pricing_tier\": pricing,\n \"STRIPE_PUBLIC_KEY\": settings.STRIPE_PUBLIC_KEY\n })\n return context\n\n\nclass CreateSubscriptionView(APIView):\n def post(self, request, *args, **kwargs):\n data = request.data\n print(data)\n customer_id = request.user.stripe_customer_id\n print(customer_id)\n\n try:\n # Attach the payment method to the customer\n stripe.PaymentMethod.attach(\n data['paymentMethodId'],\n customer=customer_id,\n )\n # Set the default payment method on the customer\n stripe.Customer.modify(\n customer_id,\n invoice_settings={\n 'default_payment_method': data['paymentMethodId'],\n },\n )\n\n # Create the subscription\n subscription = stripe.Subscription.create(\n customer=customer_id,\n items=[{'price': data[\"priceId\"]}],\n expand=['latest_invoice.payment_intent'],\n )\n\n data = {}\n data.update(subscription)\n\n return Response(data)\n except Exception as e:\n return Response({\n \"error\": {'message': str(e)}\n })\n\n\nclass RetryInvoiceView(APIView):\n\n def post(self, request, *args, **kwargs):\n data = request.data\n customer_id = request.user.stripe_customer_id\n try:\n\n stripe.PaymentMethod.attach(\n data['paymentMethodId'],\n customer=customer_id,\n )\n # Set the default payment method on the 
customer\n stripe.Customer.modify(\n customer_id,\n invoice_settings={\n 'default_payment_method': data['paymentMethodId'],\n },\n )\n\n invoice = stripe.Invoice.retrieve(\n data['invoiceId'],\n expand=['payment_intent'],\n )\n data = {}\n data.update(invoice)\n\n return Response(data)\n except Exception as e:\n return Response({\n \"error\": {'message': str(e)}\n })","sub_path":"djvideomem/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"49921156","text":"from typing import List\n\nfrom view.data.event import EventData\nfrom view.enum.event_label import Label\n\n\nclass EventEditView:\n def __init__(self, event: EventData, save_url: str, label_config: List[Label]):\n self.event = event\n\n self.save_url = save_url\n self.label_config = label_config\n\n self.min_width = 400\n self.max_width = 1200\n","sub_path":"view/view/event_edit.py","file_name":"event_edit.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"422596503","text":"# -*- coding: utf8 -*-\n\nfrom utils.Utils import LogLevel\nfrom utils.Poster import post\nfrom utils.Poster import get\nfrom utils.Poster import login \nfrom utils.Poster import signup \nfrom utils.Poster import NoAuthorization\nimport utils.Poster as poster\nfrom utils.Utils import log\nfrom utils.Config import Path\nfrom utils.Utils import pcat\nfrom utils.Poster import fresh_token\nimport json\nimport os\nimport time\n\ntag_table = {\n \"C语言\":(\"Lang\", \"C语言, 一门面向过程的、抽象化的通用程序设计语言,广泛应用于底层开发。\"),\n \"Java\":(\"Lang\", \"Java是由Sun Microsystems公司于1995年 月推出的高级程序设计语言。Java可运行于多个平台,如Windows, Mac OS及其他多种UNIX版本的系统。\"),\n \"Python\":(\"Lang\", \"Python由荷兰数学和计算机科学研究学会的Guido van Rossum 于1990 年代初设计,作为一门叫做ABC语言的替代品。\"),\n \"SQL\":(\"Lang\", \"结构化查询语言(Structured Query Language)简称SQL,是一种特殊目的的编程语言,是一种数据库查询和程序设计语言,用于存取数据以及查询、更新和管理关系数据库系统。\"),\n \"环境配置\" : (\"Other\", \"环境配置相关,包括IDE安装、编译器安装。\"),\n \"工具使用\" : (\"Other\", \"开发工具使用相关,包括IDE使用、编译器使用,包括各种报错。\"),\n \"标准库\" : (\"Other\", \"标准库相关,包括标准库函数使用、理解。\"),\n \"语句\" : (\"Other\", \"语句相关,包括循环、分支等语句。\"),\n \"关键字\" : (\"Other\", \"关键字相关,包括if、while、for等。\"),\n \"代码\" : (\"Other\", \"有源代码的问题。\"),\n \"Linux\" : (\"Env\", \"由Linus Benedict Torvalds开发,并由开源社区维护的类Unix操作系统内核Linux。一般代指使用Linux内核的发行版,如Debian、Ubuntu、RHEL、Centos等。\"),\n \"Windows\" : (\"Env\", \"由微软公司(Microsoft)开发的操作系统,是目前最主流的桌面PC操作系统。\"),\n \"macOS\" : (\"Env\", \"由苹果(Apple)开发的操作系统,常用于苹果的Mac系列电脑。\"),\n \"Dev C++\" : (\"Env\", \"Windows 环境下的一个轻量级 C/C++ 集成开发环境(IDE),是一款自由软件,遵守GPL许可协议分发源代码。目前已停止维护,但仍有社区维护的版本。\"),\n \"Visual C++\" : (\"Env\", \"由微软公司(Microsoft)的免费C++集成开发环境(IDE),可提供编辑C语言。目前微软已停止维护并关闭下载渠道。\"),\n \"VS Code\" : (\"Env\", \"由微软公司(Microsoft)同社区开发的Code-OOS的构建,是一款轻量级集成开发环境(IDE),通过插件来支持多种语言的开发。\"),\n \"Visual Studio\" : (\"Env\", \"由微软公司(Microsoft)开发的重量级集成开发环境(IDE)。\"),\n \"gcc\" : (\"Env\", \"GNU Compiler Collection,GNU编译器套件,是遵循GPL协议的自由软件。支持多种语言与多种目标平台。也是GCC编译C语言所使用的的指令名。\"),\n \"clang\" : (\"Env\", \"是LLVM的C家族语言前端,是遵循BSD协议的自由软件。一般可认为是一个C语言、C++、Objective-C语言的轻量级编译器。\"),\n \"msvc\" : (\"Env\", \"由微软公司(Microsoft)开发的C++编译器,是微软旗下IDE(Visual Studio、Visual C++等)的默认C++编译器。\"),\n}\n\ntid = None\n\ndef init_tids():\n global tid\n tid = get(\"/api/questions/taglist\")\n\ndef add_tag(name: str, desc: str, category: str) -> int:\n obj = { \"name\": name, \"desc\": desc, \"category\": category}\n log(\"添加标签:\", name)\n res = post(\"/api/questions/add_tag\", 
obj)\n if res[\"status\"] != \"success\":\n log(\"添加标签:\", name, \"失败\", \"[{}] {}\".format(res[\"status\"], res[\"message\"]), level=LogLevel.WAR)\n return -1\n else:\n tid = res[\"tid\"]\n log(\"添加标签:\", name, \"成功\", \"tid=\", tid)\n return tid\n\ndef add_question(title: str, remarks: str, tags: list, question_jwt: str=None) -> int:\n global tid\n if tid is None:\n init_tids()\n obj = { \"title\": title, \"remarks\": remarks, \"tags\": [tid[t] for t in tags if t in tid.keys()] }\n log(\"添加问题:\", title[:10])\n res = post(\"/api/questions/add_question\", obj, jwt=question_jwt)\n if res[\"status\"] != \"success\":\n log(\"添加问题:\", title[:10], \"失败\", \"[{}] {}\".format(res[\"status\"], res[\"message\"]), level=LogLevel.WAR)\n return -1\n else:\n qid = res[\"qid\"]\n log(\"添加问题:\", title[:10], \"成功\", \"qid=\", qid)\n return qid\n\ndef add_answer(qid: int, content: str, answer_jwt: str=None) -> int:\n obj = { \"qid\": qid, \"content\": content }\n log(\"添加回答:\", content[:10])\n res = post(\"/api/questions/add_answer\", obj, jwt=answer_jwt)\n if res[\"status\"] != \"success\":\n log(\"添加回答:\", content[:10], \"失败\", \"[{}] {}\".format(res[\"status\"], res[\"message\"]), level=LogLevel.WAR)\n return -1\n else:\n aid = res[\"aid\"]\n log(\"添加回答:\", content[:10], \"成功\", \"aid=\", aid)\n return aid\n\ndef try_add_all_tags():\n for k, v in tag_table.items():\n add_tag(k, v[1], v[0])\n\ndef try_add_all_questions(question_jwt: str, answer_jwt: str):\n with open(Path.Script_CSDNData, 'r', encoding='utf8') as f:\n questions = json.load(f)\n to_add_file = pcat(Path.Script, \"questions_to_add.json\")\n to_add = None\n if os.path.exists(to_add_file):\n with open(to_add_file, 'r', encoding='utf8') as f:\n to_add = json.load(f)\n else:\n to_add = list(questions)\n added = []\n try:\n for i, question in enumerate(to_add):\n time.sleep(1)\n if i % 64 == 63:\n question_jwt = fresh_token(question_jwt)\n answer_jwt = fresh_token(answer_jwt)\n q = question[\"q\"]\n a = question[\"a\"]\n qid = add_question(q[\"title\"], q[\"remarks\"], q[\"tags\"], question_jwt)\n if qid < 0:\n continue\n aid = add_answer(qid, a[\"content\"], answer_jwt)\n if aid < 0:\n continue\n added.append(question)\n finally:\n for a in added:\n to_add.remove(a)\n with open(to_add_file, 'w', encoding='utf8') as f:\n json.dump(to_add, f)\n\n\ndef main() -> int:\n log(\"using protocol:\", poster.protocol)\n log(\"using base_url:\", poster.base_url)\n while True:\n command = input(\"signup, tag, question, or exit:\\t\")\n try:\n if command == \"signup\":\n signup()\n elif command == \"exit\":\n return\n elif command == \"tag\":\n print(\"please login with admin account\")\n login()\n try_add_all_tags()\n elif command == \"question\":\n print(\"please login with question account\")\n question_jwt = login()\n print(\"please login with answer account\")\n answer_jwt = login()\n try_add_all_questions(question_jwt, answer_jwt)\n else:\n print(\"unknow command:\", command)\n except KeyboardInterrupt:\n print(\"\")\n except NoAuthorization:\n print(\"login required!\")\n \n\nif __name__ == '__main__':\n try:\n main()\n print(\"exit with exit command\")\n except KeyboardInterrupt:\n print(\"\\nexit with keyboard interrupt\")","sub_path":"Scripts/OldDataInserter.py","file_name":"OldDataInserter.py","file_ext":"py","file_size_in_byte":7456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593587846","text":"import os\nimport sys\n\nimport datetime\nimport numpy as np\nfrom 
keras.applications.densenet import DenseNet121\nfrom keras.applications.densenet import preprocess_input\n\nfrom keras.utils import to_categorical\nfrom sklearn.metrics import log_loss\nfrom collections import OrderedDict\nimport pandas as pd\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nsys.path.append('..')\nfrom helpers.settings import arrays_folder, models_folder\n\n\nmodel = DenseNet121()\nmodel_uid = 'densenet121'\n\nPHI = (np.sqrt(5) - 1) / 2\n\nif __name__ == '__main__':\n input_folder = os.path.join(arrays_folder, 'imagenet_224')\n output_folder = os.path.join(models_folder, model_uid)\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n y_val = np.load(os.path.join(input_folder, 'y_val.npy'))\n y_val = to_categorical(y_val, 1000)\n ws = model.get_weights()\n n_batch = 50\n samples_per_batch = 50000 // n_batch\n\n n_optimization_iterations = 20\n n_selected_batch = 5\n n_ensemble = 5\n\n y_val = y_val[:n_selected_batch * samples_per_batch]\n X = np.zeros((n_selected_batch * samples_per_batch, 224, 224, 3))\n sigma_lower, sigma_upper = 5e-4, 5e-3\n\n sigmas_log = list()\n nlls_log = list()\n nlls = dict()\n for iteration in range(n_optimization_iterations):\n now = datetime.datetime.now().strftime(\"%m/%d %H:%M:%S\")\n print('=' * 100)\n print('iteration {0}... time: {1}'.format(iteration, now))\n print('=' * 100)\n sigma_1 = sigma_lower + (sigma_upper - sigma_lower) * PHI\n sigma_2 = sigma_lower + (sigma_1 - sigma_lower) * PHI\n sigmas_log.append(OrderedDict({\"iteration\": iteration, \"sigma_l\": sigma_lower,\n \"sigma_2\": sigma_2, \"sigma_1\": sigma_1,\n \"sigma_h\": sigma_upper}))\n print('sigma: {0:.6f}, {1:.6f}, {2:.6f}, {3:.6f}'.format(sigma_lower, sigma_2, sigma_1, sigma_upper))\n for sigma in [sigma_1, sigma_2]:\n if sigma not in nlls.keys():\n y_val_pred_ensemble = np.zeros((n_ensemble, *y_val.shape))\n for seed_index, seed in enumerate(range(17, 17 + n_ensemble)):\n np.random.seed(seed)\n model.set_weights(ws)\n wp = np.copy(ws)\n for index2, w in enumerate(ws):\n shape = w.shape\n if len(shape) == 4:\n noise = np.random.normal(0, sigma, (w.shape[0], w.shape[1], w.shape[2], w.shape[3]))\n wp[index2] = ws[index2] + noise\n model.set_weights(wp)\n for i in range(0, n_selected_batch):\n images_path = os.path.join(input_folder, 'x_val_' + str(i).zfill(3) + '.npy')\n array = np.load(images_path)\n X[i * samples_per_batch:(i + 1) * samples_per_batch] = preprocess_input(array)\n y_val_pred_p = model.predict(X, batch_size=64, verbose=1)\n y_val_pred_ensemble[seed_index] = y_val_pred_p\n nll_val = log_loss(y_val, y_val_pred_p)\n print('sigma: {0:.6f}, seed: {1}, nll: val {2:.4f}'.format(sigma, seed, nll_val))\n y_val_pred_ensemble_mean = np.mean(y_val_pred_ensemble, axis=0)\n nll = log_loss(y_val, y_val_pred_ensemble_mean)\n nlls[sigma] = nll\n nlls_log.append(OrderedDict({\"iteration\": iteration, \"sigma\": sigma, \"nll\": nll}))\n else:\n print('will skip for {0:6f}'.format(sigma))\n print(nlls_log)\n if nlls[sigma_1] < nlls[sigma_2]:\n sigma_lower = sigma_2\n print('updated lower side...')\n else:\n sigma_upper = sigma_1\n print('updated upper side...')\n print(sigma_lower, sigma_upper)\n pd.DataFrame(nlls_log).to_csv(os.path.join(output_folder, 'golden_optimization_nlls.csv'), index=False)\n pd.DataFrame(sigmas_log).to_csv(os.path.join(output_folder, 'golden_optimization_simgas.csv'), 
index=False)\n","sub_path":"ImageNet/2_optimal_sigma/densenet121.py","file_name":"densenet121.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243750293","text":"import os\nimport os.path\nimport webracer\nimport nose.plugins.attrib\nfrom . import utils\nfrom .apps import kitchen_sink_app\n\ntry:\n # python 3.3?\n file_not_found_exception_class = FileNotFoundError\nexcept NameError:\n file_not_found_exception_class = IOError\n\nutils.app_runner_setup(__name__, kitchen_sink_app.app, 8060)\n\nsave_dir = os.environ.get('WEBRACER_TEST_TMP') or os.path.join(os.path.dirname(__file__), 'tmp')\nnonexistent_save_dir = '/tmp/nonexistent.dee11123e367b4a7506f856cc55898fabd4caeff'\n\ndef list_save_dir():\n entries = os.listdir(save_dir)\n entries = [entry for entry in entries if entry[0] != '.']\n return entries\n\n@nose.plugins.attrib.attr('client')\n@webracer.config(host='localhost', port=8060)\nclass ResponseTest(webracer.WebTestCase):\n def setUp(self, *args, **kwargs):\n super(ResponseTest, self).setUp(*args, **kwargs)\n \n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n else:\n for entry in list_save_dir():\n os.unlink(os.path.join(save_dir, entry))\n \n @webracer.config(save_responses=True, save_dir=save_dir)\n def test_save_successful(self):\n self.assertEqual(0, len(list_save_dir()))\n \n self.get('/ok')\n self.assert_status(200)\n self.assertEqual('ok', self.response.body)\n \n entries = list_save_dir()\n # response + last symlink\n self.assertEqual(2, len(entries))\n assert 'last' in entries\n entries.remove('last')\n assert entries[0].startswith('response')\n \n @webracer.config(save_responses=True, save_dir=nonexistent_save_dir)\n def test_save_to_nonexistent_dir(self):\n assert not os.path.exists(nonexistent_save_dir)\n \n with self.assert_raises(file_not_found_exception_class) as cm:\n self.get('/ok')\n \n assert nonexistent_save_dir in str(cm.exception)\n \n assert not os.path.exists(nonexistent_save_dir)\n \n @webracer.config(save_responses=False, save_failed_responses=True,\n save_dir=save_dir)\n def test_save_failed_request(self):\n self.assertEqual(0, len(list_save_dir()))\n \n self.get('/internal_server_error')\n \n self.assertEqual(0, len(list_save_dir()))\n \n with self.assert_raises(AssertionError):\n # triggers save\n self.assert_status(200)\n \n entries = list_save_dir()\n # response + last symlink\n self.assertEqual(2, len(entries))\n assert 'last' in entries\n entries.remove('last')\n assert entries[0].startswith('response')\n \n @webracer.config(save_responses=False, save_failed_responses=True,\n save_dir=nonexistent_save_dir)\n def test_save_failed_request_to_nonexistent_dir(self):\n assert not os.path.exists(nonexistent_save_dir)\n \n self.get('/internal_server_error')\n \n with self.assert_raises(AssertionError) as cm:\n # triggers save\n self.assert_status(200)\n \n # Resulting exception should contain both assertion failure message\n # and the message involving inability to save response\n assert 'Response status 200 expected but was 500' in str(cm.exception)\n assert 'No such file or directory' in str(cm.exception)\n assert nonexistent_save_dir in str(cm.exception)\n \n assert not os.path.exists(nonexistent_save_dir)\n","sub_path":"tests/response_saving_test.py","file_name":"response_saving_test.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"53565522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 11:15:02 2019\n\n@author: russe\nNeed to convert units from ppb to some sort of mass unit but easy to do; prior to the slope determination\nCan pull the formatting aspect of this script and maybe have a function version of it \n\"\"\"\n\nimport pandas as pd\nimport glob \nimport numpy as np\nimport datetime\nfrom sklearn.linear_model import LinearRegression\nimport warnings\n\ndef MixingModel(a,b,data_gr, Consts_File):\n Consts = Consts_File\n quant = Consts['Value']['Quant']\n coff = []; coff = pd.DataFrame(coff)\n ss = a+datetime.timedelta(minutes=Consts['Value']['Chamber_Start'])\n se = b-datetime.timedelta(minutes=Consts['Value']['Chamber_End'])\n if Consts['Value']['Ambient_Start'] == 0:\n amb = a; \n else: amb = a+datetime.timedelta(minutes = Consts['Value']['Ambient_Start'])\n ambe = a+datetime.timedelta(minutes=Consts['Value']['Ambient_End'])\n # Does this get changed to [a:b] instead of [amb:ambe]? Makes a pretty big difference\n N20_amb = data_gr['N2O_ppm'][a:b].quantile([quant])\n N15Bulk_amb = data_gr['BulkAT'][amb:ambe].mean()\n x = np.linspace(0,len(data_gr[ss:se])-1,len(data_gr[ss:se]))\n Num = ((data_gr['N2O_ppm'][a:b]*data_gr['BulkAT'][a:b])-(N20_amb[quant]*N15Bulk_amb))\n Dem = (data_gr['N2O_ppm'][a:b]-N20_amb[quant])\n Bulk15Ng = Num/Dem\n Alpha_Diff = data_gr['NN15O_ppm']-data_gr['NN15O_ppm'].min()\n Beta_Diff = data_gr['N15NO_ppm']-data_gr['N15NO_ppm'].min()\n Alpha_Prop = Alpha_Diff/(Alpha_Diff+Beta_Diff); Beta_Prop = Beta_Diff/(Alpha_Diff+Beta_Diff)\n Alpha15Ng = Bulk15Ng*Alpha_Prop; Beta15Ng = Bulk15Ng*Beta_Prop # Add in the QC for this as well; again should be part of a function Not sure how much more want to do\n ok = ((data_gr['N2O_ppm'][a:b]-N20_amb[quant])*1000) > Consts['Value']['Detect']\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n preBulkNgm = np.nanmean(Bulk15Ng[amb:ambe][ok]);preBulkNgs = np.std(Bulk15Ng[amb:ambe][ok])\n BulkNgm = np.nanmean(Bulk15Ng[ss:se][ok]); BulkNgs = np.std(Bulk15Ng[ss:se][ok])\n Back = pd.DataFrame([preBulkNgm, preBulkNgs, BulkNgm, BulkNgs,N20_amb[quant],np.std(data_gr['N2O_ppm'][amb:ambe])\n ,np.nanmean(Alpha15Ng[ss:se][ok]),np.nanmean(Beta15Ng[ss:se][ok])\n ,np.nanmean(Alpha15Ng[amb:ambe][ok]),np.nanmean(Beta15Ng[amb:ambe][ok])\n ,np.nanmean(data_gr['BulkAT'][amb:ambe]),np.nanmean(data_gr['NAlphaAT'][amb:ambe]),np.nanmean(data_gr['NBetaAT'][amb:ambe])]).transpose()\n if len(x) > 1:\n model = LinearRegression().fit(x.reshape((-1, 1)),data_gr['N2O_ppm_conc'][ss:se])\n cof = pd.DataFrame([str(ss.strftime('%Y%m%d%H%M')),str(se.strftime('%Y%m%d%H%M')),model.coef_[0], kk])\n else: cof = pd.DataFrame([str(ss.strftime('%Y%m%d%H%M')),str(se.strftime('%Y%m%d%H%M')),np.NaN,kk])\n coff = pd.concat([cof.transpose(),Back],axis = 1)\n return (coff)\n\ndef Timing_Index(df):\n time = []\n for k in range (0,len(df)):\n Y = str(int(df['TIMESTAMP_START'][k]))[0:4]\n M = str(int(df['TIMESTAMP_START'][k]))[4:6] \n D = str(int(df['TIMESTAMP_START'][k]))[6:8]\n hh = str(int(df['TIMESTAMP_START'][k]))[8:10] \n mm = str(int(df['TIMESTAMP_START'][k]))[10:12]\n time.append(Y+'-'+M+'-'+D+' '+hh+':'+mm)\n time = pd.DataFrame(time); df.index = time[0]\n df.index=pd.to_datetime(df.index) # Time-based index \n return df\n\nfnames = glob.glob(r'C:\\Users\\russe\\Desktop\\LTAR\\N2O\\PCFS\\Formatted\\*.csv')\ncol = pd.read_csv(r'C:\\Users\\russe\\Desktop\\LTAR\\N2O\\PCFS\\N2O_Isotop_Columns.csv',header = 0)\nConsts_File = 
pd.read_csv(r'C:\\Users\\russe\\Desktop\\LTAR\\N2O\\PCFS\\LGR_Isotope_Constants.csv', header = 0, index_col = 'ID')\n\n#%%\n# Loop over the files within the directory; fnames contains the full path for each file\nFinal = []\nFinal = pd.DataFrame(Final)\nfor k in range (10,len(fnames)):\n data = pd.read_csv(fnames[k], header = 0, index_col = 'TimeStamp') # Reads in each file in sequence; first row is the header, index set to the TimeStamp column\n data.index = pd.to_datetime(data.index)\n qn = (data['N2O_ppm'] > 0.3) & (data['N2O_ppm'] < 2.1) # Boolean mask of rows whose N2O_ppm value falls inside the 0.3-2.1 ppm window; because of the drops in the data, we may want a stricter lower bound (0.325?)\n datax = data['N2O_ppm'][qn]/1e6\n data_conc = (datax*44*(data['GasP_torr']*133.322))/(8.314*(data['GasT_C']+273.15)) # ideal gas law; temperature converted from deg C to K\n data['N2O_ppm_conc'] = data_conc\n # Atom percent calculations\n data['BulkAT'] = (((data['NN15O_ppm']+data['N15NO_ppm'])/data['N2O_ppm'])*100)/2\n data['NAlphaAT'] = ((data['NN15O_ppm']/data['N2O_ppm'])*100)\n data['NBetaAT'] = ((data['N15NO_ppm']/data['N2O_ppm'])*100)\n test = data.groupby('MIU_DESC') \n print(fnames[k])\n for kk in test.groups.keys():\n# print(kk)\n data_gr = (test.get_group(kk))\n dd = []\n for dt in range(0,(len(data_gr)-1)):\n dd.append(data_gr.index[dt+1]-data_gr.index[dt])\n dd = pd.DataFrame(dd)\n xxn = dd > pd.Timedelta('00:00:05')\n xxn.index = data_gr.index[0:-1]\n qq = data_gr['d15NB'][0:-1][xxn[0]]\n qq = qq.index\n zn = np.where(xxn == True) \n zn = pd.DataFrame(zn).transpose()\n # This block is messy and needs a proper cleanup as well\n if \"Chamber\" in kk: # Add an output for the start and end time of the chambers to be able to use with the non-iso LGR data\n for m in range (0,len(zn)):\n if m == 0: # Keep the time decider here; everything else could be factored out since it repeats at each step\n a = data_gr.index[0]\n b = data_gr.index[zn[0][m]]\n coff = MixingModel(a,b,data_gr, Consts_File)\n a = data_gr.index[zn[0][m]+1] # What to do with these two pieces\n b = data_gr.index[zn[0][m+1]]\n coff2 = MixingModel(a,b,data_gr, Consts_File)\n coff = pd.concat([coff, coff2])\n elif (m > 0) and (m < len(zn)-1):\n a = data_gr.index[zn[0][m]+1]\n b = data_gr.index[zn[0][m+1]]\n coff = MixingModel(a,b,data_gr, Consts_File)\n elif m == (len(zn)-1):\n a = data_gr.index[zn[0][m]+1]\n b = data_gr.index[-1]\n coff = MixingModel(a,b,data_gr, Consts_File)\n Final = pd.concat([Final, coff])\n \nFinal.columns = col['Cols']\nFinal = Final.reset_index(); Final = Final.drop('index', axis = 1)\nFinal = Timing_Index(Final)\nFinal = Final.sort_index()\nFinal['N2O_Flux'] = Final['N2O_Flux']/2\nFinal['N2O_Flux_g_ha-1_d-1']=Final['N2O_Flux']*60*60*24*1000*10000\n#Final = pd.concat([Final,data]) # Maybe to get the extra data involved\nFinal.to_csv(r'C:\\Users\\russe\\Desktop\\LTAR\\N2O\\PCFS\\LGRISO_N2O_Flux_Test_MixModel_20190809.csv', index_label ='TimeStamp',index = True, na_rep = np.NaN)\n#Check = Final['2019-05-21 15:00':'2019-05-21 18:00']","sub_path":"N2O_Simple_Read_PCFS.py","file_name":"N2O_Simple_Read_PCFS.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373110636","text":"import pickle\nimport sys\n\nfrom heuristicSearch.utils.utils import *\nfrom heuristicSearch.envs.occupancyGrid import OccupancyGrid\n\ndef inputPoints( numPoints, occMap ):\n points = []\n for i in range(numPoints):\n print(\"Click on a point\")\n point = inputClickedPoint(occMap)\n
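# A minimal worked example, not from the source script above: the data_conc line is the
# ideal gas law, mass_conc = x * M * P / (R * T), which yields g m^-3 when x is the mole
# fraction (ppm / 1e6), M = 44 g mol^-1 for N2O, P is in Pa (torr * 133.322) and T is in
# kelvin (deg C + 273.15). The sample values below are illustrative assumptions.
M, R = 44.0, 8.314            # molar mass (g/mol) and gas constant (J/(mol K))
x = 0.33 / 1e6                # 0.33 ppm N2O expressed as a mole fraction
P = 760.0 * 133.322           # 760 torr converted to Pa
T = 25.0 + 273.15             # 25 deg C converted to K
mass_conc = x * M * P / (R * T)
print(mass_conc)              # ~5.9e-4 g/m^3, i.e. about 0.59 mg/m^3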
points.append(point)\n return points\n\ndef savePoints(points, fileName):\n pickle.dump( points, open(fileName, \"wb\") )\n\ndef inputSavePoints():\n \"\"\"Function to input and pickle points.\n Takes three command line arguments: the map to be used, the file to save\n to, and the number of points.\n \"\"\"\n image = sys.argv[1]\n fileName = sys.argv[2]\n numPoints = int( sys.argv[3] )\n\n occGrid = OccupancyGrid()\n occMap = occGrid.getMapFromImage(image)\n\n points = inputPoints( numPoints, occMap )\n savePoints( points, fileName )\n\nif __name__ == \"__main__\":\n inputSavePoints()\n\n","sub_path":"save_points.py","file_name":"save_points.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533704539","text":"\"\"\"\nKernel Functions\n================\n\nThese kernel functions are mostly used with the Kernel Polynomial Method,\ne.g. to damp the Gibbs oscillations of truncated Chebyshev expansions.\n\n\nAvailable methods\n-----------------\n\n - jackson\n - lorentz\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n\ndef jackson(\n num_moments,\n precision=32,\n name_scope=None\n):\n \"\"\"\n This function generates the Jackson kernel for a given number of\n Chebyshev moments.\n\n Parameters\n ----------\n num_moments: (uint)\n number of Chebyshev moments\n precision: (int)\n either 32 or 64, selecting tf.float32 or tf.float64\n name_scope: (str) (default=\"jackson_kernel\")\n scope name for tensorflow\n\n Return\n ------\n jackson_kernel: Tensor(shape=(num_moments,), dtype set by precision)\n\n Note\n ----\n See .. _The Kernel Polynomial Method:\n https://arxiv.org/pdf/cond-mat/0504627.pdf for more details\n \"\"\"\n tf_float = tf.float64\n if precision == 32:\n tf_float = tf.float32\n\n with tf.name_scope(name_scope, \"jackson_kernel\"):\n\n kernel_moments = tf.range(0, num_moments, dtype=tf_float)\n norm = np.pi/(num_moments+1)\n phases = kernel_moments*norm\n\n kernel = tf.math.divide(\n tf.add(\n (num_moments-kernel_moments+1)*tf.cos(phases),\n tf.sin(phases)/tf.tan(norm)\n ),\n (num_moments+1)\n )\n return kernel\n\n\ndef lorentz(\n num_moments,\n l,\n precision=32,\n name_scope=None\n):\n \"\"\"\n This function generates the Lorentz kernel for a given number of\n Chebyshev moments and a positive real number, l\n\n Parameters\n ----------\n num_moments: (int)\n positive integer, number of Chebyshev moments\n l: (float)\n positive number\n precision: (int)\n either 32 or 64, selecting tf.float32 or tf.float64\n name_scope: (str) (default=\"lorentz_kernel\")\n scope name for tensorflow\n\n Return\n ------\n kernel: Tensor(shape=(num_moments,), dtype set by precision)\n\n Note\n ----\n See .. _The Kernel Polynomial Method:\n https://arxiv.org/pdf/cond-mat/0504627.pdf for more details\n \"\"\"\n tf_float = tf.float64\n if precision == 32:\n tf_float = tf.float32\n\n with tf.name_scope(name_scope, \"lorentz_kernel\"):\n\n kernel_moments = tf.range(0, num_moments, dtype=tf_float)\n phases = 1. 
- kernel_moments/num_moments\n\n kernel = tf.math.divide(\n tf.sinh(l*phases),\n tf.math.sinh(l)\n )\n return kernel\n\n\n__all__ = [\"lorentz\", \"jackson\"]\n","sub_path":"emate/utils/tfops/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"648459771","text":"# plot collection\n\nfont_filename = 'C:/_git/vcs/_1.data/______test_files1/_font/msch.ttf'\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Set a Chinese font and make the minus sign display correctly\n# set the Chinese font\nplt.rcParams[\"font.sans-serif\"] = \"Microsoft JhengHei\" # switch the font to Microsoft JhengHei\n# configure the minus sign\nplt.rcParams[\"axes.unicode_minus\"] = False # so the minus sign renders correctly\n\ndef process_key(event):\n fig = event.canvas.figure\n ax = fig.axes[0]\n\n if event.key == \"p\":\n #previous_slice(ax) # previous slice\n #draw(ax, False)\n print('You pressed p on the figure')\n elif event.key == \"n\":\n #next_slice(ax) # next slice\n #draw(ax, False)\n print('You pressed n on the figure')\n elif event.key == \"c\":\n #change_axis(ax)\n #draw(ax, True)\n print('You pressed c on the figure')\n\n fig.canvas.draw_idle()\n\n\nfig, ax = plt.subplots()\n\n#ax.volume = volume\n#ax.index = volume.shape[0] // 2\nax.axis = 0\n#ax.imshow(volume[ax.index, :, :], cMap, vmin=cMin, vmax=cMax)\nfig.canvas.mpl_connect(\"key_press_event\", process_key)\n\n\n\nplt.show()\n\n\n\n\n","sub_path":"_4.python/matplotlib/matplotlib_process_key.py","file_name":"matplotlib_process_key.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300121823","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom itertools import chain\nfrom django.template.defaultfilters import slugify\nfrom django.utils.translation import ugettext as _\nfrom pustakalaya_apps.collection.models import Collection\nfrom elasticsearch.exceptions import NotFoundError\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom .search import VideoDoc\nfrom django.core import urlresolvers\nfrom pustakalaya_apps.core.abstract_models import (\n AbstractItem,\n AbstractSeries,\n AbstractTimeStampModel,\n LinkInfo,\n EmbedVideoAudioLink\n\n)\n\nfrom pustakalaya_apps.core.models import (\n Keyword,\n Biography,\n Sponsor,\n Publisher,\n Language,\n EducationLevel,\n LicenseType,\n genre_audio_video\n)\n\n\nclass FeaturedItemManager(models.Manager):\n def get_queryset(self):\n return super(FeaturedItemManager, self).get_queryset().filter(published=\"yes\", featured=\"yes\").order_by(\"-updated_date\")[:5]\n\n\nclass Video(AbstractItem):\n \"\"\"\n Video item class\n \"\"\"\n\n collections = models.ManyToManyField(\n Collection,\n verbose_name=_(\"Add this video to these collections\"),\n blank=True,\n\n )\n\n video_original_document_authors = models.ManyToManyField(\n Biography,\n verbose_name=_(\"Original Author(s)\"),\n related_name=\"video_original_document_authors\",\n blank=True,\n\n )\n\n video_release_date = models.CharField(\n verbose_name=_(\"Release date\"),\n max_length=255,\n blank=True,\n )\n\n video_director = models.ManyToManyField(\n Biography,\n verbose_name=_(\"Director\"),\n related_name=\"directors\",\n blank=True,\n\n )\n\n video_producers = models.ManyToManyField(\n Biography,\n verbose_name=_(\"Producer\"),\n related_name=\"producers\",\n blank=True,\n\n )\n\n education_levels = models.ManyToManyField(\n EducationLevel,\n verbose_name=_(\"Education Level\"),\n blank=True,\n\n )\n languages = models.ManyToManyField(\n Language,\n 
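# A minimal cross-check sketch, not part of the kernels.py record above: the Jackson
# kernel built by jackson() follows a closed form that is easy to mirror in plain NumPy,
# which can be handy for unit-testing the TensorFlow graph version. jackson_numpy is a
# hypothetical helper name, not part of the emate package.
import numpy as np

def jackson_numpy(num_moments):
    n = np.arange(num_moments, dtype=float)
    norm = np.pi / (num_moments + 1)
    # Same expression as the TF graph:
    # ((N - n + 1) * cos(n * norm) + sin(n * norm) / tan(norm)) / (N + 1)
    return ((num_moments - n + 1) * np.cos(n * norm)
            + np.sin(n * norm) / np.tan(norm)) / (num_moments + 1)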
verbose_name=_(\"Languages\"),\n blank=True,\n\n )\n\n # Manager to return the featured objects.\n objects = models.Manager()\n featured_objects = FeaturedItemManager()\n\n video_series = models.ForeignKey(\n \"VideoSeries\",\n verbose_name=_(\"Video series\"),\n on_delete=models.CASCADE,\n blank=True,\n null=True\n )\n\n type = models.CharField(\n editable=False,\n default=\"video\",\n max_length=255\n )\n video_certificate_license = models.CharField(\n verbose_name=_(\"Certification\"),\n max_length=255,\n blank=True\n )\n age = models.CharField(\n verbose_name=_(\"Age group\"),\n max_length=255,\n blank=True,\n )\n\n sponsors = models.ManyToManyField(\n Sponsor,\n verbose_name=_(\"Sponsor\"),\n blank=True,\n\n )\n\n submitted_by = models.ForeignKey(\n User,\n on_delete=models.SET_NULL,\n editable=False,\n null=True\n )\n\n # custom video genre inherit from genre_audio_video\n\n # video_genre = models.ForeignKey(\n # \"VideoGenre\",\n # verbose_name=_(\"Video Genre\"),\n # blank=True,\n # null=True\n # )\n\n video_genre = models.ManyToManyField(\n genre_audio_video,\n verbose_name=_(\"Video Genre\"),\n blank=True,\n\n )\n # publisher = models.ForeignKey(\n # Publisher,\n # verbose_name=_(\"Publisher\"),\n # blank=True,\n # null=True\n # )\n\n publisher = models.ManyToManyField(\n Publisher,\n verbose_name=_(\"Publisher\"),\n blank=True,\n\n )\n\n keywords = models.ManyToManyField(\n Keyword,\n verbose_name=_(\"Keywords\"),\n blank=True,\n\n )\n\n license = models.ForeignKey(\n LicenseType,\n verbose_name=_(\"license\"),\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n thumbnail = models.ImageField(\n upload_to=\"uploads/thumbnails/video/%Y/%m/%d\",\n max_length=255,\n blank=True\n )\n\n video_running_time = models.CharField(\n verbose_name=_(\"Running time in minutes\"),\n max_length=255,\n blank=True,\n\n )\n\n @property\n def getauthors(self):\n if not self.video_director:\n return [None]\n\n return [(author.getName, author.pk) for author in self.video_director.all()] or [None]\n\n\n\n def get_absolute_url(self):\n from django.urls import reverse\n return reverse(\"video:detail\", kwargs={\"title\": slugify(self.title), \"pk\": self.pk})\n\n def get_dashboard_edit_url(self):\n return reverse(\"dashboard:video_update\", kwargs={\"pk\": self.pk})\n\n\n\n def get_dashboard_delete_url(self):\n return reverse(\"dashboard:video_delete\", kwargs={\"pk\": self.pk})\n\n\n\n\n\n def get_similar_items(self):\n from pustakalaya_apps.document.models import Document\n from pustakalaya_apps.audio.models import Audio\n\n documents = Document.objects.filter(keywords__in=[keyword.id for keyword in self.keywords.all()]).distinct()[:4]\n audios = Audio.objects.filter(keywords__in=[keyword.id for keyword in self.keywords.all()]).distinct()[:4]\n videos = Video.objects.filter(keywords__in=[keyword.id for keyword in self.keywords.all()]).distinct()[:4]\n return chain(documents, audios, videos)\n\n\n\n def doc(self):\n # Parent attr\n item_attr = super(Video, self).doc()\n # Combine item attr and video attr to index in search server\n videoattr = dict(\n **item_attr,\n publisher=[publisher.publisher_name for publisher in self.publisher.all()],\n sponsors=[sponsor.name for sponsor in self.sponsors.all()], # Multi value # TODO some generators\n keywords=[keyword.keyword for keyword in self.keywords.all()],\n type=self.type,\n education_levels=[education_level.level for education_level in self.education_levels.all()],\n communities=[collection.community_name for collection in 
self.collections.all()],\n collections=[collection.collection_name for collection in self.collections.all()],\n collections_ids=[collection.pk for collection in self.collections.all()],\n languages=[language.language.lower() for language in self.languages.all()],\n video_running_time=self.video_running_time,\n thumbnail=self.thumbnail.name,\n # License type\n license_type=self.license.license if self.license else None,\n video_director=self.getauthors,#getattr(self.video_director, \"getname\", \"\"),\n video_series=getattr(self.video_series, \"series_name\", \"\"),\n video_certificate_license=self.video_certificate_license,\n # video_genre=getattr(self.video_genre, \"genre\", \"\"),\n video_genre=[video_genre.custom_genre for video_genre in self.video_genre.all()],\n #video_genre=self.video_genre.genre if self.video_genre else None,\n author_list=self.getauthors,\n url = self.get_absolute_url()\n\n )\n # Create a video instance\n obj = VideoDoc(**videoattr)\n return obj\n\n\n def index(self):\n \"\"\"\n Call this method to index an instance to the search server\n \"\"\"\n if self.published == \"no\":\n # remove the index entry if published is set to no in the form\n self.delete_index()\n else:\n # save the doc\n self.doc().save()\n\n def get_admin_url(self):\n return urlresolvers.reverse(\"admin:%s_%s_change\" %(self._meta.app_label, self._meta.model_name), args=(self.pk,))\n\n def video_title(self):\n return self.title\n\n def published_yes_no(self):\n return self.published\n\n def featured_yes_no(self):\n return self.featured\n\n\n def updated_date_string(self):\n return self.updated_date\n\n def bulk_index(self):\n \"\"\"\n Call this method during bulk indexing of instances to the search server.\n Used by the `search.py` module.\n \"\"\"\n return self.doc().to_dict(include_meta=True)\n\n def delete_index(self):\n \"\"\"Method to delete a video instance from the search server.\n Called by the `signals.py` module.\n \"\"\"\n try:\n self.doc().delete()\n except NotFoundError:\n pass\n\n def __str__(self):\n return self.title\n\n\nclass VideoSeries(AbstractSeries):\n\n class Meta:\n verbose_name_plural = _(\"Video series\")\n ordering = [\"created_date\"]\n\n def __str__(self):\n return \"{}\".format(self.series_name)\n\n\nclass VideoFileUpload(AbstractTimeStampModel):\n \"\"\"Class to upload multiple video file objects\"\"\"\n\n file_name = models.CharField(\n _(\"File name\"),\n max_length=255,\n blank=True,\n )\n\n video = models.ForeignKey(\n Video,\n on_delete=models.CASCADE\n )\n\n upload = models.FileField(\n upload_to=\"uploads/videos/%Y/%m/\",\n max_length=255\n )\n\n\n thumbnail = models.ImageField(\n upload_to=\"uploads/thumbnails/videofile/%Y/%m/%d\",\n max_length=255,\n blank=True,\n null=True,\n help_text=_(\"maximum size of thumbnail should be 165px by 93px\")\n )\n\n # Stored up front to avoid the delay this would otherwise cause while playing the video\n video_running_length = models.CharField(\n _(\"Video running length\"),\n editable=False,\n max_length=255,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.file_name\n\n class Meta:\n ordering = [\"created_date\"]\n\n\nclass VideoLinkInfo(LinkInfo):\n video = models.ForeignKey(\n Video,\n verbose_name=_(\"Link\"),\n on_delete=models.CASCADE,\n\n )\n\n def __str__(self):\n return self.video.title\n\n class Meta:\n ordering=[\"created_date\"]\n\n\nclass VideoEmbedLink(EmbedVideoAudioLink):\n 
video = models.ForeignKey(\n Video,\n verbose_name=_(\"Embed Link\"),\n on_delete=models.CASCADE,\n\n )\n\n def __str__(self):\n return self.video.title\n\n class Meta:\n ordering=[\"created_date\"]\n\n\n\n\nclass VideoGenre(AbstractTimeStampModel):\n genre = models.CharField(\n _(\"Genre name\"),\n max_length=255\n )\n\n genre_description = models.TextField(\n verbose_name=_(\"Genre description\"),\n blank=True\n )\n\n class Meta:\n db_table = \"video_genre\"\n\n def __str__(self):\n return self.genre\n\n","sub_path":"src/pustakalaya_apps/video/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14923919","text":"from random import randint\nimport time\n\ndef selection_sort(lst):\n for i in range(len(lst)):\n\n # find the minimum element in remaining \n minPosition = i\n\n for j in range(i+1, len(lst)):\n if lst[minPosition] > lst[j]:\n minPosition = j\n\n # swap the found minimum element with minPosition\n temp = lst[i]\n lst[i] = lst[minPosition]\n lst[minPosition] = temp\n\n return lst\n\n\nrandList = []\nfor i in range(5):\n randList.append(randint(1,10))\n\nprint(randList)\n\nsortedList = selection_sort(randList)\n\nprint(sortedList)\n\nrand1 = []\nfor i in range(10000):\n rand1.append(randint(1,1000000))\n \nrand2 = []\nfor i in range(100000):\n rand2.append(randint(1,1000000))\n \nrand3 = []\nfor i in range(1000000):\n rand3.append(randint(1,1000000))\n\nprint(len(rand1))\nprint(len(rand2))\nprint(len(rand3))\n\n\nstart = time.time()\nsortedListA = selection_sort(rand1)\nend = time.time()\nprint(end - start)\n\nstart = time.time()\nsortedListA = selection_sort(rand2)\nend = time.time()\nprint(end - start)\n\nstart = time.time()\nsortedListA = selection_sort(rand3)\nend = time.time()\nprint(end - start)\n","sub_path":"python/data_structures_and_algorithms_BOOK/exercises/ch1/1.3.py","file_name":"1.3.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33682227","text":"\"\"\"Module for Krake controller responsible for managing Magnum cluster resources and\ncreating their respective Kubernetes cluster. It connects to the Magnum service of the\nProject on which a MagnumCluster has been scheduled.\n\n.. code:: bash\n\n python -m krake.controller.magnum --help\n\nConfiguration is loaded from the ``controllers.scheduler`` section:\n\n.. 
code:: yaml\n\n api_endpoint: http://localhost:8080\n worker_count: 5\n debounce: 1.0\n poll_interval: 30\n\n tls:\n enabled: false\n client_ca: tmp/pki/ca.pem\n client_cert: tmp/pki/system:magnum.pem\n client_key: tmp/pki/system:magnum-key.pem\n\n\n log:\n ...\n\n\"\"\"\nimport logging\nimport asyncio\nimport pprint\nimport random\nimport re\nimport string\nfrom functools import partial, wraps\nfrom argparse import ArgumentParser\nfrom base64 import b64encode\n\nfrom OpenSSL import crypto\nfrom aiohttp import ClientError, ClientResponseError\n\nfrom keystoneauth1.identity.v3 import Password, ApplicationCredential\nfrom keystoneauth1.session import Session\nimport keystoneauth1.exceptions\nfrom krake.data.config import MagnumConfiguration\nfrom krake.utils import KrakeArgumentFormatter\nfrom magnumclient.v1.client import Client as MagnumV1Client\nimport magnumclient.exceptions\n\nfrom kubernetes_asyncio.config.kube_config import KubeConfigLoader\nfrom kubernetes_asyncio.client import ApiClient, CoreV1Api, Configuration\n\nfrom krake import (\n search_config,\n setup_logging,\n load_yaml_config,\n ConfigurationOptionMapper,\n)\nfrom krake.client.openstack import OpenStackApi\nfrom krake.client.kubernetes import KubernetesApi\nfrom krake.data.core import Reason, ReasonCode, Metadata, resource_ref, ResourceRef\nfrom krake.data.openstack import MagnumClusterState\nfrom krake.data.kubernetes import (\n Cluster as KubernetesCluster,\n ClusterSpec as KubernetesClusterSpec,\n ClusterStatus as KubernetesClusterStatus,\n ClusterCloudConstraints as KubernetesClusterCloudConstraints,\n CloudConstraints as KubernetesCloudConstraints,\n)\nfrom . import Controller, ControllerError, create_ssl_context, run, Reflector\n\n\nDELETION_FINALIZER = \"magnum_cluster_deletion\"\n\nlogger = logging.getLogger(\"krake.controller.openstack\")\n\n\nclass CreateFailed(ControllerError):\n \"\"\"Raised in case the creation of a Magnum cluster failed.\"\"\"\n\n code = ReasonCode.CREATE_FAILED\n\n\nclass ReconcileFailed(ControllerError):\n \"\"\"Raised in case the update of a Magnum cluster failed.\"\"\"\n\n code = ReasonCode.RECONCILE_FAILED\n\n\nclass DeleteFailed(ControllerError):\n \"\"\"Raised in case the deletion of a Magnum cluster failed.\"\"\"\n\n code = ReasonCode.DELETE_FAILED\n recoverable = False\n\n\nclass InvalidClusterTemplateType(ControllerError):\n \"\"\"Raised in case the given Magnum template is not a template for a Kubernetes\n cluster.\n \"\"\"\n\n code = ReasonCode.INVALID_CLUSTER_TEMPLATE\n\n\nOPENSTACK_ERRORS = (\n magnumclient.exceptions.ClientException,\n keystoneauth1.exceptions.ClientException,\n)\n\n\ndef format_openstack_error(error):\n \"\"\"Create a more readable error message using OpenStack specific errors.\n\n Args:\n error (BaseException): the exception whose information is used to create a\n message.\n\n Returns:\n str: the generated error message.\n\n \"\"\"\n if isinstance(error, magnumclient.exceptions.HttpError):\n return \"{message} (HTTP {status}): {method} {url}\".format(\n status=error.http_status,\n message=error.message,\n method=error.method,\n url=error.url,\n )\n elif isinstance(error, keystoneauth1.exceptions.HttpError):\n return \"{message}: {method} {url}\".format(\n message=error.message, method=error.method, url=error.url\n )\n return str(error)\n\n\nclass MagnumClusterController(Controller):\n \"\"\"The Magnum controller receives the MagnumCluster resources from the API and acts\n on it, by creating, updating or deleting their actual cluster counterparts. 
It uses\n the OpenStack Magnum client for this purpose.\n\n Args:\n api_endpoint (str): URL to the API\n loop (asyncio.AbstractEventLoop, optional): Event loop that should be\n used.\n ssl_context (ssl.SSLContext, optional): if given, this context will be\n used to communicate with the API endpoint.\n debounce (float, optional): value of the debounce for the\n :class:`WorkQueue`.\n worker_count (int, optional): the amount of worker function that should be\n run as background tasks.\n poll_interval (float): time in second before two attempts to modify a Magnum\n cluster (creation, deletion, update, change from FAILED state...).\n\n \"\"\"\n\n def __init__(self, *args, worker_count=5, poll_interval=30, **kwargs):\n super().__init__(*args, **kwargs)\n self.openstack_api = None\n self.kubernetes_api = None\n self.reflector = None\n self.worker_count = worker_count\n self.poll_interval = poll_interval\n\n async def prepare(self, client):\n self.client = client\n self.openstack_api = OpenStackApi(self.client)\n self.kubernetes_api = KubernetesApi(self.client)\n\n for i in range(self.worker_count):\n self.register_task(self.consume, name=f\"worker_{i}\")\n\n async def enqueue(cluster):\n # Always cleanup deleted clusters even if they are in FAILED\n # state.\n if cluster.metadata.deleted:\n if (\n cluster.metadata.finalizers\n and cluster.metadata.finalizers[-1] == DELETION_FINALIZER\n ):\n # Safe guard for infinite looping: a failed but deleted\n # application is enqueued after the poll interval to slow\n # down the infinite retry loop.\n #\n # FIXME: Should there be a retry limit?\n if cluster.status.state == MagnumClusterState.FAILED:\n logger.debug(\n \"Enqueue deleted but failed %r in %ss\",\n cluster,\n self.poll_interval,\n )\n await self.queue.put(\n cluster.metadata.uid, cluster, delay=self.poll_interval\n )\n else:\n logger.debug(\"Enqueue deleted %r\", cluster)\n await self.queue.put(cluster.metadata.uid, cluster)\n else:\n logger.debug(\"Reject deleted %r without finalizer\", cluster)\n\n # Ignore all other failed clusters\n elif cluster.status.state == MagnumClusterState.FAILED:\n logger.debug(\"Reject failed %r\", cluster)\n\n # Accept scheduled clusters\n elif cluster.status.project:\n logger.debug(\"Enqueue scheduled %r\", cluster)\n await self.queue.put(cluster.metadata.uid, cluster)\n else:\n logger.debug(\"Reject %r\", cluster)\n\n self.reflector = Reflector(\n listing=self.openstack_api.list_all_magnum_clusters,\n watching=self.openstack_api.watch_all_magnum_clusters,\n on_list=enqueue,\n on_add=enqueue,\n on_update=enqueue,\n on_delete=enqueue,\n resource_plural=\"Magnum Clusters\",\n )\n self.register_task(self.reflector, name=\"Reflector\")\n\n async def cleanup(self):\n self.openstack_api = None\n self.kubernetes_api = None\n self.reflector = None\n\n async def consume(self, run_once=False):\n \"\"\"Continuously retrieve new elements from the worker queue to be processed.\n\n Args:\n run_once (bool, optional): if True, the function only handles one resource,\n then stops. Otherwise, continue to handle each new resource on the\n queue indefinitely.\n\n \"\"\"\n while True:\n key, cluster = await self.queue.get()\n try:\n await self.process_cluster(cluster)\n finally:\n await self.queue.done(key)\n if run_once:\n break # Only used for tests\n\n async def process_cluster(self, cluster):\n \"\"\"Process a Magnum cluster: if the given cluster is marked for deletion, delete\n the actual cluster. 
Otherwise, start the reconciliation between a Magnum cluster\n spec and its state.\n\n Handle any :class:`ControllerError` or the supported OpenStack error that are\n raised during the processing.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster to process.\n\n \"\"\"\n try:\n logger.debug(\"Handle %r\", cluster)\n\n if cluster.metadata.deleted:\n await self.delete_magnum_cluster(cluster)\n else:\n await self.reconcile_magnum_cluster(cluster)\n\n except ControllerError as error:\n logger.error(error)\n\n reason = Reason(code=error.code, message=error.message)\n cluster.status.reason = reason\n cluster.status.state = MagnumClusterState.FAILED\n\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n except OPENSTACK_ERRORS as error:\n message = format_openstack_error(error)\n logger.error(message)\n\n cluster.status.reason = Reason(\n code=ReasonCode.OPENSTACK_ERROR, message=message\n )\n cluster.status.state = MagnumClusterState.FAILED\n\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n async def reconcile_magnum_cluster(self, cluster):\n \"\"\"Depending on the state of the given Magnum cluster, start the rapprochement\n of the wanted state of the cluster to the desired one.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the cluster whose actual state\n will be modified to match the desired one.\n\n \"\"\"\n magnum = await self.create_magnum_client(cluster)\n\n # Ensure that deletion finalizer exists\n if DELETION_FINALIZER not in cluster.metadata.finalizers:\n cluster.metadata.finalizers.append(DELETION_FINALIZER)\n await self.openstack_api.update_magnum_cluster(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n # Simple finite state machine (FSM). Every state is handled by an\n # action. Action are methods of the controller. An action can return\n # another action (\"transition\"). If no action is returned, the FSM\n # terminates (terminal state).\n\n # Initial actions\n actions = {\n MagnumClusterState.PENDING: self.on_pending,\n MagnumClusterState.CREATING: self.on_creating,\n MagnumClusterState.RUNNING: self.on_running,\n MagnumClusterState.RECONCILING: self.on_reconciling,\n }\n action = actions.get(cluster.status.state)\n if not action:\n logger.warning(\n \"Unknown reconciliation state %r for %r\", cluster.status.state, cluster\n )\n return\n\n while action:\n action = await action(cluster, magnum)\n\n logger.info(\"Reconciliation of %r finished\", cluster)\n\n async def on_pending(self, cluster, magnum):\n \"\"\"Called when a Magnum cluster with the PENDING state needs reconciliation.\n\n Initiate the creation of a Magnum cluster using the registered Magnum template,\n but does not ensure that the creation succeeded.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster to actually\n create on its scheduled OpenStack project.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n Returns:\n callable: the next function to be called, as the Magnum cluster changed its\n state. 
In this case, the Magnum cluster has now the CREATING state, thus\n the function returned is :meth:`on_creating`.\n\n \"\"\"\n # Transition into \"CREATING\" state\n cluster.status.state = MagnumClusterState.CREATING\n\n # Create forward reference to Kubernetes cluster resource. The\n # resource does not exist. The reconcile loop will detect this and\n # create the resource.\n if not cluster.status.cluster:\n cluster.status.cluster = ResourceRef(\n api=KubernetesCluster.api,\n kind=KubernetesCluster.kind,\n name=f\"{cluster.metadata.name}-{randstr()}\",\n namespace=cluster.metadata.namespace,\n )\n\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n cluster_template = await read_magnum_cluster_template(\n client=magnum, cluster=cluster\n )\n if cluster_template.coe != \"kubernetes\":\n raise InvalidClusterTemplateType(\n message=f\"Invalid cluster template type {cluster_template.coe!r}\"\n )\n\n # Request OpenStack to create the cluster\n response = await create_magnum_cluster(client=magnum, cluster=cluster)\n\n # Save ID of Magnum cluster resource\n cluster.status.cluster_id = response.uuid\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n return self.on_creating\n\n async def on_creating(self, cluster, magnum):\n \"\"\"Called when a Magnum cluster with the CREATING state needs reconciliation.\n\n Watch over a Magnum cluster currently being created on its scheduled OpenStack\n project, and updates the corresponding Kubernetes cluster created in the API.\n\n As the Magnum cluster is in a stable state at the end, no further processing\n method is needed to return.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster that needs\n to be processed.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n \"\"\"\n await self.wait_for_running(cluster, magnum)\n await self.reconcile_kubernetes_resource(cluster, magnum)\n return None\n\n async def on_running(self, cluster, magnum):\n \"\"\"Called when a Magnum cluster with the RUNNING state needs reconciliation.\n\n If the Magnum cluster needs to be resized, initiate the resizing. Otherwise,\n updates the corresponding Kubernetes cluster created in the API.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster that needs\n to be processed.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n Returns:\n callable: the next function to be called, as the Magnum cluster changed its\n state. 
In the case of resizing, the Magnum cluster now has the\n RECONCILING state, thus the function returned is :meth:`on_reconciling`.\n Otherwise, as the state is stable at the end, no further processing\n is needed and None is returned.\n\n \"\"\"\n if (\n cluster.spec.node_count is not None\n and cluster.status.node_count != cluster.spec.node_count\n ):\n cluster.status.state = MagnumClusterState.RECONCILING\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n await resize_magnum_cluster(magnum, cluster)\n return self.on_reconciling\n\n await self.reconcile_kubernetes_resource(cluster, magnum)\n return None\n\n async def on_reconciling(self, cluster, magnum):\n \"\"\"Called when a Magnum cluster with the RECONCILING state needs reconciliation.\n\n Watch over a Magnum cluster already created on its scheduled OpenStack project,\n and update the corresponding Kubernetes cluster created in the API.\n\n As the Magnum cluster is in a stable state at the end, no further processing\n method needs to be returned.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster that needs\n to be processed.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n \"\"\"\n await self.wait_for_running(cluster, magnum)\n await self.reconcile_kubernetes_resource(cluster, magnum)\n return None\n\n async def delete_magnum_cluster(self, cluster):\n \"\"\"Initiate the deletion of the actual Magnum cluster behind the given resource,\n and wait for its deletion. The finalizer specific to the Magnum Controller is\n also removed from the Magnum cluster resource.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster that needs\n to be deleted.\n\n \"\"\"\n # FIXME: during its deletion, a MagnumCluster is updated, and thus put into the\n # queue again on the controller to be deleted. After deletion, the\n # MagnumCluster is released from the active resources of the queue, and the\n # updated resource is handled again. However, there is no actual resource\n # anymore, as it has been deleted. To prevent this, the worker verifies that\n # the MagnumCluster is not deleted yet before attempting to delete it.\n try:\n await self.openstack_api.read_magnum_cluster(\n namespace=cluster.metadata.namespace, name=cluster.metadata.name\n )\n except ClientResponseError as err:\n if err.status == 404:\n return\n raise\n\n if (\n cluster.status.project\n and cluster.status.state != MagnumClusterState.PENDING\n ):\n magnum = await self.create_magnum_client(cluster)\n\n if cluster.status.state != MagnumClusterState.DELETING:\n try:\n await delete_magnum_cluster(magnum, cluster)\n except magnumclient.exceptions.NotFound:\n pass\n\n cluster.status.state = MagnumClusterState.DELETING\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n while True:\n try:\n response = await read_magnum_cluster(magnum, cluster)\n except magnumclient.exceptions.NotFound:\n logger.info(\"The cluster has been deleted: %r\", cluster)\n break\n\n if response.status == \"DELETE_FAILED\":\n raise DeleteFailed(message=response.status_reason)\n\n await asyncio.sleep(self.poll_interval)\n\n # Remove finalizer. 
The owner reference does not need to be removed\n # because the API will remove the resource if there are\n # no finalizers left no matter the owners.\n cluster.metadata.finalizers.remove(DELETION_FINALIZER)\n await self.openstack_api.update_magnum_cluster(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n async def wait_for_running(self, cluster, magnum):\n \"\"\"Await for an actual Magnum cluster to be in a stable state, that means, when\n its creation or update is finished.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster on which\n an operation is performed that needs to be awaited.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n Raises:\n ControllerError: if the operation on the cluster failed, a corresponding\n error will be raised (for instance CreateFailed in case the creation of\n the cluster failed).\n\n \"\"\"\n # FIXME: This should be handled by an observer. The observer\n # periodically checks the status of the OpenStack resource and\n # if the status of the OpenStack resource is different\n # then the \".status.state\", the observer will update the Krake\n # resource state accordingly.\n while True:\n try:\n response = await read_magnum_cluster(magnum, cluster)\n except magnumclient.exceptions.NotFound:\n # Magnum cluster was deleted from OpenStack deployment. Clear\n # reference to deleted cluster and transition into PENDING\n # state again.\n #\n # Note: The referenced Kubernetes cluster resource is not\n # deleted here. The controller handling the Kubernetes\n # cluster periodically checks the status of the cluster\n # (observer). If the cluster does not exist, it will be\n # recognized by the observer.\n #\n cluster.status.cluster_id = None\n cluster.status.state = MagnumClusterState.PENDING\n\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n return\n\n if response.status in (\"CREATE_COMPLETE\", \"UPDATE_COMPLETE\"):\n logger.info(\"Cluster %r operation complete\", cluster)\n break\n\n if response.status == \"CREATE_FAILED\":\n raise CreateFailed(message=response.status_reason)\n\n if response.status == \"UPDATE_FAILED\":\n raise ReconcileFailed(message=response.status_reason)\n\n if response.status == \"DELETE_FAILED\":\n raise DeleteFailed(message=response.status_reason)\n\n # TODO: Handle timeout\n logger.debug(\"Operation on %r still in progress\", cluster)\n await asyncio.sleep(self.poll_interval)\n\n cluster.status.node_count = response.node_count\n cluster.status.api_address = response.api_address\n cluster.status.master_addresses = response.master_addresses\n cluster.status.node_addresses = response.node_addresses\n\n # Transition into \"RUNNING\" state\n cluster.status.state = MagnumClusterState.RUNNING\n await self.openstack_api.update_magnum_cluster_status(\n namespace=cluster.metadata.namespace,\n name=cluster.metadata.name,\n body=cluster,\n )\n\n async def reconcile_kubernetes_resource(self, cluster, magnum):\n \"\"\"Create or update the Krake resource of the Kubernetes cluster that was\n created from a given Magnum cluster.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the Kubernetes cluster will be\n created using the specifications of this Magnum cluster.\n magnum (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service on the project.\n\n Raises:\n ClientResponseError: when checking if the 
Kubernetes cluster resource\n already exists, raise if any HTTP error except 404 is raised.\n\n \"\"\"\n if cluster.status.state != MagnumClusterState.RUNNING:\n logger.debug(\n \"Magnum cluster %r not running. Skip Kubernetes cluster \"\n \"resource reconciliation.\",\n cluster,\n )\n return\n\n assert cluster.status.cluster, \"Cluster reference not set\"\n assert cluster.status.cluster_id, \"Magnum cluster UUID not set\"\n assert cluster.status.template, \"Cluster template UUID not set\"\n\n kube = None\n\n try:\n # Check if the Kubernetes cluster resource exists\n kube = await self.kubernetes_api.read_cluster(\n namespace=cluster.status.cluster.namespace,\n name=cluster.status.cluster.name,\n )\n except ClientResponseError as error:\n if error.status != 404:\n raise\n else:\n # Check if we can access the Kubernetes cluster with this\n # kubeconfig. If not, update kubeconfig with new certificates.\n loader = KubeConfigLoader(kube.spec.kubeconfig)\n config = Configuration()\n await loader.load_and_set(config)\n\n # If the API address is the same, check if we can access the\n # cluster. Otherwise (if the API addresses are different),\n # recreate certificates.\n if config.host == cluster.status.api_address:\n api_client = ApiClient(config)\n core_v1_api = CoreV1Api(api_client)\n try:\n await core_v1_api.list_node()\n return\n except ClientError:\n pass\n\n # Generate kubeconfig with new certificates.\n #\n # IMPORTANT: The generated certificate has admin rights on the\n # cluster.\n kubeconfig = await make_kubeconfig(magnum, cluster)\n\n if not kube:\n kube = KubernetesCluster(\n metadata=Metadata(\n namespace=cluster.status.cluster.namespace,\n name=cluster.status.cluster.name,\n uid=None,\n created=None,\n modified=None,\n owners=[resource_ref(cluster)],\n # TODO: There should be support for transitive labels and\n # metrics in the scheduler. For now, we simply copy.\n labels=cluster.metadata.labels.copy(),\n ),\n spec=KubernetesClusterSpec(\n kubeconfig=kubeconfig,\n metrics=cluster.spec.metrics.copy(),\n constraints=KubernetesClusterCloudConstraints(\n cloud=KubernetesCloudConstraints()\n ),\n ),\n status=KubernetesClusterStatus(),\n )\n await self.kubernetes_api.create_cluster(\n namespace=kube.metadata.namespace, body=kube\n )\n else:\n kube.spec.kubeconfig = kubeconfig\n await self.kubernetes_api.update_cluster(\n namespace=kube.metadata.namespace, name=kube.metadata.name, body=kube\n )\n\n async def create_magnum_client(self, cluster):\n \"\"\"Create a client to communicate with the Magnum service API for the given\n Magnum cluster. The specifications defined in the OpenStack project of the\n cluster are used to create the client.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the cluster whose project's\n specifications will be used to connect to the Magnum service.\n\n Returns:\n MagnumV1Client: the Magnum client to use to connect to the Magnum service on\n the project of the given Magnum cluster.\n\n \"\"\"\n # TODO: Handle 404 errors\n project = await self.openstack_api.read_project(\n namespace=cluster.status.project.namespace, name=cluster.status.project.name\n )\n\n try:\n return await make_magnum_client(project)\n # FIXME: \"magnum.v1.client.Client\" catches every exception\n # during endpoint discovery and transforms it into an RuntimeError\n # ... 
AAAAAAAAAAAAAAAAH!!!\n except RuntimeError as error:\n # Ugly workaround to get the original exception\n if isinstance(error.__context__, keystoneauth1.exceptions.ClientException):\n raise error.__context__ from None\n raise\n\n\ndef concurrent(fn):\n \"\"\"Decorator function to turn a synchronous function into an asynchronous coroutine\n that runs in another thread, that can be awaited and thus does not block the main\n asyncio loop. It is particularly useful for synchronous tasks which requires a long\n time to be run concurrently to the main asyncio loop.\n\n Example:\n\n .. code:: python\n\n @concurrent\n def my_function(args_1, arg2=value):\n # long synchronous processing...\n return result\n\n await my_function(value1, arg2=value2) # function run in another thread\n\n Args:\n fn (callable): the function to run in parallel from the main loop.\n\n Returns:\n callable: decorator around the given function. The returned callable is an\n asyncio coroutine.\n\n \"\"\"\n\n @wraps(fn)\n async def wrapper(*args, **kwargs):\n loop = asyncio.get_event_loop()\n wrapped = partial(fn, *args, **kwargs)\n return await loop.run_in_executor(None, wrapped)\n\n return wrapper\n\n\ndef randstr(length=7):\n \"\"\"Create a random string of lowercase and digit character of the given length.\n\n Args:\n length (int): specifies how many characters should be present in the returned\n string.\n\n Returns:\n str: the string randomly generated.\n\n \"\"\"\n return \"\".join(random.choices(string.ascii_lowercase + string.digits, k=length))\n\n\ndef generate_magnum_cluster_name(cluster):\n \"\"\"Create a unique name for a Magnum cluster from its metadata. The name has the\n following structure: \"<namespace>-<name>-<random_lowercase_digit_string>\". Any\n special character that the Magnum service would see as invalid will be replaced.\n\n Args:\n cluster (krake.data.openstack.MagnumCluster): the cluster to use to create a\n name.\n\n Returns:\n str: the name generated.\n\n \"\"\"\n raw_name = f\"{cluster.metadata.namespace}-{cluster.metadata.name}-{randstr()}\"\n # Replace all special characters not accepted by the Magnum service with \"_\".\n return re.sub(\"[^A-Za-z0-9_.-]+\", \"_\", raw_name)\n\n\n@concurrent\ndef read_magnum_cluster(client, cluster):\n \"\"\"Read the actual information of the given Magnum cluster resource.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the resource whose actual\n cluster state will be read.\n\n Returns:\n magnumclient.v1.clusters.Cluster: the current information regarding the given\n Magnum cluster.\n\n \"\"\"\n return client.clusters.get(cluster.status.cluster_id)\n\n\n@concurrent\ndef create_magnum_cluster(client, cluster):\n \"\"\"Create an actual Magnum cluster by connecting to the the Magnum service.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the cluster to create.\n\n Returns:\n magnumclient.v1.clusters.Cluster: the cluster created by the Magnum service.\n\n \"\"\"\n extra = {}\n\n # None would be serialized to a string value of \"None\" which makes Magnum\n # fail the creation because \"None\" is an invalid integer value.\n if cluster.spec.master_count is not None:\n extra[\"master_count\"] = cluster.spec.master_count\n\n if cluster.spec.node_count is not None:\n extra[\"node_count\"] = cluster.spec.node_count\n\n return client.clusters.create(\n 
cluster_template_id=cluster.status.template,\n name=generate_magnum_cluster_name(cluster),\n create_timeout=60, # Timeout for cluster creation in minutes\n **extra,\n )\n\n\n@concurrent\ndef resize_magnum_cluster(client, cluster):\n \"\"\"Update the given Magnum cluster by changing its node count.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the cluster to resize.\n\n Returns:\n magnumclient.v1.clusters.Cluster: the cluster updated by the Magnum service.\n\n \"\"\"\n # TODO: How to handle \".node_count = None\"?\n return client.clusters.resize(\n cluster.status.cluster_id, node_count=cluster.spec.node_count\n )\n\n\n@concurrent\ndef delete_magnum_cluster(client, cluster):\n \"\"\"Delete the actual Magnum cluster that corresponds to the given resource.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the cluster to delete.\n\n Returns:\n magnumclient.v1.clusters.Cluster: the cluster deleted by the Magnum service.\n\n \"\"\"\n return client.clusters.delete(cluster.status.cluster_id)\n\n\n@concurrent\ndef read_magnum_cluster_template(client, cluster):\n \"\"\"Get the actual template associated with the one specified in the given Magnum\n cluster resource.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the template given is the one\n specified by this Magnum cluster.\n\n Returns:\n magnumclient.v1.cluster_templates.ClusterTemplate\n\n \"\"\"\n return client.cluster_templates.get(cluster.status.template)\n\n\ndef _encode_to_64(string):\n \"\"\"Compute the base 64 encoding of a string.\n\n Args:\n string (str): the string to encode.\n\n Returns:\n str: the result of the encoding.\n\n \"\"\"\n # b64encode accepts only bytes.\n return b64encode(string.encode()).decode()\n\n\nasync def make_kubeconfig(client, cluster):\n \"\"\"Create a kubeconfig for the Kubernetes cluster associated with the given Magnum\n cluster. 
For this process, it uses (non exhaustively) the name, address and\n certificates associated with it.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster for which a\n kubeconfig will be created.\n\n Returns:\n dict: the kubeconfig created, returned as a dictionary.\n\n \"\"\"\n client_key, csr = make_csr()\n\n ca = await read_ca_certificate(client, cluster)\n client_certificate = await create_client_certificate(client, cluster, csr)\n\n return {\n \"kind\": \"Config\",\n \"apiVersion\": \"v1\",\n \"clusters\": [\n {\n \"cluster\": {\n \"certificate-authority-data\": _encode_to_64(ca),\n \"server\": cluster.status.api_address,\n },\n \"name\": cluster.status.cluster.name,\n }\n ],\n \"contexts\": [\n {\n \"context\": {\"cluster\": cluster.status.cluster.name, \"user\": \"krake\"},\n \"name\": \"admin\",\n }\n ],\n \"current-context\": \"admin\",\n \"preferences\": {},\n \"users\": [\n {\n \"name\": \"krake\",\n \"user\": {\n \"client-certificate-data\": _encode_to_64(client_certificate),\n \"client-key-data\": _encode_to_64(client_key),\n },\n }\n ],\n }\n\n\ndef make_keystone_session(project):\n \"\"\"Create an OpenStack Keystone session using the authentication information of the\n given project resource.\n\n Args:\n project (krake.data.openstack.Project): the OpenStack project to use for getting\n the credentials and endpoint.\n\n Returns:\n Session: the Keystone session created.\n\n \"\"\"\n if project.spec.auth.type == \"password\":\n auth = Password(\n auth_url=project.spec.url,\n user_id=project.spec.auth.password.user.id,\n password=project.spec.auth.password.user.password,\n project_id=project.spec.auth.password.project.id,\n )\n elif project.spec.auth.type == \"application_credential\":\n id = project.spec.auth.application_credential.id\n secret = project.spec.auth.application_credential.secret\n auth = ApplicationCredential(\n auth_url=project.spec.url,\n application_credential_id=id,\n application_credential_secret=secret,\n )\n else:\n raise NotImplementedError(\n f\"Keystone authentication '{project.spec.auth.type}' is not implemented\"\n )\n return Session(auth=auth)\n\n\n@concurrent\ndef make_magnum_client(project):\n \"\"\"Create a Magnum client to connect to the given OpenStack project.\n\n Args:\n project (krake.data.openstack.Project): the project to connect to.\n\n Returns:\n MagnumV1Client: the client to connect to the Magnum service of the given\n project.\n\n \"\"\"\n session = make_keystone_session(project)\n return MagnumV1Client(session=session)\n\n\n@concurrent\ndef read_ca_certificate(client, cluster):\n \"\"\"Get the certificate authority used by the given Magnum cluster.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster for which the\n certificate authority will be retrieved.\n\n Returns:\n str: the certificate authority of the given cluster.\n\n \"\"\"\n return client.certificates.get(cluster_uuid=cluster.status.cluster_id).pem\n\n\n@concurrent\ndef create_client_certificate(client, cluster, csr):\n \"\"\"Create and get a certificate for the given Magnum cluster.\n\n Args:\n client (MagnumV1Client): the Magnum client to use to connect to the Magnum\n service.\n cluster (krake.data.openstack.MagnumCluster): the Magnum cluster for which a\n kubeconfig file will be created.\n csr (str): the certificate signing request (CSR) to use on the 
Magnum service\n for the creation of the certificate.\n\n Returns:\n str: the generated certificate.\n\n \"\"\"\n return client.certificates.create(\n cluster_uuid=cluster.status.cluster_id, csr=csr\n ).pem\n\n\ndef make_csr(key_size=4096):\n \"\"\"Generate a private key and a corresponding certificate signing\n request (CSR).\n\n Args:\n key_size (int): Length of private key in bits\n\n Returns:\n (str, str): private key, certificate signing request (CSR)\n\n \"\"\"\n key = crypto.PKey()\n key.generate_key(crypto.TYPE_RSA, key_size)\n\n csr = crypto.X509Req()\n\n # Use \"admin\" for Common Name and \"system:masters\" for Organization to get\n # admin access to the cluster.\n #\n # @see https://docs.openstack.org/releasenotes/magnum/queens.html#relnotes-6-1-1-stable-queens-upgrade-notes # noqa\n subject = csr.get_subject()\n subject.organizationName = \"system:masters\"\n subject.commonName = \"admin\"\n\n csr.set_pubkey(key)\n csr.sign(key, \"sha256\")\n\n return (\n crypto.dump_privatekey(crypto.FILETYPE_PEM, key).decode(),\n crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr).decode(),\n )\n\n\nparser = ArgumentParser(\n description=\"OpenStack Magnum controller\", formatter_class=KrakeArgumentFormatter\n)\nparser.add_argument(\"-c\", \"--config\", type=str, help=\"Path to configuration YAML file\")\n\nmapper = ConfigurationOptionMapper(MagnumConfiguration)\nmapper.add_arguments(parser)\n\n\ndef main(config):\n setup_logging(config.log)\n logger.debug(\n \"Krake Magnum Controller configuration settings:\\n %s\",\n pprint.pformat(config.serialize()),\n )\n\n tls_config = config.tls\n ssl_context = create_ssl_context(tls_config)\n logger.debug(\"TLS is %s\", \"enabled\" if ssl_context else \"disabled\")\n\n controller = MagnumClusterController(\n api_endpoint=config.api_endpoint,\n worker_count=config.worker_count,\n ssl_context=ssl_context,\n debounce=config.debounce,\n poll_interval=config.poll_interval,\n )\n run(controller)\n\n\nif __name__ == \"__main__\":\n args = vars(parser.parse_args())\n\n config = load_yaml_config(args[\"config\"] or search_config(\"magnum.yaml\"))\n magnum_config = mapper.merge(config, args)\n\n main(magnum_config)\n","sub_path":"krake/krake/controller/magnum.py","file_name":"magnum.py","file_ext":"py","file_size_in_byte":40930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451980167","text":"from flask import Flask, request\nfrom datetime import timedelta, datetime\n\nimport rq_dashboard\n\n\nfrom config import r, q, scheduler\nfrom utils import RedisIdentifier, random_stats, Serializer\nfrom tasks import sleeper, background_task, failer, sched1\nfrom configdashboard import default_settings\n\napp = Flask(__name__)\n\n# set up the RQ dashboard\napp.config.from_object(default_settings)\napp.register_blueprint(rq_dashboard.blueprint, url_prefix=\"/rq\")\n\n# identifier helper for Redis keys\nredis_identifier = RedisIdentifier()\n\n@app.route('/')\ndef hello():\n\n    q.enqueue(sleeper)\n    return f\"This is the start page. Jobs: {q.job_ids}, queue length: {len(q)}.\"\n\n@app.route(\"/task\")\ndef add_task():\n\n    if request.args.get(\"n\"):\n\n        val = request.args.get(\"n\")\n        job = q.enqueue(background_task, val)\n        q_len = len(q)\n\n        return f\"Task {job.id} added to queue at {job.enqueued_at}. 
{q_len} tasks in queue\"\n\n    return \"no value for n\"\n\n@app.route('/failer')\ndef failer_endpoint():\n\n    if request.args.get(\"msg\"):\n        val = request.args.get(\"msg\")\n\n        # can be used to inspect failing jobs\n        if val == \"fail\":\n            job = q.enqueue(failer)\n        else:\n            job = q.enqueue(sleeper)\n\n        return f\"failer endpoint. job_id: {job.id}, enqueued at: {job.enqueued_at}\"\n\n    return \"no value for msg\"\n\n@app.route('/sched')\ndef sched():\n\n    #scheduler.enqueue_in(timedelta(seconds=10), sched1)\n    b = True\n    sj = scheduler.enqueue_at(datetime.utcnow(), sched1, b)\n    return f\"schedule: id {sj.id}. All scheduled jobs: {scheduler.get_jobs()}\"\n\n# Saving to Redis manually -> pickling before saving\n@app.route('/manual/stats')\ndef stats():\n    stats = random_stats()\n    timestamp = datetime.utcnow()\n    _id_stats = redis_identifier.create_id(typ='12weeks:stats')\n    _id_meta = redis_identifier.create_id(typ='12weeks:meta')\n\n    r.set(_id_stats, Serializer.to_byte(stats))\n    r.hmset(_id_meta, {'timestamp': Serializer.to_byte(timestamp), 'stats_id': _id_stats})\n    return f\"id: {_id_stats}, timestamp: {timestamp}\"\n\n# Loading from Redis manually\n@app.route('/manual/stats/get')\ndef get_stats():\n    _id_meta = redis_identifier.create_id(typ='12weeks:meta')\n    _id_stats = r.hget(_id_meta, 'stats_id').decode(\"utf-8\")\n    res = Serializer.from_byte(r.get(_id_stats))\n\n    return f\"id: {_id_stats}, res: {res}\"\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301824592","text":"\"\"\"\nDraw Peppa Pig\nSee the turtle graphics reference:\nhttps://docs.python.org/3.6/library/turtle.html?highlight=turtle#turtle.penup\n\"\"\"\n# -*- coding: utf-8 -*-\nimport turtle as t\n\n\ndef draw_nose():\n\n    t.up() # lift the pen\n    t.goto(-100, 100) # move to position\n    t.down()\n    t.seth(-30) # set the current heading angle\n    t.begin_fill() # start filling the shape\n    a = 0.4\n    for i in range(120):\n        if 0 <= i < 30 or 60 <= i < 90:\n            a += 0.08\n            t.left(3) # turn left by 3 degrees\n            t.forward(a) # move forward by a\n        else:\n            a -= 0.08\n            t.left(3) # turn left by 3 degrees\n            t.forward(a) # move forward by a\n    t.end_fill() # finish filling\n    t.up()\n    t.seth(90)\n    t.forward(25)\n    t.seth(0)\n    t.fd(10)\n    t.pd()\n    t.seth(10)\n    t.begin_fill()\n    t.circle(5)\n    t.fillcolor(160, 82, 45) # brown; set the fill color\n    t.end_fill()\n    t.pu()\n    t.seth(0)\n    t.fd(20)\n    t.pd()\n    t.seth(10)\n    t.begin_fill()\n    t.circle(5)\n    t.end_fill()\n\n\ndef draw_head():\n    t.color((255, 155, 192), \"pink\")\n    t.pu()\n    t.seth(90)\n    t.fd(41)\n    t.seth(0)\n    t.pd()\n    t.begin_fill()\n    t.seth(180)\n    t.circle(300, -30)\n    t.circle(100, -60)\n    t.circle(80, -100)\n    t.circle(150, -20)\n    t.circle(60, -95)\n    t.seth(161)\n    t.circle(-300, 15)\n    t.pu()\n    t.goto(-100, 100)\n    t.pd()\n    t.seth(-30)\n    a = 0.4\n    for i in range(60):\n        if 0 <= i < 30 or 60 <= i < 90:\n            a += 0.08\n            t.lt(3)\n            t.fd(a)\n        else:\n            a -= 0.08\n            t.lt(3)\n            t.fd(a)\n    t.end_fill()\n\n\ndef draw_ears():\n    t.pu()\n    t.seth(90)\n    t.fd(-7)\n    t.seth(0)\n    t.fd(70)\n    t.begin_fill()\n    t.pd()\n    t.seth(100)\n    t.circle(-50, 50)\n    t.circle(-10, 120)\n    t.circle(-50, 54)\n    t.end_fill()\n    t.pu()\n    t.seth(-90)\n    t.fd(12)\n    t.seth(0)\n    t.fd(30)\n    t.pd()\n    t.begin_fill()\n    t.seth(100)\n    t.circle(-50, 50)\n    t.circle(-10, 120)\n    t.circle(-50, 56)\n    t.end_fill()\n\n\ndef draw_eyes():\n\n    # draw the first eye\n\n    t.color((255, 155, 192), \"white\")\n    t.pu()\n    t.seth(90)\n    t.fd(-20)\n    t.seth(0)\n    t.fd(-95)\n    t.pd()\n    t.begin_fill()\n    t.circle(15, 360)\n    t.end_fill()\n    t.color(\"black\")\n    
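# A hedged aside on the turtle API this file relies on (a note, not the original
# author's): t.circle(radius, extent) draws an arc of `extent` degrees around a center
# that sits `radius` units to the turtle's left, so a negative radius bends the arc to
# the turtle's right. A minimal standalone sketch, kept commented out so it does not
# interfere with the drawing around it:
#
#   import turtle as t
#   t.circle(50, 90)   # quarter arc curving to the left
#   t.circle(-50, 90)  # quarter arc curving to the right
#   t.exitonclick()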
t.pu()\n    t.seth(90)\n    t.fd(12)\n    t.seth(0)\n    t.fd(-3)\n    t.pd()\n    t.begin_fill()\n    t.circle(3)\n    t.end_fill()\n\n    # Draw the second eye\n\n    t.color((255, 155, 192), \"white\")\n    t.pu()\n    t.seth(90)\n    t.fd(-25)\n    t.seth(0)\n    t.fd(40)\n    t.pd()\n    t.begin_fill()\n    t.circle(15, 360)\n    t.end_fill()\n    t.color(\"black\")\n    t.pu()\n    t.seth(90)\n    t.fd(12)\n    t.seth(0)\n    t.fd(-3)\n    t.pd()\n    t.begin_fill()\n    t.circle(3)\n    t.end_fill()\n\n\ndef draw_cheek():\n    t.color(255, 155, 192)\n    t.pu()\n    t.seth(90)\n    t.fd(-100)\n    t.seth(0)\n    t.fd(60)\n    t.pd()\n    t.begin_fill()\n    t.circle(30)\n    t.end_fill()\n\n\ndef draw_mouse():\n    t.pencolor((239, 64, 64))\n    t.pu()\n    t.seth(90)\n    t.fd(15)\n    t.seth(0)\n    t.fd(-100)\n    t.pd()\n    t.seth(-80)\n    t.circle(30, 40)\n    t.circle(40, 80)\n\n\ndef draw_body():\n    t.color((255, 99, 71), \"red\")\n    t.pu()\n    t.seth(90)\n    t.fd(-20)\n    t.seth(0)\n    t.fd(-71)\n    t.pd()\n    t.begin_fill()\n    t.seth(-130)\n    t.circle(100, 10)\n    t.circle(300, 29)\n    t.seth(0)\n    t.fd(230)\n    t.seth(90)\n    t.circle(300, 30)\n    t.circle(100, 5)\n    t.color((255, 155, 192), (255, 100, 100))\n    t.seth(-135)\n    t.circle(-80, 58)\n    t.circle(-180, 21)\n    t.end_fill()\n\n\ndef drawl_hands():\n    # left hand\n    t.color((255, 155, 192))\n    t.pu()\n    t.seth(90)\n    t.fd(-45)\n    t.seth(0)\n    t.fd(-25)\n    t.pd()\n    t.seth(-160)\n    t.circle(300, 15)\n    t.pu()\n    t.seth(90)\n    t.fd(15)\n    t.pd()\n    t.seth(-10)\n    t.circle(-20, 90)\n\n    # right hand\n    t.pu()\n    t.seth(90)\n    t.fd(30)\n    t.seth(0)\n    t.fd(240)\n    t.pd()\n    t.seth(-20)\n    t.circle(-300, 15)\n    t.pu()\n    t.seth(90)\n    t.fd(20)\n    t.pd()\n    t.seth(-170)\n    t.circle(20, 90)\n\n\ndef draw_legs():\n\n    # Draw the left leg\n\n    t.pensize(10)\n    t.color((255, 110, 147))\n    t.pu()\n    t.seth(90)\n    t.fd(-68)\n    t.seth(0)\n    t.fd(-180)\n    t.pd()\n    t.seth(-90)\n    t.fd(40)\n    t.seth(-180)\n    t.color(\"black\")\n    t.pensize(15)\n    t.fd(20)\n\n    # Draw the right leg\n\n    t.pensize(10)\n    t.color((255, 110, 147))\n    t.pu()\n    t.seth(90)\n    t.fd(40)\n    t.seth(0)\n    t.fd(90)\n    t.pd()\n    t.seth(-90)\n    t.fd(40)\n    t.seth(-180)\n    t.color(\"black\")\n    t.pensize(15)\n    t.fd(20)\n\n\ndef draw_tail():\n    t.pensize(4)\n    t.color((255, 155, 192))\n    t.pu()\n    t.seth(90)\n    t.fd(70)\n    t.seth(0)\n    t.fd(95)\n    t.pd()\n    t.seth(0)\n    t.circle(70, 20)\n    t.circle(10, 330)\n    t.circle(70, 30)\n\n\ndef main():\n    \"\"\"\n    Main function\n    \"\"\"\n    t.pensize(4)\n    t.colormode(255)\n    t.color((255, 155, 192), \"pink\")\n    t.setup(840, 800)\n    t.speed(50)\n\n    draw_nose()\n    draw_head()\n    drawl_ears()\n    draw_eyes()\n    draw_cheek()\n    draw_mouse()\n    draw_body()\n    drawl_hands()\n    draw_legs()\n    draw_tail()\n\n    t.exitonclick()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"DrawPeiqi.py","file_name":"DrawPeiqi.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"301824592","text":"\"\"\"\r\nAssignment #4, Part 1\r\nJun Seob Shim\r\n8/10/2020\r\nIntro to Programming Section 012\r\nRoll the Dice\r\n\r\nSources:\r\nUsing multiple arguments in an if statement with \"or\":\r\nhttps://discuss.codecademy.com/t/can-we-compare-one-value-with-multiple-values-using-or/349398\r\n\"\"\"\r\n#import modules\r\nimport random as r\r\n\r\n#get number of dice sides from user\r\ninvalidsides = True\r\n\r\nwhile invalidsides:\r\n    sides = int(input(\"How many sides on your dice (4, 6, 8, 10, 12 or 20)? 
\"))\r\n if sides == 4 or sides == 6 or sides == 8 or sides == 10 or sides == 12 or sides == 20:\r\n invalidsides = False\r\n print()\r\n print(\"Thanks, here we go!\")\r\n else:\r\n print(\"Invalid size, try again.\")\r\n\r\n#establish variables for dice rolling\r\nnotsnakeeyes = True\r\ndie1total = 0\r\ndie2total = 0\r\nrolls = 0\r\n\r\n#special roll counters\r\ndoubles = 0\r\nhigh = 0\r\nhighlow = 0\r\nevens = 0\r\nodds = 0\r\nsums = 0\r\n\r\nwhile notsnakeeyes:\r\n #dice values\r\n die1 = r.randint(1,sides)\r\n die2 = r.randint(1,sides)\r\n #counter values\r\n die1total += die1\r\n die2total += die2\r\n rolls += 1\r\n\r\n print(\"\\n\", rolls, \". die #1 is \",\"*\",die1,\"* \",\"and die #2 is \",\"*\",die2,\"* \",sep=\"\",end=\"\")\r\n\r\n #doubles\r\n if die1 == die2:\r\n print(\"Doubles! \", end=\"\")\r\n doubles += 1\r\n\r\n #high\r\n if die1 == die2 == sides:\r\n print(\"High! \", end=\"\")\r\n high += 1\r\n\r\n #highlow \r\n if (die1 == 1 and die2 == sides) or (die1 == sides and die2 == 1):\r\n print(\"High/Low! \", end=\"\")\r\n highlow += 1\r\n\r\n #evens \r\n if die1 % 2 == 0 and die2 % 2 == 0:\r\n print(\"Evens! \", end=\"\")\r\n evens += 1\r\n\r\n #odds \r\n if die1 % 2 != 0 and die2 % 2 != 0:\r\n print(\"Odds! \", end=\"\")\r\n odds += 1\r\n\r\n #sums \r\n if die1 + die2 == sides:\r\n print(\"Sum value is size value! \", end=\"\")\r\n sums += 1\r\n\r\n #snakeeyes \r\n if die1 == 1 and die2 == 1:\r\n print(\"Snake Eyes! \", end=\"\")\r\n notsnakeeyes = False\r\n\r\nprint()\r\nprint()\r\n\r\nprint(\"You finally got snake eyes on roll #\",rolls)\r\n\r\n#print counters for special rolls\r\nprint(\"Along the way you rolled DOUBLES \",doubles,\" time(s). (\",format((doubles/rolls)*100,\".2f\"),\"% of all rolls were doubles)\\\r\n\",sep=\"\")\r\nprint(\"Along the way you rolled TWO HIGH VALUES \",high,\" time(s). (\",format((high/rolls)*100,\".2f\"),\"% of all rolls were two \\\r\nhigh values)\",sep=\"\")\r\nprint(\"Along the way you rolled TWO EVENS \",evens,\" time(s). (\",format((evens/rolls)*100,\".2f\"),\"% of all rolls were two evens)\\\r\n\",sep=\"\")\r\nprint(\"Along the way you rolled TWO ODDS \",odds,\" time(s). (\",format((odds/rolls)*100,\".2f\"),\"% of all rolls were two odds)\",sep=\"\")\r\nprint(\"Along the way you rolled HIGH / LOW \",highlow,\" time(s). (\",format((highlow/rolls)*100,\".2f\"),\"% of all rolls were high/low)\\\r\n\",sep=\"\")\r\nprint(\"Along the way you rolled A SUM VALUE \",sums,\" time(s). 
(\",format((doubles/rolls)*100,\".2f\"),\"% of all rolls were \\\r\na sum value)\",sep=\"\")\r\n\r\n#print average roll\r\nprint(\"Average roll for die #1:\",format(die1total/rolls,\".2f\"))\r\nprint(\"Average roll for die #2:\",format(die2total/rolls,\".2f\"))\r\n","sub_path":"Intro to Programming/Assignments/Assignment 4/ShimJunSeob_assign4_part1.py","file_name":"ShimJunSeob_assign4_part1.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"322734074","text":"#!/usr/bin/python\nimport sys\nimport itertools;\nposcar = open(\"POSCAR\",\"r\")\nposcar_lines = poscar.readlines();\nelements = poscar_lines[0].split()\nelement_counts = poscar_lines[5].split()\n#print(elements)\n#print(element_counts)\n\n\nsorted_element_types = []\nfor i in range(0,len(element_counts)):\n sorted_element_types.extend( itertools.repeat(elements[i],int(element_counts[i])) )\nprint(sorted_element_types)\n\nspin_elements = [\"Co\",\"O\"]\nspin_ups = [\"Co\",\"O\"]\nspin_downs = [\"Cd\",\"N\"]\n\n\nincar = open(\"INCAR\",\"r\")\nmag_pars = []\nfor line in incar.readlines():\n if \"MAGMOM\" in line:\n for i in range(0,len(line)):\n if line[i] == '=':\n effect_line = line[i+1:len(line)]\n# print(effect_line);\n mag_pars = effect_line.split();\n\nprint(\"mag_parts = \" + str(mag_pars))\n\nsorted_mags = []\nfor i in mag_pars:\n if \"*\" in i:\n parts = i.split(\"*\");\n# print(\"parts = \" + str(parts))\n sorted_mags.extend( itertools.repeat(parts[1], int(parts[0]) ) )\n else:\n sorted_mags.append( i)\n\nprint(sorted_mags)\n\nif (len(sorted_element_types) != len(sorted_mags)):\n print (\"Atom count in MAGMOM mismatches count in POSCAR!\")\n exit()\n\nfor i in range(0,len(sorted_element_types)):\n \n if sorted_element_types[i] in spin_elements:\n pos = spin_elements.index(sorted_element_types[i])\n\n if float(sorted_mags[i]) > 0.0:\n sorted_element_types[i] = spin_ups[pos]\n elif float(sorted_mags[i]) < 0.0:\n sorted_element_types[i] = spin_downs[pos]\n\n#print (sorted_element_types)\n\nelements = []\nelement_counts = []\nfor i in range(0,len(sorted_element_types)):\n if (len(elements) == 0 or elements[-1] != sorted_element_types[i] ):\n elements.append (sorted_element_types[i])\n element_counts.append (1)\n else:\n element_counts[-1] = element_counts[-1] + 1\n#print(elements)\n#print(element_counts)\n\nl0 = \"\"\nl5 = \"\"\nfor i in range(0,len(elements)):\n l0 = l0 + \" \" + str(elements[i])\n l5 = l5 + \" \" + str(element_counts[i])\n \nl0 = l0 + \"\\n\"\nl5 = l5 + \"\\n\"\nposcar_lines[0] = l0\nposcar_lines[5] = l5\n\nposcar_show = open(\"POSCAR_SHOW\",\"w\")\nfor i in poscar_lines:\n poscar_show.write(i)\nposcar.close()\nposcar_show.close()\n","sub_path":"show_mag.py","file_name":"show_mag.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"438945003","text":"\"\"\"\nThis file is part of Linspector (https://linspector.org/)\nCopyright (c) 2013-2023 Johannes Findeisen <you@hanez.org>. All Rights Reserved.\nSee LICENSE.\n\"\"\"\n\nimport atexit\nimport os\nimport signal\nimport sys\nimport time\n\n\n# TODO: there is a bug when stopping the daemon. the pid_file is not being deleted. 
NEEDS A FIX!\nclass Linspectord:\n    def __init__(self, configuration, environment, linspector, log):\n        self._configuration = configuration\n        self._environment = environment\n        self._linspector = linspector\n        self._log = log\n        try:\n            self._pid_file = configuration.get_option('linspector', 'pid_file')\n        except Exception as err:\n            log.critical('daemonize error (no pid_file set): {0}'.format(err))\n\n    def daemonize(self):\n        # daemonize the class using the UNIX double fork mechanism.\n\n        # do first fork.\n        try:\n            pid = os.fork()\n            if pid > 0:\n                # exit first parent.\n                sys.exit(0)\n        except OSError as err:\n            self._log.critical('fork #1 failed: {0}'.format(err))\n            sys.exit(1)\n\n        # decouple from parent environment.\n        os.chdir('/')\n        os.setsid()\n        os.umask(0)\n\n        # do second fork.\n        try:\n            pid = os.fork()\n            if pid > 0:\n                # Exit from second parent.\n                sys.exit(0)\n        except OSError as err:\n            self._log.critical('fork #2 failed: {0}'.format(err))\n            sys.exit(1)\n\n        # redirect standard file descriptors.\n        sys.stdout.flush()\n        sys.stderr.flush()\n        si = open(os.devnull, 'r')\n        so = open(os.devnull, 'a+')\n        se = open(os.devnull, 'a+')\n\n        os.dup2(si.fileno(), sys.stdin.fileno())\n        os.dup2(so.fileno(), sys.stdout.fileno())\n        os.dup2(se.fileno(), sys.stderr.fileno())\n\n        # write pid file.\n        atexit.register(self.delete_pid)\n\n        pid = str(os.getpid())\n        with open(self._pid_file, 'w+') as f:\n            f.write(pid + '\\n')\n\n    def delete_pid(self):\n        os.remove(self._pid_file)\n\n    def start(self):\n        # start the daemon. check for a pidfile to see if the daemon already runs before.\n        self._log.info('starting daemon using pid_file: ' + str(self._pid_file))\n        try:\n            with open(self._pid_file, 'r') as pf:\n                pid = int(pf.read().strip())\n        except IOError:\n            pid = None\n\n        if pid:\n            message = 'pid_file {0} already exists. daemon already running?'\n            self._log.critical(message.format(self._pid_file))\n            sys.exit(1)\n\n        # start the daemon.\n        self.daemonize()\n        self.run()\n\n    def stop(self):\n        # stop the daemon.\n        self._log.info('stopping daemon using pid_file: ' + str(self._pid_file))\n        # get the pid from the pid file.\n        try:\n            with open(self._pid_file, 'r') as pf:\n                pid = int(pf.read().strip())\n        except IOError:\n            pid = None\n\n        if not pid:\n            message = 'pid_file {0} does not exist. daemon not running?'\n            self._log.error(message.format(self._pid_file))\n            return  # not an error in a restart\n\n        # try killing the daemon process.\n        try:\n            while True:\n                os.kill(pid, signal.SIGTERM)\n                time.sleep(0.1)\n        except OSError as err:\n            e = str(err.args).lower()\n            if e.find('no such process') > 0:\n                if os.path.exists(self._pid_file):\n                    os.remove(self._pid_file)\n            else:\n                self._log.critical(str(err.args))\n                sys.exit(1)\n\n    def restart(self):\n        # restart the daemon.\n        self._log.info('restarting daemon using pid_file: ' + str(self._pid_file))\n        self.stop()\n        self.start()\n\n    # maybe this function can be removed in the future because Linspector should run endlessly when\n    # starting scheduled jobs. need to cover this in the future. 
for now, it is useful for testing\n    # during development because the daemon keeps running even when it has nothing to do internally.\n    @staticmethod\n    def run():\n        signal.pause()\n","sub_path":"linspector/linspectord.py","file_name":"linspectord.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"216973826","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom tweets.models import Tweet\n\nclass NewsFeed(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.SET_NULL,\n        null=True,\n    )\n    tweet = models.ForeignKey(\n        Tweet,\n        on_delete=models.SET_NULL,\n        null=True,\n    )\n    created_at = models.DateTimeField(auto_now_add=True)\n\n    class Meta:\n        index_together = (('user', 'created_at'),)\n        unique_together = (('user', 'tweet'),)\n        ordering = ('-created_at',)\n\n    def __str__(self):\n        # This is what will be displayed when you call print() on a NewsFeed instance\n        return f'{self.created_at} inbox of {self.user}: {self.tweet}'","sub_path":"newsfeeds/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"569133662","text":"\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(logging.INFO)\r\n\r\nformatter = logging.Formatter('%(levelname)s:%(message)s')\r\nfile_handler = logging.FileHandler('emp.log')\r\nfile_handler.setFormatter(formatter)\r\n\r\nlogger.addHandler(file_handler)\r\n\r\nclass Employee:\r\n\r\n    def __init__(self,first,last):\r\n        self.first=first\r\n        self.last=last\r\n        logger.info('Created Employee: {} - {}'.format(self.fullname,self.email))\r\n    \r\n    @property\r\n    def email(self):\r\n        return '{}.{}@gmail.com'.format(self.first,self.last)\r\n    \r\n    @property\r\n    def fullname(self):\r\n        return '{} {}'.format(self.first,self.last)\r\n\r\nexp_1 = Employee('payal','kutana')\r\nexp_2 = Employee('rima','jogani')","sub_path":"logger_of_employee.py","file_name":"logger_of_employee.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"421654882","text":"import socketserver, socket, subprocess, time, errno, sys\n\nhost = 'localhost'\nport = 2540\nsize = 1024\n\ndef Probe_Port(host, port):\n    # AF_INET creates an IPv4 address family\n    # SOCK_STREAM selects the TCP (stream) protocol\n\ttry:\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\texcept socket.error as msg:\n\t\tprint(\"Failed to create socket. Error code: \", msg[0], \" , Error message : \", msg[1])\n\t\tsys.exit()\n\n\twhile True:\n\t\ttry:\n\t\t\ts.connect((host, port))\n\t\t\tbreak\n\t\texcept OSError as e:\n\t\t\tprint(\"There was a problem connecting to device, retrying\")\n\treturn s\n\ndef write_to_reg(conn, intValue):\n# This will take an integer input and convert it to a binary string. 
It will also cut off the 0b at the beginning of the string.\n    size = 8\n    bStr_RegisterVal = bin(intValue)[2:].zfill(size)  #Convert from int to binary string, dropping the '0b' prefix\n    conn.send(bStr_RegisterVal.encode('utf-8') + b'\\n')  #Newline is required to flush the buffer on the Tcl server\n    data = conn.recv(10)\t# The receive size always needs two additional characters beyond the register string length; these carry the start and stop bits.\n    return data\n\n###########################\n########## Main ###########\n###########################\n\n#this will clear any previous port data\nclear_serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclear_serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n\nsubprocess.Popen('start /B start_TCL_Server.LNK', shell=True)  #opens the TCL Server\ntime.sleep(1)  #delays for 1 second\n\nconn = Probe_Port(host, port)\ntime.sleep(1)  #delays for 1 second\n\nwhile True:\n# This loop keeps asking first for the address of the register that you want to write to,\n# then for the value to write to the addressed register.\n\ta = input(\"\\nPlease enter a register address : \")\n\tif a == 'end':\n\t\tbreak\n\ta_int = int(a)\n\twrite_to_reg(conn, a_int)\n\tb = input(\"\\nPlease enter the integer value to be written to the register : \")\n\tif b == 'end':\n\t\tbreak\n\tb_int = int(b)\n\twrite_to_reg(conn, b_int)\n\n# Things to do.\n\t\t# Add in an extra input for the length of the data that you want to write.\n\t\t# This will require additional VHDL\n\t\t# also need to test the length of data that can be sent. From things read it seems that it is limited by Tcl (32 bits)\n\t\t# look into making a tcl function file that can have python call the individual function:\n\t\t\t#Example;\n\t\t\t\t# This is a rather old thread, but I recently stumbled on Tkinter.Tcl() which gives you direct\n\t\t\t\t# access to a Tcl interpreter in python without having to spawn a Tk GUI as Tkinter.Tk() requires.\n\n\t\t\t\t# An example... suppose you have a Tcl file (foo.tcl) with a proc called main that requires a\n\t\t\t\t# single filename as an argument... main returns a string derived from reading foo.tcl.\n\n\t\t\t\t\t\t# from Tkinter import Tcl\n\n\t\t\t\t\t\t# MYFILE = 'bar.txt'\n\t\t\t\t\t\t# tcl = Tcl()\n\t\t\t\t\t\t# # Execute proc main from foo.tcl with MYFILE as the arg\n\t\t\t\t\t\t# tcl.eval('source foo.tcl')\n\t\t\t\t\t\t# tcl_str = tcl.eval('main %s' % MYFILE)\n\t\t\t\t\t\t# # Access the contents of a Tcl variable, $tclVar from python\n\t\t\t\t\t\t# tcl.eval('set tclVar foobarme')\n\t\t\t\t\t\t# tclVar = tcl.eval('return $tclVar')\n\n\t\t\t\t# I haven't found another way to access Tcl objects from python besides through a return value, but\n\t\t\t\t# this does give you a way to interface with Tcl procs. 
Furthermore, you can export python functions\n\t\t\t\t# into Tcl as discussed in Using Python functions in Tkinter.Tcl()\n\t\t\t# Also refer to this site https://wiki.python.org/moin/How%20Tkinter%20can%20exploit%20Tcl/Tk%20extensions\n\n\nconn.close()\n\n\n\n\n\n\n\n\n","sub_path":"vJtag_7_29_14/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"638153467","text":"import matplotlib\nimport matplotlib.pylab as plt\nimport sys\nsys.path.append('waveglow/')\nimport numpy as np\nimport torch\n\nfrom hparams import create_hparams\nfrom model import Tacotron2\nfrom layers import TacotronSTFT, STFT\nfrom audio_processing import griffin_lim\nfrom train import load_model\nfrom text import text_to_sequence\nfrom denoiser import Denoiser\nfrom scipy.io.wavfile import write\n\ndef plot_data(data, filepath, figsize=(16, 4)):\n fig, axes = plt.subplots(1, len(data), figsize=figsize)\n for i in range(len(data)):\n axes[i].imshow(data[i], aspect='auto', origin='bottom', \n interpolation='none')\n fig.savefig(filepath)\n\n# Setup hparams\n\nprint(\"Setting up hyperparams...\")\nhparams = create_hparams()\nhparams.sampling_rate = 22050\nprint(\"Done.\")\n\n# Load model from checkpoint\n#checkpoint_path = \"./models/tacotron2_statedict.pt\"\ncheckpoint_path = \"./outdir1/checkpoint_5000\"\nmodel = load_model(hparams)\nmodel.load_state_dict(torch.load(checkpoint_path)['state_dict'])\nmodel.cuda().eval()\n\n\n# Load WaveGlow for mel2audio synthesis and denoiser\n\nwaveglow_path = './models/waveglow_256channels_universal_v5.pt'\nwaveglow = torch.load(waveglow_path)['model']\nwaveglow.cuda().eval()\nfor k in waveglow.convinv:\n k.float()\ndenoiser = Denoiser(waveglow)\n\n# Prepare input text\ntext = \"आपके हिन्दी पसन्द करने पर खुशी हुई |\"\nsequence = np.array(text_to_sequence(text, ['basic_cleaners']))[None, :]\nsequence = torch.autograd.Variable(\n torch.from_numpy(sequence)).cuda().long()\n\n# Decode text input and plot results\nmel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)\nplot_data((mel_outputs.float().data.cpu().numpy()[0],\n mel_outputs_postnet.float().data.cpu().numpy()[0],\n alignments.float().data.cpu().numpy()[0].T), './melplots/plot_hindi.png')\n\n\nwith torch.no_grad():\n audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)\n\naudio_path = \"./audio/audio_hindi.wav\"\nwrite(audio_path, hparams.sampling_rate, audio[0].cpu().numpy())\n\n# Remove WaveGlow bias\n'''\naudio_denoised = denoiser(audio, strength=0.01)[:, 0]\naudio_denoised_path = \"./audio/audio_denoised.wav\"\nwrite(audio_denoised_path, hparams.sampling_rate, audio_denoised.cpu().numpy())\n'''","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"413908868","text":"# !/usr/bin/python\n# vim: set fileencoding=utf8 :\n#\n__author__ = 'keping.chu'\n\n\nfrom pyalgotrade import strategy\nfrom updaytech import UP318\nfrom basetech import LimitPrice, CloseAvg\nfrom pyalgotrade.broker.backtesting import LimitOrder, Broker\n\n\nclass LimitOrderDays(LimitOrder):\n\n def __init__(self, action, instrument, limitPrice, quantity, instrumentTraits):\n super(LimitOrderDays, self).__init__(action, instrument, limitPrice, quantity, instrumentTraits)\n self.days = 1\n\n\nclass MOrder:\n\n def __init__(self, order):\n\n self.order = order\n self.days = 
1\n\n\nclass LeftBroker(Broker):\n\n    def createLimitOrder(self, action, instrument, limitPrice, quantity):\n        return LimitOrderDays(action, instrument, limitPrice, quantity, self.getInstrumentTraits(instrument))\n\n    def commitOrderExecution(self, order, dateTime, fillInfo):\n        # skip sell orders when we no longer hold shares of the instrument\n        if order.isSell():\n            ins = order.getInstrument()\n            amt = self.getShares(ins)\n            if amt <= 0:\n                return\n\n        super(LeftBroker, self).commitOrderExecution(order, dateTime, fillInfo)\n\n\nclass UpStrategy(strategy.BacktestingStrategy):\n\n    def __init__(self, feed, instrument, days=5):\n        self.broker = LeftBroker(4000000, feed)\n        super(UpStrategy, self).__init__(feed, self.broker)\n        self.instrument = instrument\n        self.up_ = {}\n        self.li_ = {}\n        self.ca_ = {}\n        self.days_ = days\n        self.default_day = days\n        self.positions = {}\n        for stock in self.instrument:\n            self.up_[stock] = UP318(feed.getDataSeries(stock), 5)\n            self.li_[stock] = LimitPrice(feed.getDataSeries(stock), 2)\n            self.ca_[stock] = CloseAvg(feed.getDataSeries(stock), 2)\n\n    def onBars(self, bars):\n        # order timing: process sells first, then buys\n        self.sales(bars)\n        self.buy(bars)\n\n    def sales(self, bars):\n        # iterate over a copy, since positions are deleted inside the loop\n        for ins, order in list(self.positions.items()):\n            # sell when the holding limit is reached\n            bar = bars.getBar(order.order.getInstrument())\n            if bar:\n                ca = self.ca_[ins]\n                if not ca[-1]:\n                    self.marketOrder(ins, -order.order.getQuantity())\n                    self.info(\"sales [%s %s %s]\" % (order.order.getInstrument(), bar.getDateTime(), order.days))\n                    del self.positions[ins]\n\n    def buy(self, bars):\n        for stock, bar in bars.items():\n            up = self.up_[stock]\n            # buy when the indicator conditions are met\n            if up[-1] and (self.positions.get(stock) is None) and (self.li_[stock][-1] < 0.99) and self.ca_[stock][-1]:\n                order = self.marketOrder(stock, self.round(bar.getClose()))\n                self.info(\"buy [%s, %s, %s]\" % (stock, bar.getClose(), order.getQuantity()))\n                self.positions[stock] = MOrder(order)\n\n    def round(self, price, cash=5000):\n        ratio = self.getResult() / 4000000\n        amount = int(int(cash * ratio / price) / 100) * 100\n        if amount == 0:\n            amount = 100\n        return amount\n\n","sub_path":"stratlib/updaystrategy.py","file_name":"updaystrategy.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"460151213","text":"import numpy as np\nimport scipy.linalg as la\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as spla\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as mtri\nfrom matplotlib.animation import ArtistAnimation\nfrom mpl_toolkits.mplot3d import axes3d\n\nfrom itertools import product as iter_product\n\nfrom ..triangular_2d_fem import Triangular2DFEM\n\nfrom ..tools import matprint, delete_from_csr, \\\n                    quadrature1D, quadrature2D, BCtype\n\n# Now we inherit from a common \"Linear, Triangular Finite Element Solver\"\n# Base class contains the nodes, triangulation (triang), edge_nodes, coordinate transformations, etc. \n# As well as the definitions of the basis functions and their gradients.\n# Should refactor project_1 solver as well!\n\nclass Elasticity2DSolver(Triangular2DFEM):\n    \"\"\"\n    2D-FEM solver of the linear elasticity equation:\\n\n        ρuₜₜ = ∇ᵀσ(u) + f,  (x, y) ∈ Ω = [-1, 1]² \\n\n    Can find vibration modes/frequencies of a problem with no\n    driving force term f, or solve the steady state equation\\n\n        ∇ᵀσ(u) = -f,  (x, y) ∈ Ω. \n\n    \\nUses linear Lagrange polynomial basis functions. 
\n \"\"\" \n def __init__(self, N, f, g_D, g_N, class_BC, E, nu, rho,\n quad_points=4, area=\"plate\", eps=1.0e-8):\n \"\"\"\n Initializer for 2D-FEM solver of the linear elasticity equation:\\n\n ρuₜₜ = ∇ᵀσ(u) + f, (x, y) ∈ Ω = [-1, 1]². \\n\n Can find vibration modes/frequencies of a problem with no\n driving force term f, or solve the steady state equation\n ∇ᵀσ(u) = -f, (x, y) ∈ Ω. \n\n N: Size of the mesh. (sqrt(num_nodes) for plate.) \\n\n f: Source function. ℜ² ⇒ ℜ². \\n\n E: Youngs modulus of your material. \\n\n nu: Poisson ratio of your material. \\n\n rho: Density per area of your materal. \\n\n g_D: Function g([x, y]) -> R, specifying Dirichlet boundary conditions. \\n\n g_N: Function g([x, y]) -> R, specifying Neumann boundary conditions. \\n\n class_BC: Function BC_type([x, y]) -> Bool, returning True if point [x, y] \n should be a Dirichlet BC. Assumed Neumann if not. \\n\n \"\"\"\n super(Elasticity2DSolver, self).__init__(N=N, area=area)\n\n self.edge_triangle_indexes = list(filter(lambda i: any((node in self.triang[i] for node in self.edge_nodes)), \n np.arange(len(self.triang))))\n self.edge_triangles = list(self.triang[self.edge_triangle_indexes])\n\n self.num_nodes = len(self.nodes)\n\n # Two basis functions for each node: \n # (ϕ_i_0 = [ϕ_i, 0], ϕ_i_1 = [0, ϕ_i]), i = 1, .., num_nodes.\n self.num_basis_functions = 2*self.num_nodes \n\n # Internal solver variables:\n self.quad_points = quad_points\n self.area = area # A bit superfluous.\n self.d_pairs = ((0, 0), (0, 1), (1, 0), (1, 1))\n\n # Problem source- and BC-functions:\n self.f = f\n self.g_D = g_D\n self.g_N = g_N\n \n # Material properties:\n self.E = E\n self.nu = nu\n self.rho = rho\n if callable(rho):\n self.constant_density = False\n else:\n self.constant_density = True\n\n self.class_BC = class_BC\n self.eps = eps # Big-Number Epsilon.\n \n # Store the transformation matrix C: σ_vec = C @ ε_vec\n self.C = (E/(1 - nu**2)) * np.array([[1.0, nu, 0.0],\n [ nu, 1.0, 0.0],\n [0.0, 0.0, (1 - nu)/2.0]])\n\n # Store which nodes are BCtype.Dir and their values:\n self.dirichlet_BC_mask = np.zeros(self.num_basis_functions, dtype=bool)\n self.dirichlet_BC_nodes_mask = np.zeros(self.num_nodes, dtype=bool)\n self.dirichlet_BC_basis_functions = []\n self.dirichlet_BC_values = []\n\n # Boolean indicating whether Boundary Conditions have been applied:\n self.applied_BC = False\n\n # Initialize the full Stiffness matrix to None before construction:\n self.A_h = None\n\n # Initialize the full Mass matrix to None before construction:\n self.M_h = None\n\n # Initialize the Source vector to None before construction:\n self.F_h = None\n\n # Initialize the Basis function coefficients \"u_h\" to None:\n self.u_h = None\n \n @classmethod\n def from_dict(cls, model_dict):\n return cls(**model_dict)\n \n def basis_func_eps_vec(self, i, d, J_inv_T=None, k=None):\n \"\"\"\n Calculate the epsilon-vector for a basis-function:\\n\n ε(ϕ) = [∂ϕ₁/∂x, ∂ϕ₂/∂y, ∂ϕ₁/∂y + ∂ϕ₂/∂x],\n ϕ = [ϕ₁, ϕ₂] = [(1-d)*ϕ_i, d*ϕ_i].\\n\n Based on transforming derivatives from the reference element.\\n\n i: Local basis function index. i = 0, 1, 2.\\n\n d: x- or y-component basis vector. {x: 0, y: 1}\\n\n J_inv_T: Inverse Jacobian: [∂(r,s)/∂(x, y)].T\\n\n k: Element number.\n \"\"\"\n # Slow extra checking. Might be removed. 
\n        if J_inv_T is not None:\n            pass\n        elif J_inv_T is None and type(k) is int:\n            J_inv_T = la.inv(self.generate_jacobian(k)).T\n        else:\n            raise ValueError(\"Either the inverse-Jacobian transpose 'J_inv_T' or element number 'k' must be given.\")\n\n        gradient = J_inv_T @ self.reference_gradients[i]\n\n        eps_xx = (1 - d)*gradient[0]\n        eps_yy = d*gradient[1]\n        eps_xy = d*gradient[0] + (1-d)*gradient[1]\n        \n        return np.array([eps_xx, eps_yy, eps_xy])\n\n    def sigma_vec(self, i: int, disp_vec, J_inv_T=None, k=None):\n        \"\"\"\n        Calculate sigma-components [σ_xx, σ_yy, σ_xy]\n        at the node index. \n        i: Local basis function index.\n        disp_vec: Displacement of each component of the basis functions. np.array([u_1, u_2]).\n        \"\"\"\n        # Slow extra checking. Might be removed. \n        if J_inv_T is not None:\n            pass\n        elif J_inv_T is None and type(k) is int:\n            J_inv_T = la.inv(self.generate_jacobian(k)).T\n        else:\n            raise ValueError(\"Either the inverse-Jacobian transpose 'J_inv_T' or element number 'k' must be given.\")\n\n        eps_vec_0 = disp_vec[0]*self.basis_func_eps_vec(i, d=0, J_inv_T=J_inv_T)\n        eps_vec_1 = disp_vec[1]*self.basis_func_eps_vec(i, d=1, J_inv_T=J_inv_T)\n        sigma_vec = self.C @ (eps_vec_0 + eps_vec_1)\n        return sigma_vec\n\n    \"\"\" Stiffness matrix \"\"\"\n\n    def A_i_j(self, i_loc, j_loc, d_i, d_j, A_k, J_inv_T):\n        \"\"\"\n        Function calculating a (Aₕ)ᵢ,ⱼ-th contribution to the \"Stiffness\"-matrix.\n        i_loc: Local index of basis function. [0, 1, 2]\n        j_loc: Local index of basis function. [0, 1, 2]\n        d_i, d_j: Components of the vector function ϕ_i_d = [(1-d)*ϕ_i, d*ϕ_i]\n        A_k: Area of the element: |Jₖ|/2\n        J_inv_T: Inverse Jacobian: [∂(r,s)/∂(x, y)].T\n        \"\"\"\n        eps_i = self.basis_func_eps_vec(i_loc, d_i, J_inv_T) \n        eps_j = self.basis_func_eps_vec(j_loc, d_j, J_inv_T) \n        # Could do: eps_i.T @ self.C @ eps_j, but '.T' does nothing for 1-D array.\n        return A_k * (eps_i @ self.C @ eps_j)\n\n    def generate_A_h(self):\n        # Hopefully updated correctly!\n        \"\"\"\n        Generate the Stiffness Matrix A_h, based on linear Lagrange basis functions on triangles.\n        \"\"\"\n        self.A_h = sp.dok_matrix((self.num_basis_functions, self.num_basis_functions))\n        d_pairs = ((0, 0), (0, 1), (1, 0), (1, 1))\n\n        # Loop through elements (triangles): \n        for k, element in enumerate(self.triang):\n            # Six basis functions per element. 3 nodes, 2 functions per node.\n            # (3x2)^2 = 36 interactions per element k.\n            # Only 6*(6+1)/2 = 21 unique interactions due to symmetry, but\n            # for simplicity in code we perform 24 calculations.\n\n            J = self.generate_jacobian(k)\n            J_inv_T = la.inv(J).T\n            element_area = 0.5*la.det(J)\n            \n            # Exploit symmetry of the (A_h)_sub-matrix about i=j.\n            # Only compute the upper-triangular part i <= j.\n            for i_loc, node_i in enumerate(element):\n                \n                # Do every vector-component combination. 
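\n                # d_pairs = ((0, 0), (0, 1), (1, 0), (1, 1)) couples the x- and
\n                # y-component basis functions sitting on the same node pair.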
\n                for (d_i, d_j) in d_pairs:\n                    A_i_di_j_dj = self.A_i_j(i_loc=i_loc, j_loc=i_loc, d_i=d_i, d_j=d_j, \n                                             A_k=element_area, J_inv_T=J_inv_T)\n                    self.A_h[2*node_i + d_i, 2*node_i + d_j] += A_i_di_j_dj\n\n                for j_loc in range(i_loc+1, 3):\n                    node_j = element[j_loc]\n                    \n                    for (d_i, d_j) in d_pairs:\n                        A_i_di_j_dj = self.A_i_j(i_loc=i_loc, j_loc=j_loc, d_i=d_i, d_j=d_j, \n                                                 A_k=element_area, J_inv_T=J_inv_T)\n                        self.A_h[2*node_i + d_i, 2*node_j + d_j] += A_i_di_j_dj\n                        self.A_h[2*node_j + d_j, 2*node_i + d_i] += A_i_di_j_dj\n        \n        # Convert A_h to csr-format for ease of calculations later:\n        self.A_h = self.A_h.tocsr() \n    \n    \"\"\" Mass matrix \"\"\"\n\n    def M_i_j(self, i_loc, j_loc, d_i, d_j, det_J_k):\n        \"\"\"\n        Function calculating a (Mₕ)ᵢ,ⱼ-th contribution to the \"Mass\"-matrix. \\n\n        i_loc: Local index of basis function. [0, 1, 2] \\n\n        j_loc: Local index of basis function. [0, 1, 2] \\n\n        d_i, d_j: Components of the vector function ϕ_i_d = [(1-d)*ϕ_i, d*ϕ_i] \\n\n        det_J_k: Determinant of the Jacobian for element k.\n        \"\"\"\n        # Only 36 interactions. 21 unique. Can calculate the 21 elements beforehand, and \n        # insert the values det_J_k*M_ref for each element.\n\n        # d_i, d_j = [0, 1]:\n        # Dot product (ϕ_i)^T ⋅ ϕ_j = (1 - d_i)*(1 - d_j)ϕ_i*ϕ_j + (d_i)*(d_j)ϕ_i*ϕ_j.\n        if d_i != d_j:\n            return 0.0\n\n        # As we integrate over the reference triangle, \n        # we can use the basis function definitions directly:\n        phi_i, phi_j = self.basis_functions[i_loc], self.basis_functions[j_loc]\n\n        # Handle both a constant density and a callable, spatially varying 'self.rho': \n        if self.constant_density:\n            integrand = lambda p: self.rho*phi_i(p)*phi_j(p)\n        else:\n            integrand = lambda p: self.rho(p)*phi_i(p)*phi_j(p)\n        \n        p1, p2, p3 = self.reference_triangle_nodes\n        I = quadrature2D(integrand, p1, p2, p3, Nq=self.quad_points)\n\n        # Scale the integral up by the determinant of the Element-Jacobian:\n        return I*det_J_k\n\n    def generate_M_ref(self):\n        # 36 interactions.\n        M_ref = np.zeros((6, 6), dtype=float)\n        local_indices = (0, 1, 2)\n        d_pairs = ((0, 0), (0, 1), (1, 0), (1, 1))\n        \n        for i, j, (d_i, d_j) in iter_product(local_indices, local_indices, d_pairs):\n            row, col = 2*i + d_i, 2*j + d_j\n            # Calculate the mass matrix on the reference element: det_J_k = 1.0\n            M_ref[row, col] = self.M_i_j(i, j, d_i, d_j, det_J_k=1.0)\n\n        return M_ref\n\n    def M_h_element_contribution(self, element: list, det_J_k: float, M_ref: np.ndarray):\n        \n        if self.constant_density:\n            # Exploit symmetry of the (M_h)_sub-matrix about i=j.\n            # Only compute the upper-triangular part i <= j.\n\n            for i_loc, node_i in enumerate(element):\n                # All values scaled by det_J_k compared to M_ref: \n                \n                for (d_i, d_j) in self.d_pairs:\n                    i_i_value = det_J_k * M_ref[2*i_loc + d_i, 2*i_loc + d_j]\n                    self.M_h[2*node_i + d_i, 2*node_i + d_j] += i_i_value\n\n                for j_loc in range(i_loc+1, 3):\n                    node_j = element[j_loc]\n                    \n                    for (d_i, d_j) in self.d_pairs:\n                        i_j_value = det_J_k * M_ref[2*i_loc + d_i, 2*j_loc + d_j]\n\n                        self.M_h[2*node_i + d_i, 2*node_j + d_j] += i_j_value\n                        self.M_h[2*node_j + d_j, 2*node_i + d_i] += i_j_value\n        \n        else:\n            # Must integrate density over each element per basis function. 
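\n            # With a spatially varying rho the precomputed reference matrix cannot be
\n            # reused, so every (i_loc, j_loc, d_i, d_j) entry gets its own quadrature.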
\n for i_loc, node_i in enumerate(element):\n \n for (d_i, d_j) in self.d_pairs:\n i_i_value = self.M_i_j(i_loc, i_loc, d_i, d_j, det_J_k)\n self.M_h[2*node_i + d_i, 2*node_i + d_j] += i_i_value\n\n for j_loc in range(i_loc+1, 3):\n node_j = element[j_loc]\n \n for (d_i, d_j) in self.d_pairs:\n i_j_value = self.M_i_j(i_loc, j_loc, d_i, d_j, det_J_k)\n\n self.M_h[2*node_i + d_i, 2*node_j + d_j] += i_j_value\n self.M_h[2*node_j + d_j, 2*node_i + d_i] += i_j_value\n\n def generate_M_h(self):\n \n if self.constant_density:\n M_ref = self.generate_M_ref()\n else:\n M_ref = None\n \n self.M_h = sp.dok_matrix((self.num_basis_functions, self.num_basis_functions))\n\n # Generate the mass matrix.\n for k, element in enumerate(self.triang):\n \n # TODO: Avoid generating all Jacobians twice for A_h and M_h.\n J = self.generate_jacobian(k)\n det_J_k = la.det(J)\n self.M_h_element_contribution(element=element, det_J_k=det_J_k, M_ref=M_ref)\n\n # Convert M_h to csr-format for ease of calculations later:\n self.M_h = self.M_h.tocsr()\n\n \"\"\" Source vector \"\"\"\n\n def generate_F_h(self):\n \"\"\"\n Generate the source vector. Sum over elements and add contributions from each basis function.\n \"\"\"\n # Source function f([x, y]) must be callable.\n assert(callable(self.f))\n # Making the full Source vector:\n self.F_h = np.zeros(self.num_basis_functions)\n\n # Reference triangle nodes:\n eta1, eta2, eta3 = self.reference_triangle_nodes\n\n T = self.reference_to_global_transformation\n\n for k, element in enumerate(self.triang):\n J_k = self.generate_jacobian(k)\n det_J_k = la.det(J_k)\n\n # Loop through nodes in each element and two components per node:\n for (i, node), d in iter_product(enumerate(element), (0, 1)):\n # print(f\"i: {i}, d: {d}\\tnode: {node}\")\n \n global_index = 2*node + d\n\n # Integrand. ϕ_i_d^T⋅f([x, y]) = [(1-d)*ϕ_i, d*ϕ_i]^T⋅[f_1(x, y), f_2(x, y)] \n # = ((1 - d)*f_1(x, y) + d*f_2(x, y))*ϕ_i(x, y)\n integrand = lambda eta: self.f(T(eta, k, J_k))[d]*self.basis_functions[i](eta)\n\n # Add contribution from basis function component d. 
Integrate over reference triangle.\n                self.F_h[global_index] += det_J_k*quadrature2D(integrand, eta1, eta2, eta3, self.quad_points)\n        \n        # Reshape F_h to a column vector:\n        self.F_h = self.F_h.reshape(len(self.F_h), 1)\n\n    \"\"\" Boundary conditions \"\"\"\n\n    def apply_node_dirichlet_bc(self, node, p=None):\n        if p is None:\n            p = self.nodes[node]\n        \n        # Indices of basis functions at 'node':\n        i_0, i_1 = 2*node + 0, 2*node + 1\n\n        dir_bc_value = self.g_D(p)\n        u_D_x, u_D_y = dir_bc_value\n\n        # Register the two basis functions at 'node'\n        # as Dirichlet BC's:\n        self.dirichlet_BC_mask[i_0] = True\n        self.dirichlet_BC_mask[i_1] = True\n\n        self.dirichlet_BC_nodes_mask[node] = True\n\n        # Register basis functions as Dirichlet BC's:\n        self.dirichlet_BC_basis_functions.append(i_0)\n        self.dirichlet_BC_basis_functions.append(i_1)\n\n        # Register their values: Sequence matters!\n        self.dirichlet_BC_values.append(u_D_x)\n        self.dirichlet_BC_values.append(u_D_y)\n\n        # Subtract from the source vector the columns corresponding to \n        # both the basis function components at 'node', times \n        # their respective Dirichlet BC-value: \n        self.F_h -= self.A_h[:, i_0]*u_D_x\n        self.F_h -= self.A_h[:, i_1]*u_D_y\n\n    def apply_direct_dirichlet(self):\n        # Dirichlet Boundary \n        for node in self.edge_nodes:\n            p = self.nodes[node]\n\n            if self.class_BC(p) == BCtype.Dir:\n                self.apply_node_dirichlet_bc(node, p)\n            \n            elif self.class_BC(p) == BCtype.Neu:\n                # Find which triangles the Edge node belongs to:\n                raise ValueError(\"Cannot apply Neumann BC using the Direct Dirichlet function!\")\n        \n        # Remove redundant degrees of freedom from F_h, A_h, and M_h:\n        F_mask = np.ones(len(self.F_h), dtype=bool)\n        F_mask[self.dirichlet_BC_basis_functions] = False\n        self.F_h = self.F_h[F_mask]\n\n        self.A_h = delete_from_csr(self.A_h, row_indices=self.dirichlet_BC_basis_functions, \n                                   col_indices=self.dirichlet_BC_basis_functions)\n        if self.M_h is None:\n            self.generate_M_h()\n\n        self.M_h = delete_from_csr(self.M_h, row_indices=self.dirichlet_BC_basis_functions, \n                                   col_indices=self.dirichlet_BC_basis_functions) \n        self.applied_BC = True\n\n    \"\"\" Solve \"\"\"\n\n    def solve_direct_dirichlet(self):\n        self.generate_A_h()\n        self.generate_F_h()\n        self.apply_direct_dirichlet()\n        \n        reduced_u_h = sp.linalg.spsolve(self.A_h, self.F_h)\n        self.u_h = np.zeros(self.num_basis_functions)\n        self.u_h[~self.dirichlet_BC_mask] = reduced_u_h\n        self.u_h[self.dirichlet_BC_mask] = self.dirichlet_BC_values\n        self.u_h = self.u_h.reshape((self.num_nodes, 2))\n\n    def fem_solution(self, element: list = None, F_inv: callable = None, k: int = None):\n        # Assume one has solved for the coefficients self.u_h:\n        if F_inv is None:\n            if k is not None:\n                F_inv = lambda p: self.global_to_reference_transformation(p, k, J_inv=None)\n            else:\n                raise ValueError(\"Either the inverse coordinate transform F_inv or element index k must be given.\")\n        \n        if element is None and type(k) is int:\n            element = self.triang[k]\n\n        # Retrieve coefficients of basis function in each vector component:\n        u_xs = self.u_h[[element], 0].ravel()\n        u_ys = self.u_h[[element], 1].ravel()\n        \n        phi_0 = lambda p: self.basis_functions[0](F_inv(p))\n        phi_1 = lambda p: self.basis_functions[1](F_inv(p))\n        phi_2 = lambda p: self.basis_functions[2](F_inv(p))\n        phi_funcs = [phi_0, phi_1, phi_2]\n\n        def u_h_func(p):\n            x_comp = sum([u_xs[i]*phi_funcs[i](p) for i in (0, 1, 2)])\n            y_comp = sum([u_ys[i]*phi_funcs[i](p) for i in (0, 1, 2)])\n            return np.array([x_comp, y_comp])\n\n        return u_h_func\n\n    \"\"\" Error calculation \"\"\"\n\n    
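# Mesh size h: the longest triangle edge in the mesh; error convergence is typically reported against h.\n    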
def find_h(self):\n h = 0\n\n for k, element in enumerate(self.triang):\n\n p1, p2, p3 = element\n x1 = self.nodes[p1]\n x2 = self.nodes[p2]\n x3 = self.nodes[p3]\n hk = max(np.linalg.norm(x2 - x1), np.linalg.norm(x3 - x1), np.linalg.norm(x3 - x2))\n\n if hk > h:\n h = hk\n\n return h\n\n def L2_norm_error(self, u_ex, quad_points=None):\n # raise NotImplementedError(\"Need to update function for ElasticitySolver2D\")\n assert isinstance(self.u_h, np.ndarray)\n\n if quad_points == None:\n quad_points = self.quad_points\n\n E = 0.0\n\n \"\"\" For each element in triangulation. \"\"\" \n for k, element in enumerate(self.triang):\n \n J_inv = la.inv(self.generate_jacobian(k))\n\n F_inv = lambda p: self.global_to_reference_transformation(p, k, J_inv=J_inv)\n\n u_h_func = self.fem_solution(element=element, F_inv=F_inv)\n\n \"\"\" err = || u_h - u_ex ||₂**2 \"\"\"\n err = lambda p: la.norm(u_h_func(p) - u_ex(p), ord=2)**2\n \n \"\"\" Gauss quadrature approximation to contribution to square error from element k \"\"\"\n x1, x2, x3 = [self.nodes[node] for node in element]\n E += quadrature2D(err, x1, x2, x3, Nq=quad_points)\n\n return np.sqrt(E)\n\n \"\"\" Generalized eigenvalue problem \"\"\"\n\n def solve_vibration_modes(self, num=20):\n \"\"\"\n Find the 'num' lowest generalized eigenvalues, along with eigenvectors. \\n\n Aₕu = ω²Mₕu \n \"\"\"\n if self.A_h is None:\n self.generate_A_h()\n if self.M_h is None:\n self.generate_M_h()\n\n eigvals_small, eigvecs_small = spla.eigsh(A=self.A_h, M=self.M_h, \n k=num, which=\"LM\", sigma=0.0)\n self.num_eigenpairs = num\n self.vibration_frequencies = eigvals_small\n\n # List of the displacement eigenvectors for each eigenvalue: \n self.vibration_eigenvectors = []\n num_dof = int(self.num_nodes - len(self.dirichlet_BC_basis_functions)//2)\n for k in range(num):\n displacement_vec = np.zeros((num_dof, 2))\n \n # Eigenvectors stored column-wise:\n vibration_eigenvec = eigvecs_small[:, k]\n \n for n, d in iter_product(range(num_dof), (0, 1)):\n displacement_vec[n, d] = vibration_eigenvec[2*n + d]\n \n self.vibration_eigenvectors.append(displacement_vec)\n \n print(\"Done solving for eigenmodes!\")\n\n def retrieve_vibration_eigenvector(self, k):\n if not self.applied_BC:\n displacement_vec = self.vibration_eigenvectors[k]\n else:\n displacement_vec = np.zeros((self.num_nodes, 2))\n num_Dir_BC_nodes = len(self.dirichlet_BC_basis_functions)//2\n Dir_BC_displacement = np.array(self.dirichlet_BC_values).reshape(num_Dir_BC_nodes, 2)\n\n displacement_vec[self.dirichlet_BC_nodes_mask] = Dir_BC_displacement\n displacement_vec[~self.dirichlet_BC_nodes_mask] = self.vibration_eigenvectors[k]\n return displacement_vec\n\n \"\"\" Visualization \"\"\"\n\n def find_max_stress(self, nodes=None, elements=None, displacement=None):\n\n if elements is None and nodes is None:\n element_triang = self.triang\n\n elif nodes is not None:\n # Find all triangles with nodes-elements as vertices.\n if type(nodes) is int:\n triangle_indices = [i for i, triangle in enumerate(self.triang) if nodes in triangle] \n else:\n # Stupidly unreadable One-line solution:\n triangle_indices = list(filter(lambda i: any((node in self.triang[i] for node in nodes)), \n np.arange(len(self.triang))))\n\n element_triang = self.triang[triangle_indices]\n\n else:\n element_triang = self.triang[elements]\n \n nodes_x = np.copy(self.nodes[:, 0])\n nodes_y = np.copy(self.nodes[:, 1])\n\n if displacement is not None:\n # Apply displacement to each node:\n assert(self.nodes.shape == displacement.shape)\n nodes_x 
+= displacement[:, 0]\n nodes_y += displacement[:, 1]\n \n max_stress = 0\n\n for k, el in enumerate(element_triang):\n\n sigma_0 = self.sigma_vec(0, displacement[el[0],:], k=k)\n sigma_1 = self.sigma_vec(1, displacement[el[1],:], k=k)\n sigma_2 = self.sigma_vec(2, displacement[el[2],:], k=k)\n\n mts0 = 0.5 * ( sigma_0[0] + sigma_0[1])\n mts1 = 0.5 * ( sigma_1[0] + sigma_1[1])\n mts2 = 0.5 * ( sigma_2[0] + sigma_2[1])\n\n mts_el = ( mts0 + mts1 + mts2 ) / 3\n if np.abs(mts_el) > max_stress:\n max_stress = np.abs(mts_el)\n\n return max_stress\n\n def display_mesh(self, nodes=None, elements=None, displacement=None, show=True, ax=None):\n if elements is None and nodes is None:\n element_triang = self.triang\n\n elif nodes is not None:\n # Find all triangles with nodes-elements as vertices.\n if type(nodes) is int:\n triangle_indices = [i for i, triangle in enumerate(self.triang) if nodes in triangle] \n else:\n # Stupidly unreadable One-line solution:\n triangle_indices = list(filter(lambda i: any((node in self.triang[i] for node in nodes)), \n np.arange(len(self.triang))))\n\n element_triang = self.triang[triangle_indices]\n\n elif elements is not None:\n element_triang = self.triang[elements]\n \n nodes_x = np.copy(self.nodes[:, 0])\n nodes_y = np.copy(self.nodes[:, 1])\n\n if displacement is not None:\n # Apply displacement to each node:\n assert(self.nodes.shape == displacement.shape)\n nodes_x += displacement[:, 0]\n nodes_y += displacement[:, 1]\n\n if ax is not None:\n plot = ax.triplot(nodes_x, nodes_y, triangles=element_triang, color='black') \n else:\n plot = plt.triplot(nodes_x, nodes_y, triangles=element_triang)\n\n if show:\n plt.rcParams.update({\"font.size\": 20})\n plt.title(f\"{len(self.triang)} Element Triangulation\")\n axis = plt.gca()\n axis.set_xlabel(\"x\", fontsize=20)\n axis.set_ylabel(\"y\", fontsize=20)\n\n x_min, x_max = np.min(nodes_x), np.max(nodes_x)\n y_min, y_max = np.min(nodes_y), np.max(nodes_y)\n scale_x = abs(x_max - x_min)\n scale_y = abs(y_max - y_min)\n\n margin = 0.05\n plt.xlim(x_min - margin*scale_x, x_max + margin*scale_x)\n plt.ylim(y_min - margin*scale_y, y_max + margin*scale_y)\n\n plt.show()\n return \n \n else:\n return plot\n\n def display_mesh_stress(self, displacement=None, face_colors=None, norm=None, show=True, ax=None):\n\n element_triang = self.triang\n \n nodes_x = np.copy(self.nodes[:, 0])\n nodes_y = np.copy(self.nodes[:, 1])\n\n if displacement is not None:\n # Apply displacement to each node:\n assert(self.nodes.shape == displacement.shape)\n nodes_x += displacement[:, 0]\n nodes_y += displacement[:, 1]\n\n if face_colors is None:\n zcolors = np.zeros(len(element_triang), dtype=float)\n\n for k, el in enumerate(element_triang):\n J_inv_T = la.inv(self.generate_jacobian(k)).T\n\n sum_mts = 0.0\n for i, d in enumerate(el):\n\n sigma_i = self.sigma_vec(i, displacement[d, :], J_inv_T=J_inv_T)\n mts_i = (sigma_i[0] + sigma_i[1])\n\n sum_mts += mts_i\n \n zcolors[k] = sum_mts / 6.0 # Average over 2 elements in tr(σ) and 3 nodes.\n \n if ax is not None:\n plot = ax.tripcolor(nodes_x, nodes_y, triangles=element_triang, facecolors=zcolors, edgecolors='k', norm=norm)\n else:\n # Set a bigger font size for text:\n plt.rcParams.update({'font.size': 20, 'figure.figsize': (16, 9)})\n plt.suptitle(\"Displacement Stress\")\n plt.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.93)\n plt.xlabel(\"x [m]\")\n plt.ylabel(\"y [m]\")\n plot = plt.tripcolor(nodes_x, nodes_y, triangles=element_triang, facecolors=zcolors, edgecolors='k', norm=norm)\n 
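# One color per triangle face: zcolors holds the mean total stress (averaged trace of sigma) for each element.\n            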
cbar = plt.colorbar(plot)\n cbar.ax.get_yaxis().labelpad = 50\n cbar.set_label(r\"Mean Total Stress $\\sigma$ [Pa]\", rotation=270, fontsize=22)\n\n if show:\n\n x_min, x_max = np.min(nodes_x), np.max(nodes_x)\n y_min, y_max = np.min(nodes_y), np.max(nodes_y)\n scale_x = abs(x_max - x_min)\n scale_y = abs(y_max - y_min)\n\n margin = 0.05\n plt.xlim(x_min - margin*scale_x, x_max + margin*scale_x)\n plt.ylim(y_min - margin*scale_y, y_max + margin*scale_y)\n\n plt.show()\n\n else:\n return plot\n\n def display_vector_field(self, u=None, title=None):\n \"\"\"\n Display a vector field over the domain Ω.\\n\n u([x, y]) → [u_x(x, y), u_y(x, y)]\n \"\"\"\n # Vector X- and Y-components:\n if callable(u):\n vectors = np.array([u(p) for p in self.nodes])\n elif type(u) is np.ndarray:\n # Assume u contains displacement of each node:\n # u.shape = (num_nodes, 2)\n vectors = np.array([u_p for u_p in u])\n else:\n # Use internal solution u_h:\n vectors = np.array([u_p for u_p in self.u_h])\n\n plt.rcParams.update({'font.size': 18})\n\n fig = plt.figure(figsize=(14, 14))\n ax = fig.add_subplot(111)\n\n if title is None:\n fig.suptitle(\"Vector Field Plot\")\n else:\n fig.suptitle(title)\n # Arrow locations:\n X, Y = self.nodes[:, 0], self.nodes[:, 1]\n\n U, V = vectors[:, 0], vectors[:, 1]\n \n Q = ax.quiver(X, Y, U, V, scale=12, angles='xy', scale_units='xy')\n ax.quiverkey(Q, X=0.3, Y=0.97, U=1, coordinates='figure',\n label='Quiver key, length = 1', labelpos='W')\n \n ax.triplot(X, Y, self.triang, alpha=0.3, zorder=-10) \n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\", rotation=0)\n\n plt.subplots_adjust(left=0.08, bottom=0.08, right=0.97, top=0.95)\n plt.show()\n\n def display_single_element_error(self, k: int, u: callable, ax=None, show=False, quiver=False):\n # assert callable(u)\n element = self.triang[k]\n p0, p1, p2 = self.nodes[element]\n\n num = 50\n r_s = np.linspace(0, 1.0, num)\n delta_r = 1.0/num\n \n internal_nodes = []\n for r in r_s:\n s = 0.0\n while s + r <= 1.0:\n internal_nodes.append(p0 + r*(p1 - p0) + s*(p2 - p0))\n s += delta_r\n\n internal_nodes = np.array(internal_nodes)\n u_h_func = self.fem_solution(k=k)\n errors = np.array([la.norm(u(p) - u_h_func(p), ord=2)**2 for p in internal_nodes])\n X, Y = internal_nodes[:, 0], internal_nodes[:, 1]\n\n if ax is None:\n fig = plt.figure(figsize=(14, 14))\n fig.suptitle(\"Single Element Vector Field Plot\")\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\", rotation=0)\n elif quiver == True:\n # Arrow locations:\n vectors = np.array([u(p) for p in internal_nodes])\n ax.scatter(X, Y)\n U, V = vectors[:, 0], vectors[:, 1]\n \n Q = ax.quiver(X, Y, U, V)\n ax.quiverkey(Q, X=0.3, Y=0.97, U=1, coordinates='figure',\n label='Quiver key, length = 1', labelpos='W')\n \n else:\n ax.plot_trisurf(X, Y, errors, color='black')\n \n if show:\n plt.subplots_adjust(left=0.08, bottom=0.08, right=0.97, top=0.95)\n plt.show()\n\n def display_L2_error(self, u_ex: callable):\n\n fig, ax = plt.subplots(figsize=(12, 12), subplot_kw={'projection': '3d'})\n for k, element in enumerate(self.triang):\n\n self.display_single_element_error(k, u=u_ex, ax=ax, show=False)\n\n plt.show()\n\n def display_vibration_mode(self, k):\n if k > self.num_eigenpairs - 1:\n raise ValueError(f\"Too high an eigen-number. 
Have only solved for {self.num_eigenpairs} eigenpairs.\")\n displacement_vec = self.retrieve_vibration_eigenvector(k)\n return self.display_mesh(displacement=displacement_vec)\n\n def vibration_stress_mosaic(self, k, alpha=1, dims=(3,3), figsize=(10,12), dpi=None, show=None,\n savename=None, title=None):\n \n if k > self.num_eigenpairs - 1:\n raise ValueError(f\"Too high an eigen-number. Have only solved for {self.num_eigenpairs} eigenpairs.\")\n\n displacement_vec = self.retrieve_vibration_eigenvector(k)\n\n max_stretch = self.find_max_stress(displacement=displacement_vec*alpha)\n\n # Set a bigger font size for text:\n fs = 36\n plt.rcParams.update({'font.size': fs})\n\n norm = mpl.colors.Normalize(vmin=-max_stretch, vmax=max_stretch)\n\n fig, axs = plt.subplots(dims[0], dims[1], figsize=figsize, sharex=True, sharey=True)\n \n if title is None:\n plt.subplots_adjust(top=0.95)\n\n fig.suptitle(title)\n\n plt.subplots_adjust(left=0.11, bottom=0.11, wspace=0, hspace=0, right=0.89)\n K = dims[0] * dims[1]\n\n for i, phi in enumerate(np.linspace(0, np.pi, K)):\n self.display_mesh_stress(displacement=alpha*np.cos(phi)*displacement_vec,\n norm=norm, show=False, ax=axs.flatten()[i])\n\n cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=None), ax=axs[:,dims[1]-1])\n cbar.ax.get_yaxis().labelpad = 30\n cbar.set_label(r\"Mean Total Stress $\\sigma$ [Pa]\", rotation=270, fontsize=fs)\n\n labax = fig.add_subplot(111, frameon=False)\n # hide tick and tick label of the big axis\n labax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n labax.set_xlabel(\"$x \\, [m]$\")\n labax.set_ylabel(\"$y \\, [m]$\")\n labax.get_yaxis().labelpad = 20\n\n if savename is not None:\n fig.savefig(f\"root/project_2/figures/{savename}.png\", dpi=dpi)\n\n if show is None:\n if savename is None:\n show = True\n else:\n show = False\n\n if show:\n plt.show()\n\n return\n\n def show_frequencies(self, show=None, savename=None, title=None, figsize=None):\n\n if self.vibration_frequencies is None:\n raise Exception(\"Need to calculate vibration frequencies first.\")\n\n if title is None:\n title = \"Vibration-frequencies\"\n \n plt.rcParams.update({'font.size': 24})\n\n fig, axs = plt.subplots(1,2, figsize=figsize)\n\n plt.subplots_adjust(wspace=0.21, left=0.15, right=0.95)\n\n fig.suptitle(title)\n\n if np.any(np.isclose(self.vibration_frequencies, 0, atol=1e-10*np.amax(self.vibration_frequencies))):\n eps = 1.1*np.abs(np.amin(self.vibration_frequencies))\n else:\n eps = 0\n \n vibs = np.sqrt(self.vibration_frequencies + eps) / (2*np.pi)\n axs[0].plot(np.arange(vibs.shape[0]), vibs, 'k.', markersize=15)\n axs[0].set_title(\"All frequencies, $f_k$\")\n axs[0].set_ylabel(\"[Hz]\")\n axs[0].get_yaxis().labelpad = 13\n axs[0].set_xlabel(r\"$k$\")\n\n non_zero_vibs = np.sqrt(self.vibration_frequencies[3:]) / (2*np.pi)\n\n fund_freq = non_zero_vibs[0] # Fundamental frequency. 
First non-zero frequency.\n n_max = np.max(non_zero_vibs / fund_freq)\n\n axs[1].plot(np.arange(3, non_zero_vibs.shape[0] + 3), non_zero_vibs / fund_freq, 'k.', markersize=15)\n for i in range(1, int(n_max) + 1):\n axs[1].axhline(y=i, color='black', lw=0.4)\n\n axs[1].set_title(\"Non-zero frequencies, $f_k \\, / \\, f_3$\")\n axs[1].set_xlabel(r\"$k$\")\n\n if show is None:\n if savename is None:\n show = True\n else:\n show = False\n\n if show:\n plt.show()\n return\n\n if savename is not None:\n fig.savefig(f\"root/project_2/figures/{savename}.png\")\n\n return\n\n \"\"\" Animation \"\"\"\n\n def animate_vibration_mode(self, k, alpha=1, l=1, show=None, savename=None, playtime=5, fps=60, repeat_delay=0, title=None):\n if k > self.num_eigenpairs - 1:\n raise ValueError(f\"Too high an eigen-number. Have only solved for {self.num_eigenpairs} eigenpairs.\")\n\n displacement_vec = self.retrieve_vibration_eigenvector(k)\n\n N_frames = playtime * fps\n ts = np.linspace(0, 2*np.pi, N_frames)\n\n fig, ax = plt.subplots(figsize=(16, 16))\n if title is None:\n fig.suptitle(f\"Eigen vibration mode {k}\", fontsize=18)\n else:\n fig.suptitle(title, fontsize=18)\n\n artists = [self.display_mesh(displacement=alpha*np.sin(l*t)*displacement_vec,\n show=False, ax=ax) for t in ts]\n\n ani = ArtistAnimation(fig, artists, interval=1000//fps, repeat_delay=repeat_delay, repeat=True, blit=True)\n\n if savename is not None:\n ani.save(f\"root/project_2/animations/{savename}.mp4\")\n\n if show is None:\n if savename is None:\n show = True\n else:\n show = False\n \n if show:\n plt.show()\n return\n\n # Clean up memory\n fig.clear()\n plt.close(fig)\n del fig, ax, artists, ani\n import gc\n gc.collect()\n\n return\n\n def animate_vibration_mode_stress(self, k, alpha=1, l=1, show=None, savename=None, \n playtime=5, fps=60, repeat_delay=0, title=None):\n if k > self.num_eigenpairs - 1:\n raise ValueError(f\"Too high an eigen-number. 
Have only solved for {self.num_eigenpairs} eigenpairs.\")\n\n displacement_vec = self.retrieve_vibration_eigenvector(k)\n\n N_frames = int(playtime * fps)\n ts = np.linspace(0, 2*np.pi, N_frames)\n\n max_stretch = self.find_max_stress(displacement=displacement_vec*alpha)\n\n # Set a bigger font size for text:\n plt.rcParams.update({'font.size': 20})\n norm = mpl.colors.Normalize(vmin=-max_stretch, vmax=max_stretch)\n\n fig, ax = plt.subplots(figsize=(1.5*16,1.5*9))\n \n plt.subplots_adjust(left=0.05, bottom=0.05, right=0.99, top=0.94)\n ax.set_xlabel(\"x [m]\")\n ax.set_ylabel(\"y [m]\")\n\n if title is None:\n fig.suptitle(f\"Vibration mode {k} for {self.area} with {len(self.triang)} Elements\")\n elif type(title) is str:\n fig.suptitle(title)\n else:\n pass\n\n artists = [self.display_mesh_stress(displacement=alpha*np.sin(l*t)*displacement_vec,\n norm=norm, show=False, ax=ax).findobj() for t in ts]\n\n ani = ArtistAnimation(fig, artists, interval=1000//fps, repeat_delay=repeat_delay, repeat=True, blit=True)\n cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=None), ax=ax)\n cbar.ax.get_yaxis().labelpad = 50\n cbar.set_label(r\"Mean Total Stress $\\sigma$ [Pa]\", rotation=270, fontsize=24)\n\n if savename is not None:\n ani.save(f\"root/project_2/animations/{savename}.mp4\")\n\n if show is None:\n if savename is None:\n show = True\n else:\n show = False\n \n if show:\n plt.show()\n return \n \n # Clean up memory\n fig.clear()\n plt.close(fig)\n del fig, ax, artists, ani, norm, cbar\n import gc\n gc.collect()\n\n return\n","sub_path":"root/project_2/elasticity_solver.py","file_name":"elasticity_solver.py","file_ext":"py","file_size_in_byte":39682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"125782853","text":"import pytest\nimport requests\n\nfrom yippi import YippiClient\nfrom yippi.Enums import Rating\nfrom yippi.Exceptions import APIError\nfrom yippi.Exceptions import UserError\n\n\n@pytest.fixture(scope=\"module\")\ndef vcr_cassette_dir(request):\n return \"tests/cassettes/sync\"\n\n\n@pytest.fixture\ndef client():\n session = requests.Session()\n return YippiClient(\"Yippi\", \"0.1\", \"Error-\", session=session)\n\n\n@pytest.mark.vcr()\ndef test_getpost(client):\n post = client.post(1383235)\n assert post.id == 1383235\n assert post.created_at == \"2017-11-20T12:23:11.340-05:00\"\n assert post.updated_at == \"2020-04-17T20:27:20.798-04:00\"\n assert post.file == {\n \"width\": 767,\n \"height\": 1000,\n \"ext\": \"png\",\n \"size\": 489122,\n \"md5\": \"539fd6c8c9af7b79693783b995ddf640\",\n \"url\": \"https://static1.e621.net/data/53/9f/539fd6c8c9af7b79693783b995ddf640.png\",\n }\n assert post.preview == {\n \"width\": 115,\n \"height\": 150,\n \"url\": \"https://static1.e621.net/data/preview/53/9f/539fd6c8c9af7b79693783b995ddf640.jpg\",\n }\n assert post.sample == {\n \"has\": False,\n \"height\": 1000,\n \"width\": 767,\n \"url\": \"https://static1.e621.net/data/53/9f/539fd6c8c9af7b79693783b995ddf640.png\",\n }\n assert post.score == {\"up\": 126, \"down\": -1, \"total\": 125}\n assert post.tags == {\n \"general\": [\n \"5_fingers\",\n \"anthro\",\n \"bed\",\n \"bedding\",\n \"blanket\",\n \"clothed\",\n \"clothing\",\n \"door\",\n \"duo\",\n \"fingers\",\n \"fur\",\n \"furniture\",\n \"lying\",\n \"male\",\n \"male/male\",\n \"on_side\",\n \"pillow\",\n \"sleeping\",\n \"spooning\",\n ],\n \"species\": [\n \"bird_dog\",\n \"canid\",\n \"canine\",\n \"canis\",\n \"domestic_dog\",\n \"golden_retriever\",\n 
\"hunting_dog\",\n \"mammal\",\n \"retriever\",\n \"wolf\",\n ],\n \"character\": [\"daniel_segja\", \"joel_mustard\"],\n \"copyright\": [\"patreon\"],\n \"artist\": [\"zeta-haru\"],\n \"invalid\": [],\n \"lore\": [],\n \"meta\": [\"comic\"],\n }\n assert post.locked_tags == []\n assert post.change_seq == 23384218\n assert post.flags == {\n \"pending\": False,\n \"flagged\": False,\n \"note_locked\": False,\n \"status_locked\": False,\n \"rating_locked\": False,\n \"deleted\": False,\n }\n assert post.rating == Rating.SAFE\n assert post.fav_count == 306\n assert post.sources == [\n \"https://www.furaffinity.net/view/25500100/\",\n \"https://furaffinity.net/user/zeta-haru\",\n ]\n assert post.pools == [6527]\n assert post.relationships == {\n \"parent_id\": None,\n \"has_children\": False,\n \"has_active_children\": False,\n \"children\": [],\n }\n assert post.approver_id == 38571\n assert post.uploader_id == 269143\n assert post.description == \"\"\n assert post.comment_count == 42\n assert not post.is_favorited\n\n\n@pytest.mark.vcr()\ndef test_404(client):\n with pytest.raises(UserError):\n client.post(99999999999)\n\n\n@pytest.mark.vcr()\ndef test_post_search(client):\n assert client.posts(\"m/m\")\n assert client.posts([\"m/m\", \"rating:s\"])\n assert len(client.posts(\"m/m\", limit=1)) == 1\n assert client.posts(\"m/m\", page=1)\n\n\n@pytest.mark.vcr()\ndef test_post_search_error(client):\n with pytest.raises(UserError):\n client.posts(\"m/m\", page=1000)\n\n\n@pytest.mark.vcr()\ndef test_note(client):\n note = client.notes(post_id=2222254, creator_id=366315, limit=1)[0]\n assert note.id == 257037\n assert note.created_at == \"2020-04-19T02:58:56.716-04:00\"\n assert note.updated_at == \"2020-04-19T02:58:56.716-04:00\"\n assert note.creator_id == 366315\n assert note.x == 774\n assert note.y == 136\n assert note.width == 36\n assert note.height == 50\n assert note.version == 2\n assert note.is_active\n assert note.post_id == 2222254\n assert note.body == \"Chu~\"\n assert note.creator_name == \"Mutter\"\n\n\n@pytest.mark.vcr()\ndef test_flags(client):\n flag = client.flags(post_id=2213076, limit=1)[-1]\n assert flag.id == 368383\n assert flag.created_at == \"2020-04-19T02:50:38.030-04:00\"\n assert flag.post_id == 2213076\n assert \"Inferior version\" in flag.reason\n assert not flag.is_resolved\n assert flag.updated_at == \"2020-04-19T02:50:38.030-04:00\"\n assert not flag.is_deletion\n assert flag.category == \"normal\"\n\n\n@pytest.mark.vcr()\ndef test_pools(client):\n pool = client.pools(\"Critical Success\")[0]\n assert pool.id == 6059\n assert pool.name == \"Critical_Success\"\n assert pool.created_at == \"2015-05-12T03:12:04.070-04:00\"\n assert pool.updated_at == \"2020-01-11T07:33:28.640-05:00\"\n assert pool.creator_id == 80719\n assert \"Terry already knew DMing a roleplay game\" in pool.description\n assert not pool.is_active\n assert pool.category == \"series\"\n assert not pool.is_deleted\n assert set([653514, 653515, 653820]).issubset(pool.post_ids)\n assert pool.creator_name == \"Emserdalf\"\n assert pool.post_count == 48\n\n\n@pytest.mark.vcr()\ndef test_500(client):\n with pytest.raises(APIError):\n # We can't simulate e621 error, so we just use external help.\n client._call_api(\"GET\", \"https://httpstat.us/500\")\n","sub_path":"tests/test_yippisync.py","file_name":"test_yippisync.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440359836","text":"from bookshelf 
import app\nfrom bookshelf.models import db, Book, Author\nimport unittest\n\n\nclass BooksTests(unittest.TestCase):\n    def setUp(self):\n        self.app = app.test_client()\n        app.config.from_object('bookshelf.config.TestingConfig')\n        with app.app_context():\n            db.create_all()\n            author = Author(name='Test Author')\n            book = Book(title='Test Book', author=[author])\n            db.session.add(author)\n            db.session.add(book)\n            db.session.commit()\n\n    def tearDown(self):\n        self.app = app.test_client()\n        app.config.from_object('bookshelf.config.TestingConfig')\n        with app.app_context():\n            db.drop_all()\n\n    def test_home_status_code(self):\n        result = self.app.get('/')\n        self.assertEqual(result.status_code, 200)\n        # result.data is bytes under Python 3, so compare against a bytes literal\n        self.assertIn(b'Bookshelf test app', result.data)\n\n    def test_display_books_status_code(self):\n        result = self.app.get('/books/list')\n        self.assertEqual(result.status_code, 200)\n        self.assertIn(b'Test Book', result.data)\n\n    def test_create_book_status_code_without_login(self):\n        result = self.app.post('/books/create')\n        self.assertEqual(result.status_code, 302)\n\n    def test_update_book_status_code_without_login(self):\n        result = self.app.post('/books/update/1')\n        self.assertEqual(result.status_code, 302)\n\n    def test_delete_book_status_code_without_login(self):\n        result = self.app.delete('/books/delete/1')\n        self.assertEqual(result.status_code, 302)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"49582424","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 23 11:41:35 2018\n\n@author: wangg\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\nX = tf.placeholder(tf.float32,[None,784])\nY = tf.placeholder(tf.float32,[None,10])\nkeep_prob = tf.placeholder(tf.float32)\n\nLR = 1e-3\ntraining_epochs = 500\ndisplay_step = 10\nbatch_size = 128\ndropout = 0.75\n\nWeights = {\n        \"Conv1\": tf.Variable(tf.random_normal([5,5,1,32])),\n        \"Conv2\": tf.Variable(tf.random_normal([5,5,32,64])),\n        \"FC1\": tf.Variable(tf.random_normal([7*7*64,1024])),\n        \"FC2\": tf.Variable(tf.random_normal([1024,10]))\n        }\n\nBiases = {\n        \"bc1\": tf.Variable(tf.random_normal([32])),\n        \"bc2\": tf.Variable(tf.random_normal([64])),\n        \"bf1\": tf.Variable(tf.random_normal([1024])),\n        \"bf2\": tf.Variable(tf.random_normal([10]))\n        }\n\ndef conv2d(x, W, b, strides=1):\n    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n    x = tf.nn.bias_add(x, b)\n    return tf.nn.relu(x)\n\n\ndef maxpool2d(x, k=2):\n    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n\ndef ConvMNIST(x,weights,biases,dropout):\n    x = tf.reshape(x,shape=[-1,28,28,1])\n    conv1 = conv2d(x,weights[\"Conv1\"],biases[\"bc1\"])\n    conv1 = maxpool2d(conv1,k=2)\n    conv2 = conv2d(conv1,weights[\"Conv2\"],biases[\"bc2\"])\n    conv2 = maxpool2d(conv2,k=2)\n    fc1 = tf.reshape(conv2, [-1, weights[\"FC1\"].get_shape().as_list()[0]])\n    fc1 = tf.add(tf.matmul(fc1,weights[\"FC1\"]),biases[\"bf1\"])\n    fc1 = tf.nn.relu(fc1)\n    fc1 = tf.nn.dropout(fc1,dropout)\n    fc2 = tf.add(tf.matmul(fc1,weights[\"FC2\"]),biases[\"bf2\"])\n    return fc2\n\nlogits = ConvMNIST(X,Weights,Biases,keep_prob)\nprediction = tf.nn.softmax(logits)\n\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\ntrain = 
tf.train.AdamOptimizer(learning_rate=LR).minimize(loss_op)\n\ncorrect_pred = tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))\n\ninit = tf.global_variables_initializer()\n#\nwith tf.Session() as sess:\n sess.run(init)\n for epochs in range(1,training_epochs+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n sess.run(train,feed_dict={X:batch_x,Y:batch_y,keep_prob: dropout})\n# print('finished')\n if epochs%display_step ==0 or epochs ==1:\n loss = sess.run(loss_op, feed_dict={X:batch_x,Y:batch_y,keep_prob: 1.0})\n acc = sess.run(accuracy, feed_dict={X:batch_x,Y:batch_y,keep_prob: 1.0})\n print(\"Step \" + str(epochs) + \", Minibatch Loss= \" +\"{:.4f}\".format(loss)+\",accuracy=\"\\\n +\"{:.3f}\".format(acc))\n# \n print('Training Finished!')\n test_acc = sess.run(accuracy, feed_dict={X: mnist.test.images,Y: mnist.test.labels,keep_prob: 1.0})\n print(\"Testing result:\"+\"{:.3f}\".format(test_acc))","sub_path":"CNN for MNIST.py","file_name":"CNN for MNIST.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"367351167","text":"from pyrogram import filters\r\nfrom pyrogram.handlers import MessageHandler\r\nimport player\r\nfrom config import SUDO_FILTER\r\nfrom strings import get_string as _\r\n\r\n\r\nasync def pause_resume(client, message):\r\n if player.pause_resume():\r\n await message.reply_text(_(\"paused\"))\r\n else:\r\n await message.reply_text(_(\"nothing_playing_pause\"))\r\n\r\n__handlers__ = [\r\n [\r\n MessageHandler(\r\n pause_resume,\r\n filters.command(\"pause\", \"/\")\r\n & SUDO_FILTER\r\n )\r\n ]\r\n]\r\n","sub_path":"handlers/pause_resume.py","file_name":"pause_resume.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"194963019","text":"from search import models\nimport json\nimport elasticsearch\n\n\n# elasticsearch --config=/usr/local/opt/elasticsearch/config/elasticsearch.yml\n\nes = elasticsearch.Elasticsearch()\n\nfor v in models.Venue.objects.all():\n v_dict = {}\n v_dict['name'] = v.name\n v_dict['location'] = json.loads(v.location)\n\n if v.menu:\n v_dict['menu'] = json.loads(v.menu)\n\n v_dict['stats'] = json.loads(v.stats)\n v_dict['categories'] = json.loads(v.categories)\n\n tips = v.tip_set.all()\n tips_dict = [t.as_dict() for t in tips]\n v_dict['tips'] = tips_dict\n\n es.index(index='4sreviews', doc_type='venues', id=v.venue_id, body=v_dict)\n","sub_path":"web/import_elasticsearch.py","file_name":"import_elasticsearch.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"347451676","text":"'''\nThis file will solve the sudoku puzzle using depth first search, The approach to solve \nthe sudoku problem is that every row, column and quadrant must have a unique value for 1-9 (no duplicates)\nin it. Thus, this means that the sum of every row, column and quadrant must be the same. for example in \na 9x9 (sum must be 45). In the file, the solver is using brute force (backtracking) to search through the board\nthus it try possible solutions to solve the grid and once it reached an invalid value, it will backtrack and \nrecursively try to solve itself. 
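As a minimal usage sketch (Solve and backtrack are the functions defined further down in this file, and the list-of-lists grid with 0 marking blank cells is the input format they assume):\n\n    solution = Solve(board)  # returns the solved grid, or False if the start board is invalid or unsolvable\n\n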
Note: a checking function is also added to ensure that the sum rule will\nwork for every case (i.e it will ensure that no duplicates are entered in the sudoku board)\n\nlink to documentation on using sum solving technique: https://en.wikipedia.org/wiki/Killer_sudoku#The_Rule_of_45\n'''\nimport time\n\nclass Sudoku(object):\n\n def __init__(self, initial):\n self.initial = initial \n self.size = len(initial) # Defines board type, ex. 9x9\n self.height = int(self.size**0.5) # Defines height of quadrant ex. 3 for 9x9\n\n def check_if_valid(self, state):\n '''\n Checks whether or not the values entered from the gui are valid i.e no duplicate values\n\n Returns: True or False \n '''\n # Check rows and columns \n for row in range(self.size):\n #elminates all the zeros in the row\n row_check = [x for x in state[row] if x is not 0]\n #check if there are duplicate values in the row \n if len(row_check)!=len(set(row_check)):\n return False \n col_checker = []\n for column in range(self.size):\n #don't want to add 0 to our list\n if state[column][row] == 0:\n pass\n #check if we already seen the value before -- indicates duplicate\n elif state[column][row] in col_checker:\n return False \n #never seen before, add to our checker \n else:\n col_checker.append(state[column][row])\n # Check quadrants\n for column in range(0,self.size,int(self.size**0.5)):\n for row in range(0,self.size,self.height):\n checker = []\n for block_row in range(0,self.height):\n for block_column in range(int(self.size**0.5)):\n #don't want to add 0 to our list\n if state[row + block_row][column + block_column] == 0:\n pass\n #check if we already seen the value before -- indicates duplicate\n elif state[row + block_row][column + block_column] in checker:\n print(\"not passing block\")\n return False\n #never seen before, add to our checker \n else: \n checker.append(state[row + block_row][column + block_column])\n return True\n\n def goal(self, state):\n '''\n The board with the solutions or the \"goal\" board, since the contraints in sudoku is that in each row, \n column and quadrant there must unique digits ex. on a 9x9 a row must contain all digits 1-9. Thus this means that\n the sum of the quadrants, row, columns must be the same ex. 9x9, sum= 45. 
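(Worked check: 1+2+...+9 = 9*10/2 = 45, which is exactly what total = sum(range(1, self.size+1)) computes in the method body below.) 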
So using this we can find whether\n a board is \"correct\" or incorrect without having to know the exact solution to the board\n\n Returns: True, if the sum is satisfied for all rows, columns and quadrants else:returns False\n '''\n # Expected sum of each row, column or quadrant.\n total = sum(range(1, self.size+1))\n\n # Check rows and columns \n for row in range(self.size):\n #sum of row is invalid or row contains empty values\n if (len(state[row]) != self.size) or (sum(state[row]) != total):\n return False\n #counter\n column_total = 0\n for column in range(self.size):\n #sums the column \n column_total += state[column][row]\n #column value is invalid\n if (column_total != total):\n return False\n\n # Check quadrants\n for column in range(0,self.size,int(self.size**0.5)):\n for row in range(0,self.size,self.height):\n #counter\n block_total = 0\n for block_row in range(0,self.height):\n for block_column in range(int(self.size**0.5)):\n #adds/sums the values in the quadrant\n block_total += state[row + block_row][column + block_column]\n #quadrant values are invalid\n if (block_total != total):\n return False\n #passed all conditions, boards is \"correct\"\n return True\n\n def get_values(self, values, used):\n '''\n Gets the set of valid numbers from values that are not currently in use\n\n Returns: set of valid numbers\n '''\n return [number for number in values if number not in used]\n\n def get_empty_spot(self, grid, state):\n '''\n Get the first empty spot on the grid that is empty (marked with 0)\n\n Returns: position of empty spot on grid \n '''\n for row in range(grid):\n for col in range(grid):\n #found an empty spot\n if state[row][col] == 0:\n return row, col\n\n def get_row_values(self, state, row):\n '''\n filter the valid value based on row \n ex. if a row was filled with values 1,2,3,4,5 then valid values would be 6,7,8,9\n\n Returns: valid values in row \n '''\n num_set = range(1, self.size+1) # Defines set of valid numbers that can be placed on board\n #finding the values in use\n in_row = [number for number in state[row] if (number != 0)] \n #get values that are not in the row\n valid_values = self.get_values(num_set, in_row)\n return valid_values\n\n def get_col_values(self, valid_values, state, column):\n '''\n filter the valid value based on column\n ex. if a column was filled with values 1,2,3,4,5 then valid values would be 6,7,8,9\n\n Returns: valid values in a column\n '''\n in_column = [] # List of valid values in spot's column\n for column_index in range(self.size):\n #get the values already in the column\n if state[column_index][column] != 0:\n in_column.append(state[column_index][column])\n #get the values that are not in the column\n valid_values = self.get_values(valid_values, in_column)\n return valid_values\n\n # Filter valid values based on quadrant\n def get_quad_values(self, valid_values, state, row, column):\n '''\n filter the valid value based on quadrants\n ex. 
if a column was filled with values 1,2,3,4,5 then valid values would be 6,7,8,9\n\n Returns: valid values in a quadrant\n '''\n in_quad = [] \n #row where the quadrant begins (so for ex row_start will be either 0, 3, 6 in a 9x9 grid)\n row_start = int(row/self.height)*self.height\n #column where quadrant begins (so for ex col_start will be either 0, 3, 6 in a 9x9 grid)\n col_start = int(column/int(self.size**0.5))*int(self.size**0.5)\n for quad_row in range(0, self.height):\n #get the the values in use in the quadrant\n for quad_col in range(int(self.size**0.5)):\n in_quad.append(state[row_start + quad_row][col_start + quad_col])\n #get the values that are not in the quadrant\n valid_values = self.get_values(valid_values, in_quad)\n return valid_values \n\n def get_options(self, state, row, column):\n '''\n Get the valid values that satisfy row, column and quadrant conditions\n\n Returns: valid values\n '''\n options = self.get_row_values(state, row)\n options = self.get_col_values(options, state, column)\n options = self.get_quad_values(options, state, row, column)\n\n return options \n\ndef backtrack(board):\n solution = Sudoku(board)\n #found the solution \n if solution.goal(board):\n return board\n # Get first empty spot\n row,col = solution.get_empty_spot(solution.size, board)\n # Get spot's viable options\n options = solution.get_options(board, row, col)\n\n for i in options:\n board[row][col] = i # Try option\n # Recursively fill in the board\n if backtrack(board):\n #found solution\n return board \n else:\n board[row][col] = 0 #backtracks\n\n return None\n\ndef Solve(board):\n '''\n Solves the Sudoku Board using backtracking and prints the time to solve on the terminal \n\n Returns: None\n '''\n check = Sudoku(board)\n #checks if the initial board we are starting with is valid\n if not check.check_if_valid(board):\n return False\n\n print (\"Solving using Backtracking\")\n start_time = time.time()\n solution = backtrack(board)\n elapsed_time = time.time() - start_time\n\n if solution:\n print (elapsed_time)\n return solution \n else:\n return False\n\n#########FOR THE GUI TO BE ABLE TO READ THE SOLUTION#########################\n\ndef convert(size,board):\n '''\n Converts a larger list containing the values of the sudoku board \n to a list of sublist containing the values of the sudoku board (ex. 
[1,2,3,4] -> [[1,2], [3,4]]) \n\n Returns: original list as a list of sublist\n '''\n return [board[x:x+size] for x in range(0, size**2, size)]\n\ndef send(size,rows,cols,grid):\n '''\n Converts the list(grid of solutions) into a readable form to be \n read by the gui so it can be displayed on the gui \n\n Returns: list will filled solutions\n '''\n #convert the board received from gui into a readble form for our DFS solver\n board = convert(size,grid)\n #find the solution\n solution = Solve(board)\n if solution == False:\n return None\n #reconvert board(with the solution) into the original form(format) from the gui\n s = {}\n #~ cols = '123456789' #the x-axis of the sudoku grid\n #~ rows = 'ABCDEFGHI' #the y-axis of the sudoku grd\n for i in range(size):\n for j in range(size):\n s[rows[i]+cols[j]] = solution[i][j]\n #return the gui readable solution\n return s\n\n","sub_path":"backtracking_solve.py","file_name":"backtracking_solve.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"468141698","text":"from keras.models import Model\nfrom keras.layers import Dense,Input,LSTM,Bidirectional,Activation,Conv1D,GRU\nfrom keras.layers import Dropout,Embedding, MaxPooling1D, Flatten\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\nfrom keras.optimizers import Adam\n\n\nfrom keras.preprocessing import text, sequence\nimport numpy as np\n\ndef get_model(opt): \n inp = Input(shape=(opt['maxlen'], ))\n x = Embedding(opt['vocab_size'], opt['embedding_size'],\n weights=[opt['embedding_matrix']], trainable=opt['train_embed'])(inp)\n x = SpatialDropout1D(opt['embed_dropout'])(x)\n\n x = Bidirectional(GRU(opt['hidden_size'], return_sequences=True,\n dropout=opt['dropout'],recurrent_dropout=opt['dropout']))(x)\n x = Conv1D(opt['num_filters'], kernel_size=3, padding=\"valid\", kernel_initializer=\"glorot_uniform\")(x)\n avg_pool = GlobalAveragePooling1D()(x)\n max_pool = GlobalMaxPooling1D()(x)\n x = concatenate([avg_pool, max_pool]) \n # x = Dense(128, activation='relu')(x)\n # x = Dropout(opt['dropout'])(x)\n\n outp = Dense(opt['nb_classes'], activation=\"softmax\")(x)\n \n model = Model(inputs=inp, outputs=outp)\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=opt['lr']),\n metrics=['accuracy'])\n model.summary()\n return model\n\n\ndef main():\n opt = { 'maxlen': 50,\n 'vocab_size': 100000,\n 'embedding_size': 100,\n 'train_embed': True,\n 'embed_dropout': 0.1,\n 'dropout': 0.1,\n 'hidden_size': 128,\n 'num_filters': 128,\n 'nb_classes': 5,\n 'lr': 1e-3\n }\n\n opt['embedding_matrix'] = np.zeros((opt['vocab_size'], opt['embedding_size']))\n model = get_model(opt)\n\n\nif __name__ == '__main__':\n main()","sub_path":"sentiment_analysis/models/BiGruWithConv.py","file_name":"BiGruWithConv.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"594231742","text":"#!/usr/bin/env python\nfrom receptionist import Receptionist\nimport sys\nfrom RPi import ReceptionPi\nfrom UserDB import UserDB\nfrom FacialRecognition_Drive import FaceRecognition_Drive\n\n\nclass App:\n \"\"\"\n This class shows the Library Management System\n Menu options as well as ensures basic functionalities.\n\n ...\n\n Methods\n -------\n menu()\n This method shows and runs the main functionalities \n of the Library Managment System consol application.\n\n \"\"\"\n\n def 
menu(self):\n        re = Receptionist()\n        drive = FaceRecognition_Drive()\n        receptionPi = ReceptionPi()\n        print(\"\"\"\n    =====================================================\n          *           *    * * *   * * *\n          *           *    *   *   *   *\n          *           *    * * *   * * *\n          *     *     *    *   *   * *\n           *   * *   *     *   *   *  *\n            * *   * *      *   *   *   *  * * * * *\n    ======================================================\n    \"\"\")\n        while True:\n            print(\" ************MAIN MENU**************\")\n            choice = input(\"\"\"\n            1: Register\n            2: Log in using username and password\n            3: Log in using face recognition\n            4: Quit/Log Out\n\n            Please enter your choice: \"\"\")\n\n            if choice == '1':\n                re.register()\n            elif choice == '2':\n                name = re.login()\n                if name is not False:\n                    receptionPi.login(name)\n\n            elif choice == '3':\n                # log in with the identity returned by the face recognition step\n                data = drive.face_Recognation()\n                receptionPi.login(data)\n            elif choice == '4':\n                sys.exit()\n            else:\n                print(\"You must only select either 1,2,3 or 4.\")\n                print(\"Please try again\")\n\n\nif __name__ == \"__main__\":\n    App().menu()\n","sub_path":"RPi/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"114540694","text":"import numpy as np\nfrom scipy.interpolate import bisplrep, bisplev\nfrom glob import glob\nfrom blobFinder import BlobFinder\n\nclass Calibrate:\n    def __init__(self, fname, grid_size):\n        \"\"\"\n        fname: Filepath to directory with calibration pictures\n\n        The picture names need to follow this convention:\n        cal*.jpg, where * is an int starting from 0. Each picture increments\n        the location of the object on the x-axis by one unit until grid_size, where\n        the y-axis is increased and the x-axis resets.\n\n        i.e. for grid_size 3 and 6 pictures\n        cal0: 0,0\n        cal1: 1,0\n        cal2: 2,0\n        cal3: 0,1\n        cal4: 1,1\n        cal5: 2,1\n\n        There must also be an image called bg.jpg, which is the background image that will\n        be used throughout.\n        \"\"\"\n        imageFnames = glob(fname+'/cal*.jpg')\n        tableCoords = []\n        blobs = []\n        for imgName in imageFnames:\n            start = imgName.rfind('cal')\n            number = int(imgName[start+3:-4])\n            tableCoords.append((number%grid_size,number//grid_size))\n\n            blob = BlobFinder(imgName,fname+'/bg.jpg').findBlob()\n            blobs.append(blob)\n\n        # __init__ must return None, so keep the fitted chart on the instance\n        # (read it via Calibrate(...).chart) instead of returning it\n        self.chart = Chart(tableCoords, blobs)\n\nclass Chart:\n    def __init__(self, tableCoords, blobs):\n        \"\"\"\n        tableCoords: List of 2-tuples of x,y coordinates\n        blobs: Matching list of initialized blob objects\n        \"\"\"\n        tableXs = np.array([_[0] for _ in tableCoords])\n        tableYs = np.array([_[1] for _ in tableCoords])\n\n        blobXs = np.array([_.getLocation()[0] for _ in blobs])\n        blobYs = np.array([_.getLocation()[1] for _ in blobs])\n\n        self.tableToBlobTckX = bisplrep(tableXs,tableYs,blobXs)\n        self.tableToBlobTckY = bisplrep(tableXs,tableYs,blobYs)\n\n        self.blobToTableTckX = bisplrep(blobXs,blobYs,tableXs)\n        self.blobToTableTckY = bisplrep(blobXs,blobYs,tableYs)\n\n    def blobToTable(self, x, y):\n        tableX = bisplev(x, y, self.blobToTableTckX)\n        tableY = bisplev(x, y, self.blobToTableTckY)\n        return tableX, tableY\n\n    def tableToBlob(self, x, y):\n        blobX = bisplev(x, y, self.tableToBlobTckX)\n        blobY = bisplev(x, y, self.tableToBlobTckY)\n        return blobX, blobY","sub_path":"calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"561597688","text":"import random\r\nfrom typing import List, Tuple\r\n\r\nfrom Classes.Board import Board\r\nfrom Classes.Delta import Delta\r\nfrom 
Classes.Pos2D import Pos2D\r\nfrom Classes.Square import Square\r\nfrom Enums.GamePhase import GamePhase\r\nfrom Enums.PlayerColor import PlayerColor\r\n\r\n\r\nclass IDSAgent:\r\n\r\n # When playing massacre, we don't want to sacrifice our own pieces to kill\r\n # enemy pieces. Therefore, weigh white pieces higher than black pieces.\r\n _WHITE_WEIGHT: float = 1.1\r\n _BLACK_WEIGHT: float = 1.0\r\n # How much to weigh the manhattan distance. We only want to weigh the\r\n # manhattan distance enough for it to play a role when there are no options\r\n # to kill an enemy piece, hence the low weight.\r\n _DIST_WEIGHT: float = 0.001\r\n # We really don't want to repeat board states. When a repeat board is\r\n _REPEAT_BOARD: int = -99999\r\n # Rating-decimal place rounding. Used to prevent floating point imprecision\r\n # from interfering with move decisions.\r\n _RATING_NUM_ROUNDING: int = 10\r\n # The length of the history of recent board states the agent can remember.\r\n # Used to avoid repeat moves. Can be quiet high, as each board (being\r\n # represented by a string) does not take much memory.\r\n _BOARD_HISTORY_MEMORY: int = 512\r\n\r\n # A reference to the current board that the agent is on.\r\n _board: Board\r\n # The depth to go in each iteration of the iterative-deepening search\r\n # algorithm i.e. number of moves to look ahead.\r\n _depth: int\r\n # A list of recent boards. Each board is stored in its string form, for\r\n # memory and efficiency purposes.\r\n # This will allow us to check if a move results in a previous board, and\r\n # therefore not perform that move, avoiding\r\n # endless loops in the process.\r\n _recent_board_history: List[str] = []\r\n\r\n def __init__(self, start_board: Board, depth: int, seed: int = None):\r\n self._board = start_board\r\n self._depth = depth\r\n if (seed is not None):\r\n random.seed(seed)\r\n\r\n def massacre(self):\r\n \"\"\"\r\n This method runs the algorithm until all enemy pieces are killed (or the\r\n game is over otherwise). This method uses a variant of iterative\r\n deepening search. All moves within '_DEPTH' + 1 levels will be explored,\r\n at which point the most promising move will be made. We then, again,\r\n evaluate all possible moves within '_DEPTH' + 1 levels, perform the\r\n best-rated move, etc etc.\r\n \"\"\"\r\n\r\n # While the game is not finished, perform the algorithm.\r\n while (self._board.phase != GamePhase.FINISHED):\r\n # Evaluate all of the possible moves from this board.\r\n deltas: List[Delta] = self._board.get_all_possible_deltas(PlayerColor.WHITE)\r\n # Shuffling the calculated deltas can assist in avoiding endless\r\n # loops.\r\n random.shuffle(deltas)\r\n\r\n # Get the best move to perform.\r\n best_delta: Delta = \\\r\n IDSAgent.get_best_delta(self._board, PlayerColor.WHITE, self._depth,\r\n self._recent_board_history)[0]\r\n\r\n # Before performing the move, save the current board into the recent\r\n # boards history list.\r\n self._recent_board_history = \\\r\n ([str(self._board)]\r\n + self._recent_board_history[:IDSAgent._BOARD_HISTORY_MEMORY])\r\n\r\n # Perform the move, replacing the reference to the old board with\r\n # the new one.\r\n self._board = self._board.get_next_board(best_delta)\r\n\r\n # Print the delta/move made.\r\n print(best_delta)\r\n\r\n @staticmethod\r\n def get_board_ratings(board: Board, depth: int,\r\n recent_board_history: List[str]) -> List[float]:\r\n \"\"\"\r\n Returns a list of ratings for the given board of size 'depth' + 1. 
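(Returning the whole list rather than a single score lets the caller prefer shorter winning sequences; see the -len(x[1]) term in the comparison key inside get_best_delta below.) 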
For\r\n example, if this function returns this list: [2.2, 1.6, 1.7], that means\r\n that the given board is rated 1.7, the best board state from the given\r\n board is rated 1.6, and the best board state from that 1.6 board is\r\n rated 2.2. i.e. the list of ratings corresponds to levels as so:\r\n [depth, depth - 1, depth - 2, ... 0] where 0 is the given/current\r\n board's rating. By returning a list of ratings, we can better compare\r\n series of moves, particularly a series which results in the same state,\r\n but by different means e.g. 'kill enemy piece at (2, 2) then move to\r\n (2, 1)' vs 'move to (2, 1) then kill enemy piece at (2, 2)'. In this\r\n scenario, we can prioritize the former, thanks to the list of ratings\r\n being returned. recent_board_history is list of boards represented by\r\n strings used to avoid repeating previous board states, avoiding endless\r\n loops in the process.\r\n \"\"\"\r\n\r\n # If the current board has been explored in recent board history, return\r\n # the appropriate rating to discourage its selection in a list e.g.\r\n # [-99999].\r\n if (str(board) in recent_board_history):\r\n return [IDSAgent._REPEAT_BOARD]\r\n\r\n # If we're at the end of our search, either due to depth being equal to\r\n # 0 or the board being marked as finished, simply return the heuristic\r\n # value of the board in a list.\r\n if (depth == 0 or board.phase == GamePhase.FINISHED):\r\n return [IDSAgent.get_heuristic_value(board)]\r\n\r\n # Evaluate all possible moves from this board and keep track of the best\r\n # one.\r\n best_rating = IDSAgent.get_best_delta(board, PlayerColor.WHITE, depth,\r\n recent_board_history)[1]\r\n\r\n # Return the list of ratings from the best move and attach this given\r\n # board's rating onto the end.\r\n return best_rating + [IDSAgent.get_heuristic_value(board)]\r\n\r\n @staticmethod\r\n def get_best_delta(board: Board, player: PlayerColor, depth: int,\r\n recent_board_history: List[str]) \\\r\n -> Tuple[Delta, List[float]]:\r\n \"\"\"\r\n Returns the highest-rated (or best) move from the current board for the\r\n given player, exploring 'depth' number of levels to determine the best\r\n move. recent_board_history helps avoid repeating board states. Along\r\n with the delta object for the best move, also returns a list of floats,\r\n containing the ratings for the series of moves used to rate the returned\r\n delta. This list is more thoroughly explained in the docs for\r\n 'get_board_ratings'.\r\n \"\"\"\r\n # Evaluate all of the possible moves from this board.\r\n deltas: List[Delta] = board.get_all_possible_deltas(player)\r\n # Shuffling the calculated deltas can assist in avoiding endless loops,\r\n # particularly if board states are being repeated.\r\n random.shuffle(deltas)\r\n\r\n # Iterate through every valid move, rating them and keeping track of the\r\n # best move along the way.\r\n best_delta: Tuple[Delta, List[float]] = (None, [-999999])\r\n for delta in deltas:\r\n delta_ratings: List[float] = \\\r\n IDSAgent.get_board_ratings(board.get_next_board(delta),\r\n depth - 1, recent_board_history)\r\n\r\n # This \"max\" criteria defined by the lambda looks a bit complex, so\r\n # let's explain. Keep in mind that floats further to the left in a\r\n # given list represents the rating of a board further down the game\r\n # three. When finding the best move sequence (from the current\r\n # board's perspective), we prioritize moves that result in the best\r\n # score at its furthest down board state i.e. 
[5 1 1] is better than\r\n # [1 9 9], because three moves down, it will have a board rated 5 vs\r\n # the other's board which is rated 1. In the event of this first\r\n # value being equal, we prefer the shortest list i.e. [5 2] is\r\n # better than [5 3 2]. This is because the shorter list means that\r\n # the game will be over sooner (but still have a board rating as\r\n # good as the longer list). This will help the algorithm execute\r\n # killing moves in 'Massacre' and not put them off by doing an\r\n # inconsequential move first. Finally, if the lengths are the same,\r\n # we prioritize the list with the highest rating at any given index\r\n # i.e. [5 3 3] is better than [5 3 2] because it means that we're\r\n # doing the moves that will keep the board's rating as high as\r\n # possible (again, only if the comparison gets to this point).\r\n # Sorted example according to this criteria:\r\n # [4 3 2] > [3 1] > [3 2 2] > [3 2 1] > [-999].\r\n best_delta = max([best_delta, (delta, delta_ratings)],\r\n key=lambda x: (x[1][0], -len(x[1]), x[1]))\r\n\r\n return best_delta\r\n\r\n @staticmethod\r\n def get_heuristic_value(board: Board):\r\n \"\"\"\r\n Given a board, calculates and returns its rating based on heuristics.\r\n \"\"\"\r\n # Get a list of all squares with white pieces and a list of squares with\r\n # black pieces.\r\n white_squares: List[Square] = board.get_player_squares(PlayerColor.WHITE)\r\n black_squares: List[Square] = board.get_player_squares(PlayerColor.BLACK)\r\n\r\n # If there are any black pieces, calculate the sum of all white pieces'\r\n # manhattan displacement to the first black piece in the list. This\r\n # piece will remain consistent until it is dead. This fixes the issue of\r\n # white pieces, when separated from the black pieces, not being able to\r\n # find their way to the black pieces easily.\r\n manhattan_dist_sum: int = 0\r\n if (len(black_squares) > 0):\r\n black_square: Square = black_squares[0]\r\n for white_square in white_squares:\r\n displacement: Pos2D = (black_square.pos - white_square.pos)\r\n manhattan_dist_sum += abs(displacement.x) + abs(displacement.y)\r\n\r\n # Calculate the number of white and black pieces. 
This is a very\r\n # important heuristic that will help prioritize preserving white's own\r\n # pieces and killing the enemy's black pieces.\r\n num_white_pieces: int = len(white_squares)\r\n num_black_pieces: int = len(black_squares)\r\n\r\n # Return the heuristic rating by using the appropriate weights.\r\n return round(IDSAgent._WHITE_WEIGHT * num_white_pieces\r\n - IDSAgent._BLACK_WEIGHT * num_black_pieces\r\n - IDSAgent._DIST_WEIGHT * manhattan_dist_sum,\r\n IDSAgent._RATING_NUM_ROUNDING)\r\n","sub_path":"Part B/Classes/Agents/IDSAgent.py","file_name":"IDSAgent.py","file_ext":"py","file_size_in_byte":10921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161608882","text":"'''\nCreated on Dec 26, 2014\n\n@author: dsingh\n'''\nfrom TreesAndGraphs.__init__ import errorCodes\n\nclass binarySearchTree():\n '''\n classdocs\n '''\n INITIALVALUE = 9999\n\n def __init__(self,value):\n '''\n Constructor\n '''\n self.value = value\n self.leftChild = None\n self.rightChild = errorCodes.NULL\n \n def searchNode(self,currentNode,value):\n if currentNode == errorCodes.NULL:\n print('No node with value', value,'is found')\n return errorCodes.NULL\n \n currentNodeValue = currentNode.value\n \n if currentNodeValue == value:\n print('Node found')\n return currentNode\n \n if value < currentNodeValue:\n currentNode = currentNode.leftChild\n elif value > currentNodeValue:\n currentNode = currentNode.rightChild\n \n self.searchNode(currentNode,value)\n \n def addNode(self,value):\n currentNode = self\n parentNode = self\n \n while not currentNode == errorCodes.NULL:\n parentNode = currentNode\n if value > parentNode.value:\n currentNode = parentNode.rightChild\n elif value < parentNode.value:\n currentNode = parentNode.leftChild\n else:\n print('Node already exists!!')\n return\n \n \n newNode = binarySearchTree(value)\n if value > parentNode.value:\n parentNode.rightChild = newNode\n else:\n parentNode.leftChild = newNode\n \n def inOrderTraversal(self,node):\n if node == errorCodes.NULL:\n return\n \n self.inOrderTraversal(node.leftChild)\n print(node.value,'<-->')\n self.inOrderTraversal(node.rightChild)\n \n \n \n \n \n \n ","sub_path":"pythonPrograms.src/TreesAndGraphs/binaryTree.py","file_name":"binaryTree.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"352228370","text":"import time\nimport telebot as tb\nfrom lxml import etree\nfrom DataSearch import utility as ut\nfrom ChatBot import dictionary as cc\n\n\ntoken = '706415631:AAG1Y6sfLmvxU_TENOaVwGA3hzXdaGJiaWo'\npathOfPhoto = 'C:/Users\\linuk\\Downloads\\Staedel_Teilset\\Abbildungen_Teilset/'\npathOfDataset = 'C:/Users\\linuk\\Downloads\\Staedel_Teilset/Objekte.xml'\npathOfGene = 'C:/Users\\linuk\\Workspace\\HeyDr.Jo\\src/ChatBot/generatedDataSet.xml'\ntree = etree.parse(pathOfDataset)\nroot = tree.getroot()\n__currentrecord__ = None\n__knowInfo__ = 0\n__artistName__ =''\n__style__ = ''\n\ntreeGene = etree.parse(pathOfGene)\nrootGene = treeGene.getroot()\ndictionary = cc.dict\nbot = tb.TeleBot(token)\n\n\ndef get_semantic(text,dict):\n for dd in dict:\n if text in dict[dd]:\n return dd\n\n\ndef get_from_data(command,rootAll,rootGene):\n return ut.get_start_info(command,rootAll)\n\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, u\"Dear customer, I am Dr. Jo! 
\"\n u\"\\nToday I will be your museum guide \"\n u\"and provide you some professional and interesting information about our art objects! \"\n u\"\\nWhich object are you currently looking at or interested in? \")\n\n\n@bot.message_handler(content_types='text')\ndef get_input(message):#List):\n try:\n global __currentrecord__, __knowInfo__, __artistName__, __style__\n chatid = message.chat.id\n #if message.text.upper() == 'YES' and __knowInfo__ == 1:\n # detail_Info, dict = ut.get_details(__currentrecord__)\n # __artistName__ = dict['artist']\n # __style__ = dict['style']\n # bot.reply_to(message, detail_Info)\n #chatid = message.chat.id\n # __knowInfo__=2\n #return\n if message.text.upper()=='YES' and __knowInfo__== 2:\n chatid = message.chat.id\n bot.send_message(chatid,u'What would you like to know, ' \\\n ' introductions about the artist or style or some related objects of this object in our museum?' \\\n '\\n(artist,style,related objects)')\n __knowInfo__=3\n return\n elif message.text.upper()==\"ARTIST\" and __knowInfo__==3:\n chatid=message.chat.id\n bot.send_message(chatid, ut.search_artist_xml(__artistName__,rootGene) )\n __knowInfo__ = 2\n bot.send_message(chatid, u'\\n\\n\\nWould you like to know about the style or '\n u'related objects of this object in our museum? ')\n return\n elif message.text.upper()==\"STYLE\" and __knowInfo__==3 :\n chatid = message.chat.id\n bot.send_message(chatid,ut.search_style_xml(__style__,rootGene))\n __knowInfo__ = 2\n bot.send_message(chatid, u'\\n\\n\\nWould you like to know about the artist or '\n u'related objects of this object in our museum? ')\n return\n\n elif message.text.upper()==\"RELATED OBJECTS\" and __knowInfo__==3:\n chatid = message.chat.id\n bot.send_message(chatid, u'still working, Coming Soon...')\n __knowInfo__= 2\n bot.send_message(chatid, u'\\n\\n\\nWould you like to know about the style or '\n u'artist of this object? ')\n return\n\n elif message.text.upper() == 'NO':\n bot.send_message(chatid, u'Please give the number or the name of your interested object!')\n __knowInfo__ = 0\n return\n else:\n print(str(__knowInfo__))\n #for message in messageList:\n title, artist, period, refnum,record = get_from_data(message.text, root, rootGene)\n __artistName__ = artist\n __currentrecord__ = record\n detail_Info, dict = ut.get_details(__currentrecord__)\n __style__ = dict['style']\n bot.reply_to(message, 'Title: \\n%s\\n\\nCreator: \\n%s\\n\\nCreated period: \\n%s\\n\\n\\n%s'%(title,artist,period,detail_Info))\n bot.send_message(chatid, u'Sending photo... 
Please wait')\n photo = open(pathOfPhoto+refnum+'.png','rb')\n bot.send_photo(chatid,photo)\n bot.send_message(chatid, u'Should I introduce more information about the artist or style of this object?')\n __knowInfo__ =2\n return\n except (AttributeError):\n bot.send_message(chatid,'Sorry I don\\'t understand!')\n\n\nif __name__ == '__main__':\n #bot.set_update_listener(get_input)\n while True:\n bot.polling(none_stop=True)\n time.sleep(1)\n","sub_path":"src/ChatBot/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"260608628","text":"#!/usr/bin/python3\n\nimport subprocess\nimport sys\nimport ipaddress\nfrom datetime import datetime\n\n# Define main \ndef main(ip_target,cdir):\n if len(cdir)<1:\n cdir=\"/29\"\n #Creating an the range for the ip\n ip_range= str(ip_target) + cdir\n net = list(ipaddress.ip_network(ip_range,False).hosts())\n \n # IP on the network to fping\n for ip in net:\n action = \"fping -a -C 5 -q \" + str(ip)\n # worked with Ricky and Alf\n try:\n results = subprocess.check_output(action,stderr=subprocess.STDOUT,shell=True)\n results_split = results.split(b\":\")\n ip = str(results_split[0].decode('UTF-8').strip())\n time = str(results_split[1].decode('UTF-8').strip())\n ip_list.append(ip)\n response = ip + \" is detected online. Response time: \" + time\n #print(response)\n except subprocess.CalledProcessError as e:\n error = e.output \n # Print Results.\n print(\"Detected Hosts:\")\n print(\"==============\")\n \n \n for ip in ip_list:\n print(ip)\n time_elapsed = datetime.now() -start_time\n print('Time Elapsed: (hh:mm:ss.ms) {}'.format(time_elapsed))\n# help from Alf \nif __name__=='__main__':\n # targets and times\n targets = sys.argv\n targets.pop(0)\n target = targets[0]\n ip_list =[]\n start_time = datetime.now()\n \n if len(targets) > 1:\n cdir = targets[1]\n main(target,cdir)\n else:\n main(target,\"/24\")","sub_path":"pysweep.py","file_name":"pysweep.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"166375520","text":"import pyttsx3\nimport datetime\nimport time\nimport speech_recognition as sr\nimport pyautogui\nimport pyaudio\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\n# engine.setProperty('volume',0.5)\nengine.setProperty('rate', 155)\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\ndef downmouse():\n time.sleep(1)\n pyautogui.moveRel(0, 100, duration=0.1)\n\ndef rightmouse():\n time.sleep(1)\n pyautogui.moveRel(100, 0, duration=0.1)\ndef leftmouse():\n time.sleep(1)\n pyautogui.moveRel(-100,0,duration=0.1)\ndef upmouse():\n time.sleep(1)\n pyautogui.moveRel(0,-100,duration=0.1)\ndef rightclick():\n pyautogui.click(button='right')\ndef leftclick():\n pyautogui.click(button='left')\ndef takeCommand():\n # It takes microphone input from the usr and return string output\n\n r = sr.Recognizer()\n with sr.Microphone() as source2:\n print(\"Listening...\")\n r.adjust_for_ambient_noise(source2, duration=1)\n r.pause_threshold = 2\n\n audio = r.listen(source2)\n print(\"running\")\n\n try:\n print(\"recognising...\")\n querry = r.recognize_google(audio, language='en-in')\n print(f\"User said : {querry}\\n\")\n except Exception as e:\n print(\"Say that again please...\")\n return 'None'\n return querry\nif __name__ == '__main__':\n while True:\n querry1 = 
list(takeCommand().split())\n # print(querry1)\n flag=False\n for i in querry1:\n # print(i)\n if i==\"left\":\n leftmouse()\n elif i==\"right\":\n rightmouse()\n elif i==\"up\":\n upmouse()\n elif i==\"down\":\n downmouse()\n elif i==\"leftclick\":\n leftclick()\n elif i==\"rightclick\":\n rightclick()\n elif i==(\"stop\" or \"exit\" or \"quit\"):\n flag=True\n break\n if flag==True:\n break\n","sub_path":"mousefunction.py","file_name":"mousefunction.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619212216","text":"import os\nimport unittest\nimport vtk, qt, ctk, slicer\nimport math\nimport sys\n\n#\n# AstroMomentMapsSelfTest\n#\n\nclass AstroMomentMapsSelfTest:\n def __init__(self, parent):\n parent.title = \"Astro MomentMaps SelfTest\"\n parent.categories = [\"Testing.TestCases\"]\n parent.dependencies = [\"AstroVolume\"]\n parent.contributors = [\"\"\"\n Davide Punzo (Kapteyn Astronomical Institute) and\n Thijs van der Hulst (Kapteyn Astronomical Institute).\"\"\"]\n parent.helpText = \"\"\"\n This module was developed as a self test to perform the operations needed for generating moment maps.\n \"\"\"\n parent.acknowledgementText = \"\"\"\n\"\"\" # replace with organization, grant and thanks.\n self.parent = parent\n\n # Add this test to the SelfTest module's list for discovery when the module\n # is created. Since this module may be discovered before SelfTests itself,\n # create the list if it doesn't already exist.\n try:\n slicer.selfTests\n except AttributeError:\n slicer.selfTests = {}\n slicer.selfTests['Astro MomentMaps SelfTest'] = self.runTest\n\n def runTest(self):\n tester = AstroMomentMapsSelfTestTest()\n tester.runTest()\n\n#\n# qAstroMomentMapsSelfTestWidget\n#\n\nclass AstroMomentMapsSelfTestWidget:\n def __init__(self, parent = None):\n if not parent:\n self.parent = slicer.qMRMLWidget()\n self.parent.setLayout(qt.QVBoxLayout())\n self.parent.setMRMLScene(slicer.mrmlScene)\n else:\n self.parent = parent\n self.layout = self.parent.layout()\n if not parent:\n self.setup()\n self.parent.show()\n\n def setup(self):\n # Instantiate and connect widgets ...\n\n # reload button\n # (use this during development, but remove it when delivering\n # your module to users)\n self.reloadButton = qt.QPushButton(\"Reload\")\n self.reloadButton.toolTip = \"Reload this module.\"\n self.reloadButton.name = \"AstroMomentMapsSelfTest Reload\"\n self.layout.addWidget(self.reloadButton)\n self.reloadButton.connect('clicked()', self.onReload)\n\n # reload and test button\n # (use this during development, but remove it when delivering\n # your module to users)\n self.reloadAndTestButton = qt.QPushButton(\"Reload and Test\")\n self.reloadAndTestButton.toolTip = \"Reload this module and then run the self tests.\"\n self.layout.addWidget(self.reloadAndTestButton)\n self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n def cleanup(self):\n pass\n\n def onReload(self,moduleName=\"AstroMomentMapsSelfTest\"):\n \"\"\"Generic reload method for any scripted module.\n ModuleWizard will subsitute correct default moduleName.\n \"\"\"\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)\n\n def onReloadAndTest(self,moduleName=\"AstroMomentMapsSelfTest\"):\n self.onReload()\n evalString = 'globals()[\"%s\"].%sTest()' % (moduleName, moduleName)\n tester = eval(evalString)\n tester.runTest()\n\n#\n# 
AstroMomentMapsSelfTestLogic\n#\n\nclass AstroMomentMapsSelfTestLogic:\n \"\"\"This class should implement all the actual\n computation done by your module. The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget\n \"\"\"\n def __init__(self):\n pass\n\n def hasImageData(self,volumeNode):\n \"\"\"This is a dummy logic method that\n returns true if the passed in volume\n node has valid image data\n \"\"\"\n if not volumeNode:\n print('no volume node')\n return False\n if volumeNode.GetImageData() is None:\n print('no image data')\n return False\n return True\n\n\nclass AstroMomentMapsSelfTestTest(unittest.TestCase):\n \"\"\"\n This is the test case for your scripted module.\n \"\"\"\n\n def delayDisplay(self,message,msec=100):\n \"\"\"This utility method displays a small dialog and waits.\n This does two things: 1) it lets the event loop catch up\n to the state of the test so that rendering and widget updates\n have all taken place before the test continues and 2) it\n shows the user/developer/tester the state of the test\n so that we'll know when it breaks.\n \"\"\"\n print(message)\n self.info = qt.QDialog()\n self.infoLayout = qt.QVBoxLayout()\n self.info.setLayout(self.infoLayout)\n self.label = qt.QLabel(message,self.info)\n self.infoLayout.addWidget(self.label)\n qt.QTimer.singleShot(msec, self.info.close)\n self.info.exec_()\n\n def setUp(self):\n slicer.mrmlScene.Clear(0)\n\n def runTest(self):\n self.setUp()\n self.test_AstroMomentMapsSelfTest()\n\n def test_AstroMomentMapsSelfTest(self):\n print(\"Running AstroMomentMapsSelfTest Test case:\")\n\n self.downloadWEIN069()\n astroVolume = slicer.util.getNode(\"WEIN069\")\n rms = astroVolume.GetDisplayThreshold()\n\n mainWindow = slicer.util.mainWindow()\n mainWindow.moduleSelector().selectModule('AstroVolume')\n mainWindow.moduleSelector().selectModule('AstroMomentMaps')\n\n astroMomentMapsModule = module = slicer.modules.astromomentmaps\n astroMomentMapsModuleWidget = astroMomentMapsModule.widgetRepresentation()\n\n AstroMomentMapsParameterNode = slicer.util.getNode(\"AstroMomentMapsParameters\")\n AstroMomentMapsParameterNode.SetIntensityMin(rms * 3)\n\n QPushButtonList = astroMomentMapsModuleWidget.findChildren(qt.QPushButton)\n for QPushButton in (QPushButtonList):\n if QPushButton.name == \"ApplyButton\":\n ApplyPushButton = QPushButton\n\n self.delayDisplay('Calculating moment maps', 700)\n ApplyPushButton.click()\n\n ZeroMomentMapVolume = slicer.mrmlScene.GetNodeByID(AstroMomentMapsParameterNode.GetZeroMomentVolumeNodeID())\n pixelValue0 = ZeroMomentMapVolume.GetImageData().GetScalarComponentAsFloat(56, 68, 0, 0)\n FirstMomentMapVolume = slicer.mrmlScene.GetNodeByID(AstroMomentMapsParameterNode.GetFirstMomentVolumeNodeID())\n pixelValue1 = FirstMomentMapVolume.GetImageData().GetScalarComponentAsFloat(56, 68, 0, 0)\n SecondMomentMapVolume = slicer.mrmlScene.GetNodeByID(AstroMomentMapsParameterNode.GetSecondMomentVolumeNodeID())\n pixelValue2 = SecondMomentMapVolume.GetImageData().GetScalarComponentAsFloat(56, 68, 0, 0)\n\n if (math.fabs(pixelValue0 - 0.511788547039) < 1.e-6 and \\\n math.fabs(pixelValue1 - 5231.70947266) < 1.e-6 and \\\n math.fabs(pixelValue2 - 28.8058509827) < 1.e-6):\n self.delayDisplay('Test passed', 700)\n else:\n self.delayDisplay('Test failed', 700)\n # if run from Slicer interface remove the followinf exit\n sys.exit()\n\n\n def downloadWEIN069(self):\n import AstroSampleData\n astroSampleDataLogic = 
AstroSampleData.AstroSampleDataLogic()\n self.delayDisplay('Getting WEIN069 Astro Volume')\n WEIN069Volume = astroSampleDataLogic.downloadSample(\"WEIN069\")\n return WEIN069Volume\n\n\n","sub_path":"AstroMomentMaps/Testing/Python/AstroMomentMapsSelfTest.py","file_name":"AstroMomentMapsSelfTest.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"630936741","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom pizza_app.models import PizzaOrder, Address\n\n\nclass DeliveryForm(forms.ModelForm):\n class Meta:\n model = Address\n fields = [\n 'full',\n ]\n\n\nclass PizzaOrderForm(forms.ModelForm):\n class Meta:\n model = PizzaOrder\n exclude = [\n 'delivered',\n 'date_created',\n 'date_delivered',\n 'delivery',\n ]\n\n def clean(self):\n data = self.cleaned_data\n excluded = data['exclude']\n\n errors = []\n for item in excluded:\n if item in data['extra']:\n errors.append(str(item))\n\n if errors:\n raise ValidationError(\n 'Ingredients [{}] are in extras and excludes!'.format(\n ', '.join(errors)\n )\n )\n return data\n\n def save(self, commit=True, delivery=None):\n if delivery is None:\n raise ValueError('Delivery was not set')\n\n inst = super().save(commit=False)\n inst.delivery = delivery\n\n if commit:\n inst.save()\n\n return inst\n","sub_path":"PYTHON/course22/pizza_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"321652922","text":"from copy import deepcopy\nimport logging\nimport time\nimport random\n\n\nclass Node(object):\n def __init__(self, state, moveMade, isMax, parent, roundNum, me, depth):\n self.state = state\n self.moveMade = moveMade\n self.isMax = isMax\n self.parent = parent\n self.roundNum = roundNum\n self.me = me\n self.depth = depth\n\n self.bestVal = None # assign to value of the state\n self.bestMove = [None, None]\n # logging.basicConfig(filename='player' + str(self.me) + '_' + str(time.time()) + '.log', level=logging.DEBUG)\n\n def setParentBest(self, value):\n if self.bestVal is None:\n self.bestVal = value\n if self.parent is not None and self.parent.bestVal is None:\n self.parent.setParentBest(value)\n\n def calc_best_move(self):\n \"\"\"\n returns a tuple (k, (x, y)) representing the value k after taking the best move (x,y)\n available from the set of valid moves\n :return:\n \"\"\"\n # if self.isMax:\n # logging.info('MAX WITH THIS STARTING STATE at depth ' + str(self.depth))\n # else:\n # logging.info('MIN WITH THIS STARTING STATE at depth ' + str(self.depth))\n\n if self.me == 1:\n my_id = 0\n their_id = 1\n else:\n my_id = 1\n their_id = 0\n\n # get possible moves\n if self.isMax:\n validMoves = self.getValidMoves(self.roundNum, my_id + 1)\n else:\n validMoves = self.getValidMoves(self.roundNum, their_id + 1)\n\n if self.roundNum < 4:\n return 0, random.choice(validMoves)\n\n # if this node is a designated leaf node\n if self.depth == 0 or len(validMoves) == 0:\n # return the value of this state\n self.bestVal = self.state_value()\n if self.parent is not None and self.parent.bestVal is None:\n self.parent.setParentBest(self.state_value())\n\n # for row in reversed(self.state):\n # row_str = []\n # for el in row:\n # if el == 0:\n # row_str.append(u'\\u2B1A')\n # elif el == 1:\n # row_str.append(u'\\u2B1C')\n # else:\n # row_str.append(u'\\u2B1B')\n # 
row_str.append(' ')\n # logging.info(''.join(row_str))\n # logging.info(\"Current best value: \" + str(self.bestVal))\n # if self.parent is not None:\n # logging.info(\"Parent best value: \" + str(self.parent.bestVal))\n # logging.info(' ')\n\n if self.depth == 0 or len(validMoves) == 0:\n # return the value of this state\n # logging.info(\"Leaf node returning \" + str(self.state_value()))\n return self.state_value(), None\n\n # for each possible move[row][col]\n for move in validMoves:\n # at least once - create a node and calculate it's value\n temp_state = deepcopy(self.state)\n\n # make the move onto the given state\n # change the captured stones\n if self.isMax:\n temp_state[move[0]][move[1]] = my_id\n temp_state = self.make_move(temp_state, move, my_id)\n else:\n temp_state[move[0]][move[1]] = their_id\n temp_state = self.make_move(temp_state, move, their_id)\n\n next_isMax = not self.isMax\n next_roundNum = self.roundNum + 1\n\n move_node = Node(temp_state, move, next_isMax, self, next_roundNum, self.me, self.depth - 1)\n move_res = move_node.calc_best_move()\n\n if self.parent is None:\n # Handle the decision making of the root node\n if move_res[0] >= self.bestVal:\n self.bestVal = move_res[0]\n self.bestMove = move\n else:\n # Minimize or Maximize based on parent's best value\n if self.isMax:\n # I am a max node and my parent is a min node\n if move_res[0] > self.parent.bestVal:\n # if my move is greater than my parents, no matter how high I get they will never choose me\n # I will now return the best choice out of all my turns\n # logging.error('pruning a MAX node at depth ' + str(self.depth) + \" because best value is \" + str(move_res[0]))\n return self.bestVal, None\n elif move_res[0] > self.bestVal:\n # update personal best\n # logging.info(\"Max updating best value at depth \" + str(self.depth) + \" from \" + str(self.bestVal) + \" to \" + str(move_res[0]))\n self.bestVal = move_res[0]\n else:\n # I am a min node and my parent is a max node\n if move_res[0] < self.parent.bestVal:\n # if my move is less than my parents, no matter how low I get it, they will never choose me\n # I will now return the best choice out of all my turns\n # logging.error('pruning a MIN node at depth ' + str(self.depth) + \" because best value is \" + str(move_res[0]))\n return self.bestVal, None\n elif move_res[0] < self.bestVal:\n # update personal best\n # logging.info(\"Min updating best value at depth \" + str(self.depth) + \" from \" + str(self.bestVal) + \" to \" + str(move_res[0]))\n self.bestVal = move_res[0]\n\n # if self.parent is None:\n # logging.info(\"BEST SOLUTIONS HAS BEEN FOUND\")\n # logging.info('returning move: ' + str(self.bestMove) + '+++++++++++++++++++++++++++++++++++')\n return self.bestVal, self.bestMove\n\n def make_move(self, pre_state, move, player_id):\n \"\"\"\n takes in a state and a move, returns the state after the move is made\n :return:\n \"\"\"\n pre_state[move[0]][move[1]] = player_id + 1\n return self.changeColors(pre_state, move[0], move[1], player_id)\n\n def state_value(self):\n \"\"\"\n calculates the difference between the white and black pieces and returns the \"value\" of the board\n for the player requesting the state value\n :return:\n \"\"\"\n one_count = 0\n two_count = 0\n cornerVal = 10\n sideVal = 5\n basicVal = 1\n\n # top left corner\n if self.state[0][0] == 1:\n one_count += cornerVal\n if self.state[0][0] == 2:\n two_count += cornerVal\n\n # bottom left corner\n if self.state[0][7] == 1:\n one_count += cornerVal\n if self.state[0][7] == 2:\n 
\n    \"\"\"\n    OTHER\n    \"\"\"\n\n    def changeColors(self, start_state, row, col, turn):\n        for incx in range(-1, 2):\n            for incy in range(-1, 2):\n                if incx == 0 and incy == 0:\n                    continue\n                start_state = self.check_direction(start_state, row, col, incx, incy, turn)\n        # for el in start_state:\n        #     logging.debug(str(el))\n        # logging.debug(' ')\n        # logging.debug(' ')\n        return start_state\n\n    def check_direction(self, start_state, row, col, incx, incy, turn):\n        # NB: the scan below reads self.state (the pre-move board) while the flips are applied to end_state\n        sequence = []\n        end_state = deepcopy(start_state)\n        for i in range(1, 8):\n            r = row + incy * i\n            c = col + incx * i\n\n            if (r < 0) or (r > 7) or (c < 0) or (c > 7):\n                break\n\n            sequence.append(self.state[r][c])\n\n        # count the opponent stones before the first friendly stone;\n        # count is set to 20 as a flag once a capture in this direction is confirmed\n        count = 0\n        for i in range(len(sequence)):\n            if turn == 0:\n                if sequence[i] == 2:\n                    count += 1\n                else:\n                    if sequence[i] == 1 and count > 0:\n                        count = 20\n                    break\n            else:\n                if sequence[i] == 1:\n                    count += 1\n                else:\n                    if sequence[i] == 2 and count > 0:\n                        count = 20\n                    break\n\n        # only the capture flag (20) can exceed 10\n        if count > 10:\n            if turn == 0:\n                i = 1\n                r = row + incy * i\n                c = col + incx * i\n                while (end_state[r][c] == 2):\n                    end_state[r][c] = 1\n                    i += 1\n                    r = row + incy * i\n                    c = col + incx * i\n            else:\n                i = 1\n                r = row + incy * i\n                c = col + incx * i\n                while (end_state[r][c] == 1):\n                    end_state[r][c] = 2\n                    i += 1\n                    r = row + incy * i\n                    c = col + incx * i\n\n        return end_state\n\n    \"\"\"\n    FROM AIguy\n    \"\"\"\n\n    def checkDirection(self, row, col, incx, incy, me):\n        # same scan as check_direction, but only reports whether a capture exists\n        sequence = []\n        for i in range(1, 8):\n            r = row + incy * i\n            c = col + incx * i\n\n            if (r < 0) or (r > 7) or (c < 0) or (c > 7):\n                break\n\n            sequence.append(self.state[r][c])\n\n        count = 0\n        for i in range(len(sequence)):\n            if me == 1:\n                if sequence[i] == 2:\n                    count += 1\n                else:\n                    if (sequence[i] == 1) and (count > 0):\n                        return True\n                    break\n            else:\n                if sequence[i] == 1:\n                    count += 1\n                else:\n                    if (sequence[i] == 2) and (count > 0):\n                        return True\n                    break\n        return False\n\n    def couldBe(self, row, col, me):\n        for incx in range(-1, 2):\n            for incy in range(-1, 2):\n                if (incx == 0) and (incy == 0):\n                    continue\n\n                if self.checkDirection(row, col, incx, incy, me):\n                    return True\n\n        return False\n\n    # generates the set of valid moves for the player; returns a list of valid moves (validMoves)\n    def getValidMoves(self, roundNum, me):\n        validMoves = []\n        # print \"Round: \" + str(roundNum)\n\n        # for i in range(8):\n        #     print self.state[i]\n\n        if roundNum < 4:\n            if self.state[3][3] 
== 0:\n validMoves.append((3, 3))\n if self.state[3][4] == 0:\n validMoves.append((3, 4))\n if self.state[4][3] == 0:\n validMoves.append((4, 3))\n if self.state[4][4] == 0:\n validMoves.append((4, 4))\n else:\n for i in range(8):\n for j in range(8):\n if self.state[i][j] == 0:\n if self.couldBe(i, j, me):\n validMoves.append((i, j))\n\n return validMoves\n","sub_path":"ReversiAI_Python_v2/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":12483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"312043576","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nimport multiprocessing\nimport signal\nimport sys\nimport time\nimport paramiko\nimport traceback\nimport shutil\nimport tarfile\nimport hashlib\nfrom multiprocessing.managers import SyncManager\nfrom colors import colored\n\n\nclass deploy_environments:\n def __init__(self, logger, args, credentials, execution_name=None, ENV_NAME=None, ENV_DATA=None, uuid=None):\n self._logger = logger\n self._args = args\n self._credentials = credentials\n self._SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))\n self._EXECUTION_NAME = execution_name\n self._ENV_NAME = ENV_NAME\n self._ENV_DATA = ENV_DATA\n self._UUID = uuid\n self._COMPRESSED_FILE_NAME = 'METEOR.tar.gz'\n self._servers = '' if self._args.servers is None else '--servers \"{}\"'.format(self._args.servers)\n\n # Setup the Execution Logs Path\n self._LOGS_PATH = self._args.logs_path if self._args.logs_path.endswith('/') else self._args.logs_path + '/'\n self._LOCAL_EXECUTION_LOGS_PATH = \"{}{}/execution\".format(self._LOGS_PATH, self._EXECUTION_NAME)\n\n # Environment Variables\n if self._ENV_DATA is not None:\n # Deploy Path\n if self._ENV_DATA['ssh']['enabled'] == 'True':\n self._DEPLOY_PATH = self._ENV_DATA['ssh']['deploy_path'] + 'meteor/'\n else:\n self._DEPLOY_PATH = self._SCRIPT_PATH + '/'\n\n # Remote Execution Logs Path\n self._REMOTE_EXECUTION_LOGS_PATH = \"{}logs/{}/execution\".format(self._DEPLOY_PATH, execution_name)\n\n def validate(self, output=True, shared_array=None):\n try:\n if output:\n environment_type = '[LOCAL]' if self._ENV_DATA['ssh']['enabled'] == 'False' else '[SSH]'\n self._logger.info(colored('{} Region: {}'.format(environment_type, self._ENV_DATA['region']), attrs=['bold']))\n else:\n environment_type = '[LOCAL]' if self._ENV_DATA['ssh']['enabled'] == 'False' else '[SSH] '\n self._logger.info(colored('--> {} Region \\'{}\\' Started...'.format(environment_type, self._ENV_DATA['region']), 'yellow'))\n\n if self._ENV_DATA['ssh']['enabled'] == \"True\":\n same_version = self.check_version(output)\n if same_version:\n if output:\n self._logger.info(colored('- Region Updated.', 'green'))\n else:\n if output:\n self._logger.info(colored('- Region Outdated. 
Starting upload of the Meteor Engine...', 'red'))\n                    # Install Meteor in all SSH Regions\n                    self.prepare(output)\n                    # Setup User Execution Environment ('credentials.json', 'query_execution.py')\n                    self.setup(output)\n\n            # Check SQL Connection of the Environment [True: All SQL Connections Succeeded | False: Some SQL Connections Failed]\n            status = self.__check_sql_connection(output)\n\n            if status is True:\n                response = '{} Region \\'{}\\' Finished.'.format(environment_type, self._ENV_DATA['region'])\n                self._logger.info(colored('--> ' + response, 'green'))\n                if shared_array is not None:\n                    shared_array.append({\"region\": self._ENV_DATA['region'], \"success\": True, \"response\": response})\n            else:\n                # Handle SQL Error\n                response = '{} Region \\'{}\\' Failed.'.format(environment_type, self._ENV_DATA['region'])\n                self._logger.info(colored('--> ' + response, 'red'))\n                if shared_array is not None:\n                    shared_array.append({\"region\": self._ENV_DATA['region'], \"success\": False, \"response\": response})\n\n            return status\n\n        except Exception as e:\n            if self._ENV_DATA['ssh']['enabled'] == \"True\":\n                # Handle SSH Error\n                if self._credentials['execution_mode']['parallel'] == 'True':\n                    self._logger.info(colored(\" [{}/SSH] {} \".format(self._ENV_DATA['region'], self._ENV_DATA['ssh']['hostname']), attrs=['bold']) + str(e))\n                else:\n                    self._logger.info(colored(\"✘\", 'red') + colored(\" [{}] \".format(self._ENV_DATA['ssh']['hostname']), attrs=['bold']) + str(e))\n            response = '{} Region \\'{}\\' Failed.'.format(environment_type, self._ENV_DATA['region'])\n            self._logger.info(colored('--> ' + response, 'red'))\n            if shared_array is not None:\n                shared_array.append({\"region\": self._ENV_DATA['region'], \"ssh\": self._ENV_DATA['ssh']['hostname'], \"success\": False, \"response\": response, \"error\": str(e)})\n            raise Exception()\n        except KeyboardInterrupt:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n\n    def check_version(self, output=True):\n        # Get SSH Version\n        ssh_version = self.__ssh(\"cat {}version.txt\".format(self._DEPLOY_PATH))['stdout']\n        if len(ssh_version) == 0:\n            return False\n        else:\n            ssh_version = ssh_version[0].replace('\\n', '')\n\n        # Get Local Version\n        with open(self._SCRIPT_PATH + '/version.txt') as file_content:\n            local_version = file_content.read().replace('\\n', '')\n\n        # Compare Local & SSH Version\n        if local_version != ssh_version:\n            return False\n\n        return True\n\n    def generate_app_version(self):\n        version = ''\n        files = os.listdir(self._SCRIPT_PATH)\n        for f in files:\n            if not os.path.isdir(self._SCRIPT_PATH + '/' + f) and not f.endswith('.pyc') and not f.startswith('.') and not f.endswith('.gz') and f not in ['version.txt', 'query_execution.py', 'credentials.json']:\n                with open(\"{0}/{1}\".format(self._SCRIPT_PATH, f), 'rb') as file_content:\n                    file_hash = hashlib.sha512(file_content.read()).hexdigest()\n                    version += file_hash\n        return version\n\n    def prepare(self, output=True):\n        if output:\n            self._logger.info('- Preparing Deploy...')\n        self.__ssh(\"mkdir -p {0} && chmod 700 {0} && rm -rf {0}*\".format(self._DEPLOY_PATH))\n\n        if output:\n            self._logger.info('- Creating Deploy...')\n        self.__local('rm -rf \"{0}\" && tar -czvf \"{0}\" . 
--exclude \"logs\" --exclude \"*.git*\" --exclude \"*.pyc\" --exclude \"web\" --exclude \"credentials.json\" --exclude \"query_execution.py\"'.format(self._COMPRESSED_FILE_NAME), show_output=False)\n\n if output:\n self._logger.info('- Uploading Deploy...')\n self.__put(self._SCRIPT_PATH + '/' + self._COMPRESSED_FILE_NAME, self._DEPLOY_PATH + self._COMPRESSED_FILE_NAME)\n\n if output:\n self._logger.info(\"- Uncompressing Deploy...\")\n self.__ssh(\"tar -xvzf {0}{1} -C {0} && rm -rf {0}{1}\".format(self._DEPLOY_PATH, self._COMPRESSED_FILE_NAME))\n\n if output:\n self._logger.info(\"- Installing Requirements...\")\n self.__ssh('pip install -r {}meteor/requirements.txt --user'.format(self._ENV_DATA['ssh']['deploy_path']))\n\n def setup(self, output=True):\n if output:\n self._logger.info(\"- Setting Up New Execution...\")\n\n logs_path = \"{}logs/{}/\".format(self._DEPLOY_PATH, self._EXECUTION_NAME)\n self.__ssh('mkdir -p {}'.format(logs_path))\n self.__put(self._SCRIPT_PATH + '/credentials.json', logs_path + 'credentials.json')\n self.__put(self._SCRIPT_PATH + '/query_execution.py', logs_path + 'query_execution.py')\n\n def start(self, shared_array=None, progress_array=None):\n try:\n # Parallel Execution\n if self._credentials['execution_mode']['parallel'] == \"True\":\n # SSH Execution\n if self._ENV_DATA['ssh']['enabled'] == 'True':\n # Start the Execution\n if self._args.env_start_deploy:\n deploy = self.__ssh('cd \"{0}\" && python -u meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_start_deploy --execution_name \"{4}\" --uuid \"{5}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._UUID), show_output=True, progress_array=progress_array)\n else:\n deploy = self.__ssh('cd \"{0}\" && python -u meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --execution_name \"{4}\" --uuid \"{5}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._UUID), show_output=True, progress_array=progress_array)\n # Local Execution\n else:\n if self._args.env_start_deploy:\n deploy = self.__local('python -u {0}/meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_start_deploy --execution_name \"{4}\" --logs_path \"{5}\" --uuid \"{6}\"'.format(self._SCRIPT_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._args.logs_path, self._UUID), show_output=True, progress_array=progress_array)\n else:\n deploy = self.__local('python -u {0}/meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --execution_name \"{4}\" --logs_path \"{5}\" --uuid \"{6}\"'.format(self._SCRIPT_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._args.logs_path, self._UUID), show_output=True, progress_array=progress_array)\n\n # Check for Execution Error\n if len(deploy['stderr']) > 0:\n # Parse stderr\n stderr_parsed = str(deploy['stderr']).split(\"Traceback (most recent call last):\\n\")\n if len(stderr_parsed) > 1:\n stderr_parsed = \"Traceback (most recent call last):\\n\" + stderr_parsed[1]\n if stderr_parsed.splitlines()[-1].startswith('Process Process-'):\n stderr_parsed = stderr_parsed.rsplit(\"\\n\",2)[0]\n else:\n stderr_parsed = deploy['stderr']\n\n shared_array.append({ \"region\": self._ENV_DATA['region'], \"success\": False, \"error\": stderr_parsed })\n else:\n shared_array.append({ \"region\": self._ENV_DATA['region'], \"success\": True })\n\n # Sequential Execution\n else:\n # SSH Execution\n if 
self._ENV_DATA['ssh']['enabled'] == 'True':\n                # Start the Execution\n                if self._args.env_start_deploy:\n                    stderr = self.__ssh('cd \"{0}\" && python meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_start_deploy --execution_name \"{4}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME), show_output=True)['stderr']\n                else:\n                    stderr = self.__ssh('cd \"{0}\" && python meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --execution_name \"{4}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME), show_output=True)['stderr']\n            # Local Execution\n            else:\n                if self._args.env_start_deploy:\n                    stderr = self.__local('python {0}/meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_start_deploy --execution_name \"{4}\" --logs_path \"{5}\"'.format(self._SCRIPT_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._args.logs_path), show_output=True)['stderr']\n                else:\n                    stderr = self.__local('python {0}/meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --execution_name \"{4}\" --logs_path \"{5}\"'.format(self._SCRIPT_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME, self._args.logs_path), show_output=True)['stderr']\n\n            # Check for Execution Error\n            if len(stderr) > 0:\n                # Remove last '\\n' character\n                raise Exception(stderr[:-1])\n\n        except KeyboardInterrupt:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n\n    def compress_logs(self, shared_array=None):\n        try:\n            output = self.__ssh('cd \"{0}\" && python meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_compress --execution_name \"{4}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], self._EXECUTION_NAME))\n\n            if len(output['stderr']) > 0:\n                shared_array.append(output['stderr'])\n\n        except KeyboardInterrupt:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n
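\n    # The log retrieval below is a three-step round trip: compress_logs() asks the remote\n    # side to bundle its execution logs into <region>.tar.gz, then get_logs() pulls that\n    # archive over SFTP, unpacks it locally and deletes the archive. A minimal sketch of\n    # the same download-and-extract step (paths here are illustrative, not from the config):\n    #\n    #     import tarfile, os\n    #     archive = '/tmp/eu-west-1.tar.gz'            # fetched beforehand via sftp.get()\n    #     with tarfile.open(archive) as tar:\n    #         tar.extractall(path='/tmp/execution')    # unpack next to the other regions\n    #     os.remove(archive)                           # keep only the extracted logs\n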
\n    def get_logs(self, shared_array=None):\n        try:\n            if self._ENV_DATA['ssh']['enabled'] == 'True':\n                remote_path = \"{0}/{1}.tar.gz\".format(self._REMOTE_EXECUTION_LOGS_PATH, self._ENV_DATA['region'])\n                local_path = \"{0}/{1}\".format(self._LOCAL_EXECUTION_LOGS_PATH, self._ENV_DATA['region'])\n\n                # 1. Download Compressed Logs\n                status = self.__get(remote_path, local_path + '.tar.gz')\n\n                if status:\n                    # 2. Uncompress Downloaded Logs\n                    with tarfile.open(local_path + '.tar.gz') as tar:\n                        tar.extractall(path=self._LOCAL_EXECUTION_LOGS_PATH)\n\n                    # 3. Delete Downloaded Compressed Logs\n                    os.remove(local_path + '.tar.gz')\n\n        except Exception:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                self._logger.error(colored(\"--> Error Downloading Logs:\\n{}\".format(traceback.format_exc()), 'red'))\n                raise\n            else:\n                shared_array.append(traceback.format_exc())\n\n        except KeyboardInterrupt:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n\n    def clean_remote(self, shared_array=None):\n        environment_logs = \"{}logs/{}/\".format(self._DEPLOY_PATH, self._EXECUTION_NAME)\n        output = self.__ssh('rm -rf {0}'.format(environment_logs))\n\n        if len(output['stderr']) > 0:\n            shared_array.append(output['stderr'])\n\n    def clean_local(self):\n        # Delete Uncompressed Deployment Folder\n        if os.path.exists(self._LOGS_PATH + self._EXECUTION_NAME):\n            if os.path.isdir(self._LOGS_PATH + self._EXECUTION_NAME):\n                shutil.rmtree(self._LOGS_PATH + self._EXECUTION_NAME)\n\n        # Delete 'METEOR.tar.gz'\n        self.__local('rm -rf {0}'.format(self._COMPRESSED_FILE_NAME), show_output=False)\n\n    # Handle SIGINT from SyncManager object\n    def mgr_sig_handler(self, signal, frame):\n        pass\n\n    # Initializer for SyncManager\n    def __mgr_init(self):\n        signal.signal(signal.SIGINT, self.mgr_sig_handler)\n
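\n    # Why the two helpers above exist: SyncManager.start() accepts an initializer that runs\n    # inside the manager's server process, and pointing SIGINT at a no-op there keeps a\n    # Ctrl-C in the parent from killing the manager while results are still being gathered.\n    # A minimal sketch of the same pattern outside this class (names are illustrative):\n    #\n    #     import signal\n    #     from multiprocessing.managers import SyncManager\n    #\n    #     def ignore_sigint():\n    #         signal.signal(signal.SIGINT, lambda sig, frame: None)\n    #\n    #     manager = SyncManager()\n    #     manager.start(ignore_sigint)   # the shared list survives KeyboardInterrupt in the parent\n    #     shared = manager.list()\n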
\n    def __check_sql_connection(self, output):\n        connection_succeeded = True\n\n        if output:\n            self._logger.info(\"- Checking SQL Connections...\")\n\n        if self._credentials['execution_mode']['parallel'] == \"True\":\n            # Init SyncManager\n            manager = SyncManager()\n            manager.start(self.__mgr_init)\n            shared_array = manager.list()\n            processes = []\n\n            try:\n                for sql in self._ENV_DATA['sql']:\n                    p = multiprocessing.Process(target=self.__check_sql_connection_logic, args=(sql, output, shared_array))\n                    p.start()\n                    processes.append(p)\n\n                for process in processes:\n                    process.join()\n\n                for data in shared_array:\n                    connection_succeeded &= data['success']\n                    if data['success'] is False:\n                        self._logger.info(colored(\" [{}/SQL] {} \".format(self._ENV_DATA['region'], data['sql']), attrs=['bold']) + data['error'])\n\n            except KeyboardInterrupt:\n                for process in processes:\n                    process.join()\n                raise\n        else:\n            for sql in self._ENV_DATA['sql']:\n                connection_succeeded &= self.__check_sql_connection_logic(sql, output)\n\n        return connection_succeeded\n\n    def __check_sql_connection_logic(self, sql, output, shared_array=None):\n        try:\n            if self._ENV_DATA['ssh']['enabled'] == 'True':\n                command = 'cd \"{0}\" && python meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_check_sql \"{4}\" --execution_name \"{5}\"'.format(self._DEPLOY_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], sql['name'], self._EXECUTION_NAME)\n                result = self.__ssh(command)['stdout']\n            else:\n                result = self.__local('cd \"{0}\" && python meteor.py --environment \"{1}\" {2} --env_id \"{3}\" --env_check_sql \"{4}\" --execution_name \"{5}\" --logs_path \"{6}\"'.format(self._SCRIPT_PATH, self._ENV_NAME, self._servers, self._ENV_DATA['region'], sql['name'], self._EXECUTION_NAME, self._args.logs_path), show_output=False)['stdout']\n\n            if len(result) == 0:\n                if output:\n                    self._logger.info(colored(\"✔\", 'green') + colored(\" [{}]\".format(sql['name']), attrs=['bold']) + \" Connection Succeeded\")\n                if shared_array is not None:\n                    shared_array.append({\"region\": self._ENV_DATA['region'], \"success\": True, \"sql\": sql['name']})\n                return True\n            else:\n                result = result[0] if type(result) is list else result\n                if output:\n                    self._logger.error(colored(\"✘\", 'red') + colored(\" [{}] \".format(sql['name']), attrs=['bold']) + str(result.replace('\\n','')))\n                if shared_array is not None:\n                    shared_array.append({\"region\": self._ENV_DATA['region'], \"success\": False, \"sql\": sql['name'], \"error\": result.replace('\\n','')})\n                return False\n\n        except KeyboardInterrupt:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n\n        except Exception:\n            if self._credentials['execution_mode']['parallel'] != 'True':\n                raise\n\n    def sigint(self):\n        command = \"ps -U $USER -u $USER u | grep \\\"\" + str(self._UUID) + \"\\\" | grep -v grep | awk '{print $2}' | xargs kill -2\"\n\n        if self._ENV_DATA['ssh']['enabled'] == 'False':\n            self.__local(command)\n        else:\n            self.__ssh(command)\n\n    def check_processes(self):\n        # Check Processes Currently Executing\n        attempts = 99\n\n        for i in range(attempts):\n            command = \"ps -U $USER -u $USER u | grep \\\"\" + str(self._UUID) + \"\\\" | grep -v grep | awk '{print $2}' | wc -l\"\n\n            if self._ENV_DATA['ssh']['enabled'] == 'False':\n                # __local returns stdout as a list of lines\n                count = int(self.__local(command)['stdout'][0])\n                print(\"-- [Attempt {}/{}] Remaining Processes: {}\".format(i+1, attempts, int(count)))\n            else:\n                count = int(self.__ssh(command)['stdout'][0])\n                print(\"-- [Attempt {}/{}] Remaining Processes: {}\".format(i+1, attempts, int(count)))\n\n            if int(count) == 0:\n                break\n            time.sleep(10)\n\n    def sigkill(self):\n        command = \"ps -U $USER -u $USER u | grep \\\"\" + str(self._UUID) + \"\\\" | grep -v grep | awk '{print $2}' | xargs kill -9 2> /dev/null\"\n        if self._ENV_DATA['ssh']['enabled'] == 'False':\n            self.__local(command)\n        else:\n            self.__ssh(command)\n\n    ################\n    # Core Methods #\n    ################\n    def __local(self, command, show_output=False, progress_array=None):\n        # Execute Local Command via subprocess\n        client = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n        # Show Output (collect the lines while streaming, since reading the pipe consumes it)\n        stdout_lines = []\n        if show_output:\n            for line in client.stdout:\n                stdout_lines.append(line)\n                if progress_array is None:\n                    print(line.rstrip())\n                else:\n                    progress_array.append(line)\n        else:\n            stdout_lines = client.stdout.readlines()\n\n        # Return Execution Output\n        return { \"stdout\": stdout_lines, \"stderr\": ''.join(client.stderr.readlines()) }\n\n    def __ssh(self, command, show_output=False, progress_array=None):\n        try:\n            # Suppress Errors Output\n            sys_stderr = sys.stderr\n            sys.stderr = open('/dev/null', 'w')\n\n            # Init Paramiko SSH Connection\n            client = paramiko.SSHClient()\n            client.load_system_host_keys()\n            client.set_missing_host_key_policy(paramiko.WarningPolicy())\n            client.connect(self._ENV_DATA['ssh']['hostname'], port=22, username=self._ENV_DATA['ssh']['username'], password=self._ENV_DATA['ssh']['password'], key_filename=self._ENV_DATA['ssh']['key'])\n\n            # Show Errors Output Again\n            sys.stderr = sys_stderr\n\n            # Paramiko Execute Command\n            stdin, stdout, stderr = client.exec_command(command, get_pty=False)\n            stdin.close()\n\n            # Show Output (collect the lines while streaming, since reading the channel consumes it)\n            stdout_lines = []\n            if show_output:\n                for line in stdout:\n                    stdout_lines.append(line)\n                    if progress_array is None:\n                        print(line.rstrip())\n                    else:\n                        progress_array.append(line)\n            else:\n                stdout_lines = stdout.readlines()\n\n            # Return Execution Output\n            return { \"stdout\": stdout_lines, \"stderr\": ''.join(stderr.readlines()) }\n\n        finally:\n            # Restore stderr and close the Paramiko connection\n            sys.stderr = sys_stderr\n            if 'client' in locals():\n                client.close()\n
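\n    # Both __local and __ssh return the same result shape, which is the contract the callers\n    # above rely on: {\"stdout\": <list of output lines>, \"stderr\": <single joined string>}.\n    # A usage sketch (values are illustrative):\n    #\n    #     result = self.__ssh(\"cat {}version.txt\".format(self._DEPLOY_PATH))\n    #     if result['stderr'] == '' and len(result['stdout']) > 0:\n    #         version = result['stdout'][0].replace('\\n', '')\n    #\n    # __get and __put below use SFTP instead; __get returns True/False depending on whether\n    # the remote file exists.\n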
\n    def __get(self, remote_path, local_path):\n        try:\n            # Suppress Errors Output\n            sys_stderr = sys.stderr\n            sys.stderr = open('/dev/null', 'w')\n\n            # Init Paramiko Connection\n            client = paramiko.SSHClient()\n            client.load_system_host_keys()\n            client.set_missing_host_key_policy(paramiko.WarningPolicy())\n            client.connect(self._ENV_DATA['ssh']['hostname'], port=22, username=self._ENV_DATA['ssh']['username'], password=self._ENV_DATA['ssh']['password'], key_filename=self._ENV_DATA['ssh']['key'])\n\n            # Show Errors Output Again\n            sys.stderr = sys_stderr\n\n            # Open sftp connection\n            sftp = client.open_sftp()\n\n            # Check if file exists (ls)\n            sftp.stat(remote_path)\n\n            # Download File\n            sftp.get(remote_path, local_path)\n            return True\n\n        except IOError:\n            return False\n        finally:\n            # Restore stderr, then close the sftp session and connection if they were opened\n            sys.stderr = sys_stderr\n            if 'sftp' in locals():\n                sftp.close()\n            if 'client' in locals():\n                client.close()\n\n    def __put(self, local_path, remote_path):\n        try:\n            # Init Paramiko Connection\n            client = paramiko.SSHClient()\n            client.load_system_host_keys()\n            client.set_missing_host_key_policy(paramiko.WarningPolicy())\n            client.connect(self._ENV_DATA['ssh']['hostname'], port=22, username=self._ENV_DATA['ssh']['username'], password=self._ENV_DATA['ssh']['password'], key_filename=self._ENV_DATA['ssh']['key'])\n\n            # Open sftp connection\n            sftp = client.open_sftp()\n\n            # Upload File\n            sftp.put(local_path, remote_path)\n\n        finally:\n            if 'sftp' in locals():\n                sftp.close()\n            if 'client' in locals():\n                client.close()\n","sub_path":"app/deploy_environments.py","file_name":"deploy_environments.py","file_ext":"py","file_size_in_byte":23412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"413727657","text":"#!/usr/bin/env python3\n\n\nclass Node:\n    def __init__(self, datum=None, next=None):\n        self.datum = datum\n        self.next = next\n\n\nclass SinglyLinkedList:\n    def __init__(self):\n        # the list always ends in an empty sentinel node; traversal stops when it is reached\n        self.head = Node()\n\n    def enumerate(self):\n        if self.head is None:\n            return []\n\n        current = self.head\n        while current.next is not None:\n            yield current.datum\n            current = current.next\n\n    def add_front(self, datum):\n        # the new node becomes the head and points at the old head\n        next = Node(datum, self.head)\n        self.head = next\n\n    def add_back(self, datum):\n        # walk to the trailing sentinel and splice the new node in just before it\n        previous = None\n        current = self.head\n        while current.next is not None:\n            previous = current\n            current = current.next\n\n        added = Node(datum, current)\n        if previous is None:\n            self.head = added\n        else:\n            previous.next = added\n\n\ndef test_empty():\n    empty_list = SinglyLinkedList()\n    assert len(list(empty_list.enumerate())) == 0\n\n\ndef test_single_entry():\n    sll = SinglyLinkedList()\n\n    sll.add_front(1)\n\n    enumerated = list(sll.enumerate())\n    assert enumerated == [1]\n\n\ndef test_front_filled():\n    sll = SinglyLinkedList()\n\n    sll.add_front(1)\n    sll.add_front(2)\n    sll.add_front(3)\n\n    enumerated = list(sll.enumerate())\n    assert enumerated == [3, 2, 1]\n\n\ndef test_back_filled():\n    sll = SinglyLinkedList()\n\n    sll.add_back(1)\n    sll.add_back(2)\n    sll.add_back(3)\n\n    enumerated = list(sll.enumerate())\n    assert enumerated == [1, 2, 3]\n","sub_path":"Python/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"291925627","text":"import configparser, re, os, gi, traceback\nfrom shutil import copyfile\nfrom pathlib import Path\nfrom functools import reduce\nfrom inspect import signature\nimport regex\nfrom ptxprint.font import TTFont\nfrom ptxprint.runner import checkoutput\nfrom ptxprint import sfm\nfrom ptxprint.sfm import usfm, style, Text\nfrom ptxprint.usfmutils import Usfm, Sheets, isScriptureText, Module\nfrom ptxprint.utils import _, universalopen, localhdrmappings, pluralstr, multstr, coltoonemax, \\\n    chaps, books, bookcodes, allbooks, oneChbooks, asfloat, f2s, cachedData, pycodedir\nfrom ptxprint.dimension import Dimension\nimport ptxprint.scriptsnippets as scriptsnippets\nfrom ptxprint.interlinear import Interlinear\nfrom ptxprint.reference import Reference, RefRange, RefList, 
RefSeparators, AnyBooks\nfrom ptxprint.xrefs import Xrefs\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# After universalopen to resolve circular import. Kludge\nfrom ptxprint.snippets import FancyIntro, PDFx1aOutput, Diglot, FancyBorders, ThumbTabs, Colophon, Grid\n\ndef loosint(x):\n try:\n return int(x)\n except (ValueError, TypeError):\n return 0\n\nModelMap = {\n \"L_\": (\"c_diglot\", lambda w,v: \"L\" if v else \"\"),\n \"R_\": (\"c_diglot\", lambda w,v: \"R\" if v else \"\"),\n \"date_\": (\"_date\", None),\n \"pdfdate_\": (\"_pdfdate\", None),\n \"xmpdate_\": (\"_xmpdate\", None),\n \"ifusediglotcustomsty_\": (\"_diglotcustomsty\", lambda w,v: \"%\" if not v else \"\"),\n \"ifusediglotmodsty_\": (\"_diglotmodsty\", lambda w,v: \"%\" if not v else \"\"),\n \"ifdiglotincludefootnotes_\":(\"_diglotinclfn\", lambda w,v: \"%\" if not v else \"\"),\n \"ifdiglotincludexrefs_\": (\"_diglotinclxr\", lambda w,v: \"%\" if not v else \"\"),\n \"transparency_\": (\"fcb_outputFormat\", lambda w,v: \"false\" if v in (None, \"None\", \"PDF/X-4\") else \"true\"),\n\n \"config/notes\": (\"t_configNotes\", lambda w,v: v or \"\"),\n \"config/pwd\": (\"t_invisiblePassword\", lambda w,v: v or \"\"),\n \"config/version\": (\"_version\", None),\n\n # \"project/hideadvsettings\": (\"c_showAdvancedOptions\", lambda w,v: not v),\n \"project/bookscope\": (\"r_book\", None),\n \"project/uilevel\": (\"fcb_uiLevel\", None),\n \"project/book\": (\"ecb_book\", None),\n \"project/modulefile\": (\"btn_chooseBibleModule\", lambda w,v: v.replace(\"\\\\\",\"/\") if v is not None else \"\"),\n \"project/booklist\": (\"ecb_booklist\", lambda w,v: v or \"\"),\n \"project/ifinclfrontpdf\": (\"c_inclFrontMatter\", None),\n \"project/frontincludes\": (\"btn_selectFrontPDFs\", lambda w,v: \"\\n\".join('\\\\includepdf{{{}}}'.format(s.as_posix()) \\\n for s in w.FrontPDFs) if (w.get(\"c_inclFrontMatter\") and w.FrontPDFs is not None\n and w.FrontPDFs != 'None') else \"\"),\n \"project/ifinclbackpdf\": (\"c_inclBackMatter\", None),\n \"project/backincludes\": (\"btn_selectBackPDFs\", lambda w,v: \"\\n\".join('\\\\includepdf{{{}}}'.format(s.as_posix()) \\\n for s in w.BackPDFs) if (w.get(\"c_inclBackMatter\") and w.BackPDFs is not None\n and w.BackPDFs != 'None') else \"\"),\n \"project/processscript\": (\"c_processScript\", None),\n \"project/when2processscript\": (\"r_when2processScript\", None),\n \"project/selectscript\": (\"btn_selectScript\", lambda w,v: w.customScript.as_posix() if w.customScript is not None else \"\"),\n \"project/selectxrfile\": (\"btn_selectXrFile\", None),\n \"project/usechangesfile\": (\"c_usePrintDraftChanges\", lambda w,v :\"true\" if v else \"false\"),\n \"project/ifusemodstex\": (\"c_useModsTex\", lambda w,v: \"\" if v else \"%\"),\n \"project/ifusepremodstex\": (\"c_usePreModsTex\", lambda w,v: \"\" if v else \"%\"),\n \"project/ifusecustomsty\": (\"c_useCustomSty\", lambda w,v: \"\" if v else \"%\"),\n \"project/ifusemodssty\": (\"c_useModsSty\", lambda w,v: \"\" if v else \"%\"),\n \"project/ifstarthalfpage\": (\"c_startOnHalfPage\", lambda w,v :\"true\" if v else \"false\"),\n \"project/randompicposn\": (\"c_randomPicPosn\", None),\n \"project/canonicalise\": (\"c_canonicalise\", None),\n \"project/interlinear\": (\"c_interlinear\", lambda w,v: \"\" if v else \"%\"),\n \"project/interlang\": (\"t_interlinearLang\", None),\n \"project/ruby\": (\"c_ruby\", lambda w,v : \"t\" if v else \"b\"),\n \"project/license\": (\"ecb_licenseText\", None),\n \"project/copyright\": 
(\"t_copyrightStatement\", lambda w,v: re.sub(r\"\\\\u([0-9a-fA-F]{4})\",\n lambda m: chr(int(m.group(1), 16)), v) if v is not None else \"\"),\n \"project/iffrontmatter\": (\"c_frontmatter\", lambda w,v: \"\" if v else \"%\"),\n \"project/periphpagebreak\": (\"c_periphPageBreak\", None),\n \"project/colophontext\": (\"txbf_colophon\", lambda w,v: re.sub(r\"\\\\u([0-9a-fA-F]{4})\",\n lambda m: chr(int(m.group(1), 16)), v) if v is not None else \"\"),\n \"project/ifcolophon\": (\"c_colophon\", lambda w,v: \"\" if v else \"%\"),\n \"project/pgbreakcolophon\": (\"c_standAloneColophon\", lambda w,v: \"\" if v else \"%\"),\n\n \"paper/height\": (\"ecb_pagesize\", lambda w,v: re.sub(r\"^.*?[,xX]\\s*(.+?)\\s*(?:\\(.*|$)\", r\"\\1\", v or \"210mm\")),\n \"paper/width\": (\"ecb_pagesize\", lambda w,v: re.sub(r\"^(.*?)\\s*[,xX].*$\", r\"\\1\", v or \"148mm\")),\n \"paper/pagesize\": (\"ecb_pagesize\", None),\n \"paper/ifwatermark\": (\"c_applyWatermark\", lambda w,v: \"\" if v else \"%\"),\n \"paper/watermarkpdf\": (\"btn_selectWatermarkPDF\", lambda w,v: w.watermarks.as_posix() \\\n if (w.get(\"c_applyWatermark\") and w.watermarks is not None and w.watermarks != 'None') else \"\"),\n \"paper/ifcropmarks\": (\"c_cropmarks\", lambda w,v :\"true\" if v else \"false\"), \n \"paper/ifgrid\": (\"c_grid\", lambda w,v :\"\" if v else \"%\"),\n \"paper/ifverticalrule\": (\"c_verticalrule\", lambda w,v :\"true\" if v else \"false\"),\n \"paper/margins\": (\"s_margins\", lambda w,v: round(float(v)) if v else \"12\"),\n \"paper/topmargin\": (\"s_topmargin\", None),\n \"paper/bottommargin\": (\"s_bottommargin\", None),\n \"paper/headerpos\": (\"s_headerposition\", None),\n \"paper/footerpos\": (\"s_footerposition\", None),\n \"paper/rulegap\": (\"s_rhruleposition\", None),\n\n \"paper/ifaddgutter\": (\"c_pagegutter\", lambda w,v :\"true\" if v else \"false\"),\n \"paper/gutter\": (\"s_pagegutter\", lambda w,v: round(float(v)) if v else \"0\"),\n \"paper/colgutteroffset\": (\"s_colgutteroffset\", lambda w,v: \"{:.1f}\".format(float(v)) if v else \"0.0\"),\n \"paper/columns\": (\"c_doublecolumn\", lambda w,v: \"2\" if v else \"1\"),\n \"paper/bottomrag\": (\"s_bottomRag\", lambda w,v: str(int(v or 0)+0.95)),\n \"paper/fontfactor\": (\"s_fontsize\", lambda w,v: f2s(float(v) / 12, dp=8) if v else \"1.000\"),\n\n \"grid/gridlines\": (\"c_gridLines\", lambda w,v: \"\\doGridLines\" if v else \"\"),\n \"grid/gridgraph\": (\"c_gridGraph\", lambda w,v: \"\\doGraphPaper\" if v else \"\"),\n \"grid/majorcolor\": (\"col_gridMajor\", None),\n \"majorcolor_\": (\"col_gridMajor\", lambda w,v: \"{:.2f} {:.2f} {:.2f}\".format(*coltoonemax(v)) if v else \"0.8 0.8 0.8\"),\n \"grid/minorcolor\": (\"col_gridMinor\", None),\n \"minorcolor_\": (\"col_gridMinor\", lambda w,v: \"{:.2f} {:.2f} {:.2f}\".format(*coltoonemax(v)) if v else \"0.8 1.0 1.0\"),\n \"grid/majorthickness\": (\"s_gridMajorThick\", None),\n \"grid/minorthickness\": (\"s_gridMinorThick\", None),\n \"grid/units\": (\"fcb_gridUnits\", None),\n \"grid/divisions\": (\"s_gridMinorDivisions\", lambda w,v: int(float(v)) if v else \"10\"),\n \"grid/xyadvance\": (\"s_gridMinorDivisions\", lambda w,v: (1 / max(asfloat(v, 4), 1)) if v else \"0.25\"),\n \"grid/xyoffset\": (\"fcb_gridOffset\", None),\n \n \"fancy/enableborders\": (\"c_borders\", lambda w,v: \"\" if v else \"%\"),\n \"fancy/pageborder\": (\"c_inclPageBorder\", lambda w,v: \"\" if v else \"%\"),\n \"fancy/pageborderfullpage\": (\"c_borderPageWide\", lambda w,v: \"\" if v else \"%\"),\n 
\"fancy/pagebordernfullpage_\": (\"c_borderPageWide\", lambda w,v: \"%\" if v else \"\"),\n \"fancy/pageborderpdf\": (\"btn_selectPageBorderPDF\", lambda w,v: w.pageborder.as_posix() \\\n if (w.pageborder is not None and w.pageborder != 'None') \\\n else get(\"/ptxprintlibpath\")+\"/A5 page border.pdf\"),\n \"fancy/sectionheader\": (\"c_inclSectionHeader\", lambda w,v: \"\" if v else \"%\"),\n \"fancy/sectionheaderpdf\": (\"btn_selectSectionHeaderPDF\", lambda w,v: w.sectionheader.as_posix() \\\n if (w.sectionheader is not None and w.sectionheader != 'None') \\\n else get(\"/ptxprintlibpath\")+\"/A5 section head border.pdf\"),\n \"fancy/sectionheadershift\": (\"s_inclSectionShift\", lambda w,v: float(v or \"0\")),\n \"fancy/sectionheaderscale\": (\"s_inclSectionScale\", lambda w,v: int(float(v or \"1.0\")*1000)),\n \"fancy/endofbook\": (\"c_inclEndOfBook\", lambda w,v: \"\" if v else \"%\"),\n \"fancy/endofbookpdf\": (\"btn_selectEndOfBookPDF\", lambda w,v: w.endofbook.as_posix() \\\n if (w.endofbook is not None and w.endofbook != 'None') \\\n else get(\"/ptxprintlibpath\")+\"/decoration.pdf\"),\n \"fancy/versedecorator\": (\"c_inclVerseDecorator\", lambda w,v: \"\" if v else \"%\"),\n \"fancy/versedecoratortype\": (\"r_decorator\", None),\n \"fancy/versedecoratorpdf\": (\"btn_selectVerseDecorator\", lambda w,v: w.versedecorator.as_posix() \\\n if (w.versedecorator is not None and w.versedecorator != 'None') \\\n else get(\"/ptxprintlibpath\")+\"/Verse number star.pdf\"),\n \"fancy/versedecoratorshift\": (\"s_verseDecoratorShift\", lambda w,v: float(v or \"0\")),\n \"fancy/versedecoratorscale\": (\"s_verseDecoratorScale\", lambda w,v: int(float(v or \"1.0\")*1000)),\n \"fancy/endayah\": (\"c_decorator_endayah\", lambda w,v: \"\" if v else \"%\"), # In the UI this is \"Move Ayah\"\n\n \"paragraph/linespacing\": (\"s_linespacing\", lambda w,v: f2s(float(v), dp=8) if v else \"15\"),\n \"paragraph/linespacebase\": (\"c_AdvCompatLineSpacing\", lambda w,v: 14 if v else 12),\n \"paragraph/useglyphmetrics\": (\"c_AdvCompatGlyphMetrics\", lambda w,v: \"%\" if v else \"\"),\n \"paragraph/ifjustify\": (\"c_justify\", lambda w,v: \"true\" if v else \"false\"),\n \"paragraph/ifhyphenate\": (\"c_hyphenate\", lambda w,v: \"\" if v else \"%\"),\n \"paragraph/ifomithyphen\": (\"c_omitHyphen\", lambda w,v: \"\" if v else \"%\"),\n \"paragraph/ifnothyphenate\": (\"c_hyphenate\", lambda w,v: \"%\" if v else \"\"),\n \"paragraph/ifusefallback\": (\"c_useFallbackFont\", None),\n \"paragraph/missingchars\": (\"t_missingChars\", lambda w,v: v or \"\"),\n\n \"document/sensitive\": (\"c_sensitive\", None),\n \"document/title\": (None, lambda w,v: \"[Unknown]\" if w.get(\"c_sensitive\") else w.ptsettings.get('FullName', \"[Unknown]\")),\n \"document/subject\": (\"ecb_booklist\", lambda w,v: v if w.get(\"r_book\") == \"multiple\" else w.get(\"ecb_book\")),\n \"document/author\": (None, lambda w,v: \"\" if w.get(\"c_sensitive\") else w.ptsettings.get('Copyright', \"\")),\n\n \"document/startpagenum\": (\"s_startPageNum\", lambda w,v: int(float(v)) if v else \"1\"),\n \"document/multibook\": (\"r_book_multiple\", lambda w,v: \"\" if v else \"%\"),\n \"document/toc\": (\"c_autoToC\", lambda w,v: \"\" if v else \"%\"),\n \"document/toctitle\": (\"t_tocTitle\", lambda w,v: v or \"\"),\n \"document/usetoc1\": (\"c_usetoc1\", lambda w,v: \"true\" if v else \"false\"),\n \"document/usetoc2\": (\"c_usetoc2\", lambda w,v: \"true\" if v else \"false\"),\n \"document/usetoc3\": (\"c_usetoc3\", lambda w,v: \"true\" if v else 
\"false\"),\n \"document/tocleaders\": (\"fcb_leaderStyle\", None),\n \"document/chapfrom\": (\"s_chapfrom\", lambda w,v: int(float(v)) if v else \"1\"),\n \"document/chapto\": (\"s_chapto\", lambda w,v: int(float(v)) if v else \"999\"),\n \"document/colgutterfactor\": (\"s_colgutterfactor\", lambda w,v: round(float(v or 4)*3)), # Hack to be fixed\n \"document/ifrtl\": (\"fcb_textDirection\", lambda w,v:\"true\" if v == \"rtl\" else \"false\"),\n \"document/toptobottom\": (\"fcb_textDirection\", lambda w,v: \"\" if v == \"ttb\" else \"%\"),\n \"document/iflinebreakon\": (\"c_linebreakon\", lambda w,v: \"\" if v else \"%\"),\n \"document/linebreaklocale\": (\"t_linebreaklocale\", lambda w,v: v or \"\"),\n \"document/script\": (\"fcb_script\", lambda w,v: \":script=\"+v.lower() if v and v != \"Zyyy\" else \"\"),\n \"document/ch1pagebreak\": (\"c_ch1pagebreak\", None),\n \"document/marginalverses\": (\"c_marginalverses\", lambda w,v: \"\" if v else \"%\"),\n \"document/columnshift\": (\"s_columnShift\", lambda w,v: v or \"16\"),\n \"document/ifshowchapternums\": (\"c_chapterNumber\", lambda w,v: \"%\" if v else \"\"),\n \"document/showxtrachapnums\": (\"c_showNonScriptureChapters\", None),\n \"document/ifshow1chbooknum\": (\"c_show1chBookNum\", None),\n \"document/ifomitverseone\": (\"c_omitverseone\", lambda w,v: \"true\" if v else \"false\"),\n \"document/ifshowversenums\": (\"c_verseNumbers\", lambda w,v: \"%\" if v else \"\"),\n \"document/ifmainbodytext\": (\"c_mainBodyText\", None),\n \"document/glueredupwords\": (\"c_glueredupwords\", None),\n \"document/ifinclfigs\": (\"c_includeillustrations\", lambda w,v: \"true\" if v else \"false\"),\n \"document/ifusepiclist\": (\"c_includeillustrations\", lambda w,v :\"\" if v else \"%\"),\n \"document/iffigexclwebapp\": (\"c_figexclwebapp\", None),\n \"document/iffigskipmissing\": (\"c_skipmissingimages\", None),\n \"document/iffigcrop\": (\"c_cropborders\", None),\n \"document/iffigplaceholders\": (\"c_figplaceholders\", lambda w,v: \"true\" if v else \"false\"),\n \"document/iffigshowcaptions\": (\"c_fighidecaptions\", lambda w,v: \"false\" if v else \"true\"),\n \"document/iffighiderefs\": (\"c_fighiderefs\", None),\n \"document/picresolution\": (\"r_pictureRes\", None),\n \"document/customfiglocn\": (\"c_useCustomFolder\", lambda w,v :\"\" if v else \"%\"),\n \"document/exclusivefolder\": (\"c_exclusiveFiguresFolder\", None),\n \"document/customfigfolder\": (\"btn_selectFigureFolder\", lambda w,v: w.customFigFolder.as_posix() \\\n if w.customFigFolder is not None else \"\"),\n \"document/imagetypepref\": (\"t_imageTypeOrder\", None),\n \"document/glossarymarkupstyle\": (\"fcb_glossaryMarkupStyle\", None),\n \"document/filterglossary\": (\"c_filterGlossary\", None),\n \"document/hangpoetry\": (\"c_hangpoetry\", lambda w,v: \"\" if v else \"%\"),\n \"document/preventorphans\": (\"c_preventorphans\", None),\n \"document/preventwidows\": (\"c_preventwidows\", None),\n \"document/sectionheads\": (\"c_sectionHeads\", None),\n \"document/parallelrefs\": (\"c_parallelRefs\", None),\n \"document/bookintro\": (\"c_bookIntro\", None),\n \"document/introoutline\": (\"c_introOutline\", None),\n \"document/indentunit\": (\"s_indentUnit\", lambda w,v: round(float(v or \"1.0\"), 1)),\n \"document/firstparaindent\": (\"c_firstParaIndent\", lambda w,v: \"true\" if v else \"false\"),\n \"document/ifhidehboxerrors\": (\"c_showHboxErrorBars\", lambda w,v :\"%\" if v else \"\"),\n \"document/hidemptyverses\": (\"c_hideEmptyVerses\", None),\n 
\"document/elipsizemptyvs\": (\"c_elipsizeMissingVerses\", None),\n \"document/ifspacing\": (\"c_spacing\", lambda w,v :\"\" if v else \"%\"),\n \"document/spacestretch\": (\"s_maxSpace\", lambda w,v : str((int(float(v or 150)) - 100) / 100.)),\n \"document/spaceshrink\": (\"s_minSpace\", lambda w,v : str((100 - int(float(v or 66))) / 100.)),\n \"document/ifletter\": (\"c_letterSpacing\", lambda w,v: \"\" if v else \"%\"),\n \"document/letterstretch\": (\"s_letterStretch\", lambda w,v: float(v or \"5.0\") / 100.),\n \"document/lettershrink\": (\"s_letterShrink\", lambda w,v: float(v or \"1.0\") / 100.),\n \"document/ifcolorfonts\": (\"c_colorfonts\", lambda w,v: \"%\" if v else \"\"),\n\n \"document/ifchaplabels\": (\"c_useChapterLabel\", lambda w,v: \"%\" if v else \"\"),\n \"document/clabelbooks\": (\"t_clBookList\", lambda w,v: v.upper() if v else \"\"),\n \"document/clabel\": (\"t_clHeading\", None),\n \"document/diffcolayout\": (\"c_differentColLayout\", None),\n \"document/diffcolayoutbooks\": (\"t_differentColBookList\", None),\n \"document/cloptimizepoetry\": (\"c_optimizePoetryLayout\", None),\n\n \"document/ifdiglot\": (\"c_diglot\", lambda w,v : \"\" if v else \"%\"),\n \"document/diglotprifraction\": (\"s_diglotPriFraction\", lambda w,v : round((float(v)/100), 3) if v is not None else \"0.550\"),\n \"document/diglotsecfraction\": (\"s_diglotPriFraction\", lambda w,v : round(1 - (float(v)/100), 3) if v is not None else \"0.450\"),\n \"document/diglotsecprj\": (\"fcb_diglotSecProject\", None),\n \"document/diglotpicsources\": (\"fcb_diglotPicListSources\", None),\n \"document/diglot2captions\": (\"c_diglot2captions\", None),\n \"document/diglotswapside\": (\"c_diglotSwapSide\", lambda w,v: \"true\" if v else \"false\"),\n \"document/diglotsepnotes\": (\"c_diglotSeparateNotes\", lambda w,v: \"true\" if v else \"false\"),\n \"document/diglotsecconfig\": (\"ecb_diglotSecConfig\", None),\n \"document/diglotmergemode\": (\"c_diglotMerge\", lambda w,v: \"simple\" if v else \"doc\"),\n \"document/diglotadjcenter\": (\"c_diglotAdjCenter\", None),\n \"document/diglotnotesrule\": (\"c_diglotNotesRule\", lambda w,v: \"true\" if v else \"false\"),\n \"document/diglotjoinvrule\": (\"c_diglotJoinVrule\", lambda w,v: \"true\" if v else \"false\"),\n\n \"document/hasnofront_\": (\"c_frontmatter\", lambda w,v: \"%\" if v else \"\"),\n \"document/noblankpage\": (\"c_periphSuppressPage\", None),\n\n \"header/ifshowbook\": (\"c_rangeShowBook\", lambda w,v :\"false\" if v else \"true\"),\n \"header/ifshowchapter\": (\"c_rangeShowChapter\", lambda w,v :\"false\" if v else \"true\"),\n \"header/ifshowverse\": (\"c_rangeShowVerse\", lambda w,v :\"true\" if v else \"false\"),\n # \"header/chvseparator\": (\"c_sepColon\", lambda w,v : \":\" if v else \".\"),\n \"header/chvseparator\": (\"r_CVsep\", lambda w,v : \":\" if v == \"colon\" else \".\"),\n \"header/ifrhrule\": (\"c_rhrule\", lambda w,v: \"\" if v else \"%\"),\n \"header/hdrleftside\": (\"r_hdrLeft\", None),\n \"header/hdrleft\": (\"ecb_hdrleft\", lambda w,v: v or \"-empty-\"),\n \"header/hdrcenterside\": (\"r_hdrCenter\", None),\n \"header/hdrcenter\": (\"ecb_hdrcenter\", lambda w,v: v or \"-empty-\"),\n \"header/hdrrightside\": (\"r_hdrRight\", None),\n \"header/hdrright\": (\"ecb_hdrright\", lambda w,v: v or \"-empty-\"),\n \"header/mirrorlayout\": (\"c_mirrorpages\", lambda w,v: \"true\" if v else \"false\"),\n \n \"footer/ftrcenterside\": (\"r_ftrCenter\", None),\n \"footer/ftrcenter\": (\"ecb_ftrcenter\", lambda w,v: v or \"-empty-\"),\n 
\"footer/ifftrtitlepagenum\": (\"c_pageNumTitlePage\", lambda w,v: \"\" if v else \"%\"),\n \"footer/ifprintconfigname\": (\"c_printConfigName\", lambda w,v: \"\" if v else \"%\"),\n # \"footer/noinkinfooter\": (\"c_noInkFooter\", None),\n\n \"notes/includefootnotes\": (\"c_includeFootnotes\", lambda w,v: \"%\" if v else \"\"),\n \"notes/fneachnewline\": (\"c_fneachnewline\", lambda w,v: \"%\" if v else \"\"),\n \"notes/fnoverride\": (\"c_fnOverride\", None),\n \"notes/iffnautocallers\": (\"c_fnautocallers\", lambda w,v :\"true\" if v else \"false\"),\n \"notes/fncallers\": (\"t_fncallers\", lambda w,v: v if w.get(\"c_fnautocallers\") else \"\"),\n \"notes/fnresetcallers\": (\"c_fnpageresetcallers\", lambda w,v: \"\" if v else \"%\"),\n \"notes/fnomitcaller\": (\"c_fnomitcaller\", lambda w,v: \"%\" if v else \"\"),\n\n \"notes/includexrefs\": (\"c_includeXrefs\", lambda w,v: \"%\" if v else \"\"),\n \"notes/xreachnewline\": (\"c_xreachnewline\", lambda w,v: \"%\" if v else \"\"),\n \"notes/xroverride\": (\"c_xrOverride\", None),\n \"notes/ifxrautocallers\": (\"c_xrautocallers\", lambda w,v :\"true\" if v else \"false\"),\n \"notes/xrcallers\": (\"t_xrcallers\", lambda w,v: v if w.get(\"c_xrautocallers\") else \"\"),\n \"notes/xrresetcallers\": (\"c_xrpageresetcallers\", lambda w,v: \"\" if v else \"%\"),\n \"notes/xromitcaller\": (\"c_xromitcaller\", lambda w,v: \"%\" if v else \"\"),\n\n \"notes/xrlocation\": (\"r_xrpos\", lambda w,v: r\"\" if v == \"centre\" else \"%\"),\n \"notes/xrpos\": (\"r_xrpos\", None),\n \"notes/xrcolside\": (\"fcb_colXRside\", None),\n \"notes/xrcentrecolwidth\": (\"s_centreColWidth\", lambda w,v: int(float(v)) if v else \"60\"),\n \"notes/xrguttermargin\": (\"s_xrGutterWidth\", lambda w,v: \"{:.1f}\".format(float(v)) if v else \"2.0\"),\n \"notes/xrcolrule\": (\"c_xrColumnRule\", lambda w,v: \"true\" if v else \"false\"),\n \"notes/xrcolbottom\": (\"c_xrColumnBottom\", lambda w,v: \"true\" if v else \"false\"),\n \"notes/ifxrexternalist\": (\"c_useXrefList\", lambda w,v: \"%\" if v else \"\"),\n \"notes/xrlistsource\": (\"r_xrSource\", None),\n \"notes/xrlistsize\": (\"s_xrSourceSize\", lambda w,v: int(float(v)) if v else \"3\"),\n \"notes/xrfilterbooks\": (\"fcb_filterXrefs\", None),\n # \"notes/xrlocalstrongs\": (\"c_strongsLocal\", None), # now added to strongsndx section below\n \"notes/addcolon\": (\"c_addColon\", None),\n \"notes/keepbookwithrefs\": (\"c_keepBookWithRefs\", None),\n \"notes/glossaryfootnotes\": (\"c_glossaryFootnotes\", None),\n \"notes/fnpos\": (\"r_fnpos\", None),\n \"notes/columnnotes_\": (\"r_fnpos\", lambda w,v: \"true\" if v == \"column\" else \"false\"),\n \"notes/endnotes_\": (\"r_fnpos\", lambda w,v: \"\" if v == \"endnotes\" else \"%\"),\n\n \"notes/iffootnoterule\": (\"c_footnoterule\", lambda w,v: \"%\" if v else \"\"),\n \"notes/ifxrefrule\": (\"c_xrefrule\", lambda w,v: \"%\" if v else \"\"),\n\n \"notes/abovenotespace\": (\"s_fnAboveSpace\", None),\n \"notes/belownoterulespace\": (\"s_fnBelowSpace\", None),\n \"notes/fnruleposn\": (\"fcb_fnHorizPosn\", None),\n \"notes/fnruleindent\": (\"s_fnIndent\", None),\n \"notes/fnrulelength\": (\"s_fnLength\", None),\n \"notes/fnrulethick\": (\"s_fnThick\", None),\n \n \"notes/abovexrefspace\": (\"s_xrAboveSpace\", None),\n \"notes/belowxrefrulespace\": (\"s_xrBelowSpace\", None),\n \"notes/xrruleposn\": (\"fcb_xrHorizPosn\", None),\n \"notes/xrruleindent\": (\"s_xrIndent\", None),\n \"notes/xrrulelength\": (\"s_xrLength\", None),\n \"notes/xrrulethick\": (\"s_xrThick\", None),\n \n 
\"notes/internotespace\": (\"s_internote\", lambda w,v: f2s(float(v or 3))),\n\n \"notes/horiznotespacemin\": (\"s_notespacingmin\", lambda w,v: f2s(float(v)) if v is not None else \"7\"),\n \"notes/horiznotespacemax\": (\"s_notespacingmax\", lambda w,v: f2s(float(v)) if v is not None else \"27\"),\n\n \"document/fontregular\": (\"bl_fontR\", lambda w,v,s: v.asTeXFont(s.inArchive) if v else \"\"),\n \"document/fontbold\": (\"bl_fontB\", lambda w,v,s: v.asTeXFont(s.inArchive) if v else \"\"),\n \"document/fontitalic\": (\"bl_fontI\", lambda w,v,s: v.asTeXFont(s.inArchive) if v else \"\"),\n \"document/fontbolditalic\": (\"bl_fontBI\", lambda w,v,s: v.asTeXFont(s.inArchive) if v else \"\"),\n \"document/fontextraregular\":(\"bl_fontExtraR\", lambda w,v,s: v.asTeXFont(s.inArchive) if v else \"\"),\n \"snippets/fancyintro\": (\"c_prettyIntroOutline\", None),\n \"snippets/pdfoutput\": (\"fcb_outputFormat\", None),\n \"snippets/diglot\": (\"c_diglot\", lambda w,v: True if v else False),\n \"snippets/fancyborders\": (\"c_borders\", None),\n \"document/includeimg\": (\"c_includeillustrations\", None),\n \"thumbtabs/ifthumbtabs\": (\"c_thumbtabs\", None),\n \"thumbtabs/numtabs\": (\"s_thumbtabs\", None),\n \"thumbtabs/length\": (\"s_thumblength\", None),\n \"thumbtabs/height\": (\"s_thumbheight\", None),\n \"thumbtabs/background\": (\"col_thumbback\", None),\n \"thumbtabs/rotate\": (\"c_thumbrotate\", None),\n \"thumbtabs/rotatetype\": (\"fcb_rotateTabs\", None),\n \"thumbtabs/thumbiszthumb\": (\"c_thumbIsZthumb\", None),\n \"thumbtabs/restart\": (\"c_thumbrestart\", None),\n \"thumbtabs/groups\": (\"t_thumbgroups\", None),\n\n \"scripts/mymr/syllables\": (\"c_scrmymrSyllable\", None),\n\n \"strongsndx/includenames\": (\"c_strongsInclNames\", None),\n \"strongsndx/localterms\": (\"c_strongsLocal\", None),\n \"strongsndx/showhebrew\": (\"c_strongsHeb\", None),\n \"strongsndx/showgreek\": (\"c_strongsGrk\", None),\n \"strongsndx/showindex\": (\"c_strongsNdx\", None),\n \"strongsndx/sourcelang\": (\"c_strongsSrcLg\", None),\n \"strongsndx/transliterate\": (\"c_strongsTranslit\", None),\n \"strongsndx/renderings\": (\"c_strongsRenderings\", None),\n \"strongsndx/definitions\": (\"c_strongsDefn\", None),\n \"strongsndx/keyvrsrefs\": (\"c_strongsKeyVref\", None),\n \"strongsndx/fallbackprj\": (\"fcb_strongsFallbackProj\", None),\n \"strongsndx/majorlang\": (\"fcb_strongsMajorLg\", None),\n \"strongsndx/nocomments\": (\"c_strongsNoComments\", None),\n \"strongsndx/wildcards\": (\"fcb_strongswildcards\", None),\n \"strongsndx/raglines\": (\"s_strongRag\", None),\n \"strongsndx/ndxbookid\": (\"fcb_strongsNdxBookId\", None),\n \"strongsndx/twocols\": (\"c_strongs2cols\", None),\n \"strongsndx/openineditor\": (\"c_strongsOpenIndex\", None),\n}\n\nBorders = {'c_inclPageBorder': ('pageborder', 'fancy/pageborderpdf', 'A5 page border.pdf'),\n 'c_inclSectionHeader': ('sectionheader', 'fancy/sectionheaderpdf', 'A5 section head border.pdf'),\n 'c_inclEndOfBook': ('endofbook', 'fancy/endofbookpdf', 'decoration.pdf'),\n 'c_inclVerseDecorator': ('versedecorator', 'fancy/versedecoratorpdf', 'Verse number star.pdf'),\n 'c_inclFrontMatter': ('FrontPDFs', 'project/frontincludes', '\\\\includepdf{{{}}}'),\n 'c_inclBackMatter': ('BackPDFs', 'project/backincludes', '\\\\includepdf{{{}}}'),\n 'c_applyWatermark': ('watermarks', 'paper/watermarkpdf', r'\\def\\MergePDF{{\"{}\"}}')\n}\n\n\nclass TexModel:\n _peripheralBooks = [\"FRT\", \"INT\", \"GLO\", \"TDX\", \"NDX\", \"CNC\", \"OTH\", \"BAK\", \"XXA\", \"XXB\", \"XXC\", 
\"XXD\", \"XXE\", \"XXF\", \"XXG\"]\n _fonts = {\n \"fontregular\": (\"bl_fontR\", None, None, None, None),\n \"fontbold\": (\"bl_fontB\", None, \"c_fakebold\", \"fontbold/embolden\", \"fontbold/slant\"),\n \"fontitalic\": (\"bl_fontI\", None, \"c_fakeitalic\", \"fontitalic/embolden\", \"fontitalic/slant\"),\n \"fontbolditalic\": (\"bl_fontBI\", None, \"c_fakebolditalic\", \"fontbolditalic/embolden\", \"fontbolditalic/slant\"),\n \"fontextraregular\": (\"bl_fontExtraR\", \"c_useFallbackFont\", None, None, None),\n }\n _mirrorRL = {r'\\lastref': r'\\firstref',\n r'\\firstref': r'\\lastref'\n }\n _swapRL = {'left': 'right',\n 'center': 'center',\n 'right': 'left'\n }\n _glossarymarkup = {\n \"no\": r\"\\1\", # \"None\": \n None: r\"\\1\", # None: \n \"bd\": r\"\\\\bd \\1\\\\bd*\", # \"format as bold\": \n \"it\": r\"\\\\it \\1\\\\it*\", # \"format as italics\": \n \"bi\": r\"\\\\bdit \\1\\\\bdit*\", # \"format as bold italics\": \n \"em\": r\"\\\\em \\1\\\\em*\", # \"format with emphasis\": \n \"ww\": r\"\\\\w \\1\\\\w*\", # \"\\w ...\\w* char style\": \n # Note that these glossary markers can be styled with \\zglm \n # But this doesn't work if fallback font is turned on for these chars\n \"fb\": r\"\\\\zglm \\u2E24\\\\zglm*\\1\\\\zglm \\u2E25\\\\zglm*\", # \"with ⸤floor⸥ brackets\": \n \"fc\": r\"\\\\zglm \\u230a\\\\zglm*\\1\\\\zglm \\u230b\\\\zglm*\", # \"with ⌊floor⌋ characters\": \n \"cc\": r\"\\\\zglm \\u231e\\\\zglm*\\1\\\\zglm \\u231f\\\\zglm*\", # \"with ⌞corner⌟ characters\":\n \"sb\": r\"*\\1\", # \"star *before word\": \n \"sa\": r\"\\1*\", # \"star after* word\": \n \"cb\": r\"^\\1\", # \"circumflex ^before word\": \n \"ca\": r\"\\1^\" # \"circumflex after^ word\": \n }\n _snippets = {\n \"snippets/fancyintro\": (\"c_prettyIntroOutline\", None, FancyIntro),\n \"snippets/pdfoutput\": (\"fcb_outputFormat\", lambda x: True, PDFx1aOutput),\n \"snippets/diglot\": (\"c_diglot\", None, Diglot),\n \"snippets/fancyborders\": (\"c_borders\", None, FancyBorders),\n \"snippets/thumbtabs\": (\"c_thumbtabs\", None, ThumbTabs),\n \"snippets/colophon\": (\"c_colophon\", None, Colophon),\n \"snippets/grid\": (\"c_grid\", None, Grid)\n }\n _settingmappings = {\n \"notes/xrcallers\": \"crossrefs\",\n \"notes/fncallers\": \"footnotes\"\n }\n _crossRefInfo = None\n\n _periphids = {\n \"title page\": \"title\",\n \"half title page\": \"halftitle\",\n \"promotional page\": \"promo\",\n \"imprimatur\": \"imprimatur\",\n \"publication data\": \"pubdata\",\n \"foreword\": \"foreword\",\n \"preface\": \"preface\",\n \"table of contents\": \"contents\",\n \"alphabetical contents\": \"alphacontents\",\n \"table of abbreviations\": \"abbreviations\",\n \"bible introduction\": \"intbible\",\n \"old testament introduction\": \"intot\",\n \"pentateuch introduction\": \"intpent\",\n \"history introduction\": \"inthistory\",\n \"poetry introduction\": \"intpoetry\",\n \"prophecy introduction\": \"intprophesy\",\n \"deuterocanon introduction\": \"intdc\",\n \"new testament introduction\": \"intnt\",\n \"gospels introduction\": \"intgospels\",\n \"epistles introduction\": \"intepistles\",\n \"letters introduction\": \"intletters\",\n \"chronology\": \"chron\",\n \"weights and measures\": \"measures\",\n \"map index\": \"maps\",\n \"lxx quotes in nt\": \"lxxquotes\",\n \"cover\": \"cover\",\n \"spine\": \"spine\"\n }\n\n _tocleaders = [\n \"\",\n r\"\\hskip .5pt .\\hskip .5pt\",\n r\"\\hskip 3pt .\\hskip 3pt\",\n r\"\\hskip 6pt \\emdash\\hskip 3pt\",\n r\"\\hrule\"\n ]\n\n _specialchars = {\n '*': 'asterisk',\n '%': 
'percent',\n '#': 'hash',\n '$': 'dollar',\n '&': 'ampersand',\n '^': 'circumflex'\n }\n # '|': 'pipe'\n\n def __init__(self, printer, path, ptsettings, prjid=None, inArchive=False):\n from ptxprint.view import VersionStr\n self.VersionStr = VersionStr\n self.printer = printer\n self.ptsettings = ptsettings\n self.inArchive = inArchive\n self.changes = None\n self.localChanges = None\n self.debug = False\n self.interlinear = None\n self.imageCopyrightLangs = {}\n self.frontperiphs = None\n self.xrefs = None\n libpath = pycodedir()\n self.dict = {\"/ptxpath\": str(path).replace(\"\\\\\",\"/\"),\n \"/ptxprintlibpath\": libpath.replace(\"\\\\\",\"/\"),\n \"/iccfpath\": os.path.join(libpath, \"default_cmyk.icc\").replace(\"\\\\\",\"/\"),\n \"/ptx2pdf\": self.printer.scriptsdir.replace(\"\\\\\", \"/\")}\n self.prjid = prjid\n if self.prjid is not None:\n self.dict['project/id'] = self.prjid\n self._hdrmappings = localhdrmappings()\n if self.printer is not None:\n self.sheets = Sheets(self.printer.getStyleSheets(generated=True))\n self.update()\n\n def docdir(self):\n base = os.path.join(self.dict[\"/ptxpath\"], self.dict[\"project/id\"])\n docdir = os.path.join(base, 'local', 'ptxprint', self.printer.configName())\n return docdir, base\n\n def update(self):\n \"\"\" Update model from UI \"\"\"\n j = os.path.join\n rel = lambda x, y:os.path.relpath(x, y).replace(\"\\\\\", \"/\")\n self.printer.setDate() # Update date/time to now\n cpath = self.printer.configPath(self.printer.configName())\n rcpath = self.printer.configPath(\"\")\n self.updatefields(ModelMap.keys())\n docdir, base = self.docdir()\n self.dict[\"document/directory\"] = \".\" # os.path.abspath(docdir).replace(\"\\\\\",\"/\")\n self.dict['project/adjlists'] = rel(j(cpath, \"AdjLists\"), docdir).replace(\"\\\\\",\"/\") + \"/\"\n self.dict['project/triggers'] = rel(j(cpath, \"triggers\"), docdir).replace(\"\\\\\",\"/\") + \"/\"\n self.dict['project/piclists'] = rel(j(self.printer.working_dir, \"tmpPicLists\"), docdir).replace(\"\\\\\",\"/\") + \"/\"\n self.dict['project/id'] = self.printer.prjid\n self.dict['config/name'] = self.printer.configId\n self.dict['/ptxrpath'] = rel(self.dict['/ptxpath'], docdir)\n self.dict['/cfgrpath'] = rel(cpath, docdir)\n self.processHdrFtr(self.printer)\n # sort out caseless figures folder. This is a hack\n for p in (\"Figures\", \"figures\"):\n picdir = j(base, p)\n if os.path.exists(picdir):\n break\n self.dict[\"project/picdir\"] = rel(picdir, docdir).replace(\"\\\\\",\"/\")\n # Look in local Config folder for ptxprint-mods.tex, and drop back to shared/ptxprint if not found\n fpath = j(cpath, \"ptxprint-mods.tex\")\n if not os.path.exists(fpath):\n fpath = j(rcpath, \"ptxprint-mods.tex\")\n self.dict['/modspath'] = rel(fpath, docdir).replace(\"\\\\\",\"/\")\n fpath = j(cpath, \"ptxprint-premods.tex\")\n if not os.path.exists(fpath):\n fpath = j(rcpath, \"ptxprint-premods.tex\")\n self.dict['/premodspath'] = rel(fpath, docdir).replace(\"\\\\\",\"/\")\n if \"document/diglotcfgrpath\" not in self.dict:\n self.dict[\"document/diglotcfgrpath\"] = \"\"\n self.dict['paragraph/linespacingfactor'] = f2s(float(self.dict['paragraph/linespacing']) \\\n / self.dict[\"paragraph/linespacebase\"] / float(self.dict['paper/fontfactor']), dp=8)\n self.dict['paragraph/ifhavehyphenate'] = \"\" if os.path.exists(os.path.join(self.printer.configPath(\"\"), \\\n \"hyphen-\"+self.dict[\"project/id\"]+\".tex\")) else \"%\"\n # forward cleanup. 
If ptxprint-mods.sty is wanted but missing, copy PrintDraft-mods.sty into place\n if self.dict[\"project/ifusemodssty\"] == \"\":\n modspath = os.path.join(cpath, \"ptxprint-mods.sty\")\n if not os.path.exists(modspath):\n spath = os.path.join(docdir, \"PrintDraft-mods.sty\")\n if os.path.exists(spath):\n copyfile(spath, modspath)\n self.dict[\"paper/pagegutter\"] = \"{:.2f}mm\".format(Dimension(self.dict[\"paper/width\"]).asunits(\"mm\") \\\n - (self.dict[\"paper/gutter\"] if self.dict[\"paper/ifaddgutter\"] == \"true\" else 0.))\n if self.dict[\"project/interlinear\"] != \"%\":\n self.interlinear = Interlinear(self.dict[\"project/interlang\"],\n os.path.join(self.dict[\"/ptxpath\"], self.dict[\"project/id\"]))\n regfont = self.printer.get(\"bl_fontR\")\n if regfont is not None:\n self.dict[\"document/spacecntxtlztn\"] = \"2\" if regfont.isCtxtSpace else \"0\"\n self.calculateMargins()\n if self.inArchive:\n for b, a in Borders.items():\n if self.dict[a[1]] is None or not self.dict[a[1]]:\n continue\n islist = a[2].startswith(\"\\\\\")\n fname = getattr(self.printer, a[0], (None if islist else a[2]))\n if fname is None:\n fname = Path(\".\" if islist else a[2])\n if islist and not isinstance(fname, (list, tuple)):\n fname = [fname]\n if islist:\n self.dict[a[1]] = \"\\n\".join(a[2].format(\"../shared/ptxprint/{}\".format(f.name)) for f in fname)\n else:\n self.dict[a[1]] = \"../shared/ptxprint/{}\".format(fname.name)\n if self.dict[\"fancy/versedecorator\"] != \"%\":\n self.dict[\"fancy/versedecoratorisfile\"] = \"\" if self.dict[\"fancy/versedecoratortype\"] == \"file\" else \"%\"\n self.dict[\"fancy/versedecoratorisayah\"] = \"\" if self.dict[\"fancy/versedecoratortype\"] == \"ayah\" else \"%\"\n else:\n self.dict[\"fancy/versedecoratorisfile\"] = \"%\"\n self.dict[\"fancy/versedecoratorisayah\"] = \"%\"\n self.dict['notes/abovenotetotal'] = f2s(float(self.dict['notes/abovenotespace'] or 0)\n + float(self.dict['notes/belownoterulespace'] or 0))\n # print(\", \".join(\"{}={}\".format(a, self.dict[\"fancy/versedecorator\"+a]) for a in (\"\", \"type\", \"isfile\", \"isayah\")))\n \n a = self.printer.get('fcb_gridOffset')\n if a == 'margin':\n vals = (self.dict[\"paper/margins\"], self.dict[\"paper/topmargin\"])\n else:\n vals = (\"0.0\", \"0.0\")\n (self.dict[\"grid/xoffset_\"], self.dict[\"grid/yoffset_\"]) = vals\n for a in ('project/frontfile', 'project/ptxprintstyfile_', 'diglot/ptxprintstyfile_'):\n if a not in self.dict:\n self.dict[a] = ''\n\n if self.dict.get('document/tocleaders', None) is None:\n self.dict['document/tocleaders'] = 0\n self.dict['document/iftocleaders'] = '' if int(self.dict['document/tocleaders'] or 0) > 0 else '%'\n self.dict['document/tocleaderstyle'] = self._tocleaders[int(self.dict['document/tocleaders'] or 0)]\n self.calcRuleParameters()\n\n def updatefields(self, a):\n global get\n def get(k): return self[k]\n for k in a:\n v = ModelMap[k]\n val = self.printer.get(v[0], skipmissing=k.startswith(\"scripts/\")) if v[0] is not None else None\n if v[1] is None:\n self.dict[k] = val\n else:\n try:\n sig = signature(v[1])\n if len(sig.parameters) == 2:\n self.dict[k] = v[1](self.printer, val)\n else:\n self.dict[k] = v[1](self.printer, val, self)\n except Exception as e:\n raise type(e)(\"In TeXModel with key {}, \".format(k) + str(e))\n\n def calcRuleParameters(self):\n notemap = {'fn': 'note', 'xr': 'xref'}\n fnrule = None\n enrule = None\n endnotes = []\n for a in ('fn', 'xr'):\n if self.dict['notes/{}pos'.format(a)] == 'endnote':\n enrule = a if enrule is None else 
enrule\n endnotes.append(r\"\\NoteAtEnd{{{}}}\".format(a[0]))\n elif fnrule is None:\n fnrule = a\n for a in (('Foot', fnrule), ('End', enrule)):\n dat = []\n if a[1] is not None:\n pos = int(self.dict['notes/{}ruleposn'.format(a[1])] or 0)\n left = \"\\hskip {:.2f} mm\".format(float(self.dict['notes/{}ruleindent'.format(a[1])] or 0.))\n right = r\"\\hss\"\n if pos == 2 or pos == 4: # Right or Outer\n right, left = (left, right)\n elif pos == 5:\n left = r\"\\hss\"\n if pos < 3 or pos == 5: # Left, Right or Centre\n dat.append(r\"\\def\\{}NoteRuleLeftIndent{{{}}}\".format(a[0], left))\n dat.append(r\"\\def\\{}NoteRuleRightIndent{{{}}}\".format(a[0], right))\n else:\n dat.append(r\"\\def\\{}NoteRuleLeftIndent{{\\ifodd\\pageno {}\\else {}\\fi}}\".format(a[0], left, right))\n dat.append(r\"\\def\\{}NoteRuleRightIndent{{\\ifodd\\pageno {}\\else {}\\fi}}\".format(a[0], right, left))\n dat.append(r\"\\def\\{}NoteRuleThickness{{{} pt}}\".format(a[0], self.dict['notes/{}rulethick'.format(a[1])] or \"0.4\"))\n dat.append(r\"\\def\\{}NoteRuleWidth{{{:.2f}}}\".format(a[0], float(self.dict['notes/{}rulelength'.format(a[1])] or 100.)/100))\n bspace = float(self.dict['notes/below{}rulespace'.format(notemap[a[1]])] or 0.)\n dat.append(r\"\\def\\Below{}NoteRuleSpace{{{:.1f} pt}}\".format(a[0], bspace))\n aspace = float(self.dict['notes/above{}space'.format(notemap[a[1]])] or 0.) + bspace\n dat.append(r\"\\Above{}NoteSpace={:.1f} pt\".format(a[0] if a[0] != \"Foot\" else \"\", aspace))\n self.dict['noterules/{}'.format(a[0].lower())] = \"\\n\".join(dat)\n self.dict['noterules/endnotemarkers'] = \"\\n\".join(endnotes)\n\n def __getitem__(self, key):\n return self.dict[key]\n\n def __setitem__(self, key, value):\n self.dict[key] = value\n\n def asBool(self, key, true=None, false=None):\n val = self.dict.get(key, None)\n if val is None:\n return False\n elif true is not None:\n return val == true\n elif false is not None:\n return val != false\n elif isinstance(val, bool):\n return val\n elif val == \"%\" or val == \"false\":\n return False\n else:\n return True\n\n def prePrintChecks(self):\n reasons = []\n for a in ('regular', 'bold', 'italic', 'bolditalic'):\n # print(\"Checking {}: {}\".format(a, self.dict['document/font{}'.format(a)]))\n if not self.dict['document/font{}'.format(a)]:\n reasons.append(_(\"Missing font ({})\").format(a))\n break\n return reasons\n\n def processHdrFtr(self, printer):\n \"\"\" Update model headers from model UI read values \"\"\"\n diglot = True if self.dict[\"document/ifdiglot\"] == \"\" else False\n v = self.dict[\"footer/ftrcenter\"]\n pri = self.dict[\"footer/ftrcenterside\"] == \"Pri\" \n t = self._hdrmappings.get(v, v)\n if diglot:\n t = self._addLR(t, pri)\n swap = self.dict['document/diglotswapside'] == 'true'\n ratio = float(self.dict['document/diglotprifraction'])\n # print(f\"{ratio=}\")\n if ratio > 0.5:\n lhfil = \"\\\\hskip 0pt plus {}fil\".format(f2s(ratio/(1-ratio)-1))\n rhfil = \"\"\n else:\n rhfil = \"\\\\hskip 0pt plus {}fil\".format(f2s((1-ratio)/ratio-1))\n lhfil = \"\"\n self.dict['footer/oddcenter'] = t\n self.dict['footer/evencenter'] = t\n if self.dict['footer/ifftrtitlepagenum'] == \"\":\n self.dict['footer/titleevencenter'] = self.dict['footer/titleoddcenter'] = self._addLR('\\\\pagenumber', pri)\n elif self.dict['footer/ifprintconfigname'] == \"\":\n self.dict['footer/titleevencenter'] = self.dict['footer/titleoddcenter'] = self.dict['config/name']\n else:\n self.dict['footer/titleevencenter'] = self.dict['footer/evencenter']\n 
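# the odd-side title footer likewise falls back to the ordinary odd footer\n 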
self.dict['footer/titleoddcenter'] = self.dict['footer/oddcenter']\n\n mirror = self.asBool(\"header/mirrorlayout\")\n for side in ('left', 'center', 'right'):\n v = self.dict[\"header/hdr\"+side]\n pri = self.dict[\"header/hdr\"+side+\"side\"] == \"Pri\"\n t = self._hdrmappings.get(v, v)\n if diglot:\n t = self._addLR(t, pri)\n self.dict['header/odd{}'.format(side)] = t\n if mirror:\n self.dict['header/even{}'.format(self._swapRL[side])] = self.mirrorHeaders(t, diglot)\n else:\n self.dict['header/even{}'.format(side)] = t\n\n if t.startswith((r'\\first', r'\\last', r'\\range')): # ensure noVodd + noVeven is \\empty\n self.dict['header/noVodd{}'.format(side)] = r'\\empty'\n else:\n self.dict['header/noVodd{}'.format(side)] = t # copy the other header as is\n if mirror:\n if t.startswith((r'\\first', r'\\last', r'\\range')):\n self.dict['header/noVeven{}'.format(self._swapRL[side])] = r'\\empty'\n else:\n self.dict['header/noVeven{}'.format(self._swapRL[side])] = self.mirrorHeaders(t, diglot)\n else:\n if t.startswith((r'\\first', r'\\last', r'\\range')): # ensure noVodd + noVeven is \\empty\n self.dict['header/noVeven{}'.format(side)] = r'\\empty'\n else:\n self.dict['header/noVeven{}'.format(side)] = t \n\n if side == \"center\" and diglot and self.dict[\"document/diglotadjcenter\"]:\n for a in ('header/odd', 'header/even', 'footer/odd', 'footer/even',\n 'footer/titleeven', 'footer/titleodd', 'header/noVeven', 'header/noVodd'):\n b = (a+\"{}\").format(side)\n if 'even' in a:\n self.dict[b] = (rhfil if mirror ^ swap else lhfil) \\\n + self.dict[b] + (lhfil if mirror ^ swap else rhfil)\n else:\n self.dict[b] = (rhfil if swap else lhfil) + self.dict[b] + (lhfil if swap else rhfil)\n\n def _addLR(self, t, pri):\n if t in [r\"\\firstref\", r\"\\lastref\", r\"\\rangeref\", r\"\\pagenumber\", r\"\\hrsmins\", r\"\\isodate\", \\\n r\"\\book\", r\"\\bookalt\"]: \n if pri:\n t = t+'L'\n else:\n t = t+'R'\n elif t == r\"\\empty\":\n pass\n else:\n if pri:\n t = \"\\headfootL{{{}}}\".format(t)\n else:\n t = \"\\headfootR{{{}}}\".format(t)\n return t\n\n def mirrorHeaders(self, h, dig=False):\n if dig and h.endswith((\"L\", \"R\")):\n try:\n return self._mirrorRL[h[:-1]]+h[-1:]\n except KeyError:\n return h\n else:\n try:\n return self._mirrorRL[h]\n except KeyError:\n return h\n\n def calculateMargins(self):\n (marginmms, topmarginmms, bottommarginmms, headerposmms, footerposmms,\n ruleposmms, headerlabel, footerlabel) = self.printer.getMargins()\n self.dict[\"paper/topmarginfactor\"] = f2s(topmarginmms / marginmms)\n self.dict[\"paper/bottommarginfactor\"] = f2s(bottommarginmms / marginmms)\n self.dict[\"paper/headerposition\"] = f2s(headerposmms / marginmms)\n self.dict[\"paper/footerposition\"] = f2s(footerposmms / marginmms)\n self.dict[\"paper/ruleposition\"] = f2s(ruleposmms * 72.27 / 25.4)\n \n def texfix(self, path):\n return path.replace(\" \", r\"\\ \")\n\n def asTex(self, template=\"template.tex\", filedir=\".\", jobname=\"Unknown\", extra=\"\"):\n for k, v in self._settingmappings.items():\n if self.dict[k] == \"\":\n self.dict[k] = self.ptsettings.dict.get(v, \"a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z\")\n res = []\n resetPageDone = False\n docdir, docbase = self.docdir()\n self.dict['jobname'] = jobname\n self.dict['document/imageCopyrights'] = self.generateImageCopyrightText()\n # if self.dict['document/includeimg'] else self.generateEmptyImageCopyrights()\n self.dict['project/colophontext'] = re.sub(r'://', r':/ / ', self.dict['project/colophontext'])\n 
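# the :// split above lets long URLs line-break in the colophon; the next change lower-cases \\zimagecopyrightsXX suffixes to match the macros generateImageCopyrightText defines\n 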
self.dict['project/colophontext'] = re.sub(r\"(?i)(\\\\zimagecopyrights)([A-Z]{2,3})\", \\\n lambda m:m.group(0).lower(), self.dict['project/colophontext'])\n with universalopen(os.path.join(pycodedir(), template)) as inf:\n for l in inf.readlines():\n if l.startswith(r\"%\\ptxfile\"):\n res.append(r\"\\PtxFilePath={\"+os.path.relpath(filedir, docdir).replace(\"\\\\\",\"/\")+\"/}\")\n for i, f in enumerate(self.dict['project/bookids']):\n fname = self.dict['project/books'][i]\n if extra != \"\":\n fname = re.sub(r\"^([^.]*).(.*)$\", r\"\\1\"+extra+r\".\\2\", fname)\n if i == len(self.dict['project/bookids']) - 1 and self.dict['project/ifcolophon'] == \"\":\n res.append(r\"\\lastptxfiletrue\")\n if self.dict['project/pgbreakcolophon'] != '%':\n res.append(r\"\\endbooknoejecttrue\")\n if not resetPageDone and f not in self._peripheralBooks: \n if not self.dict['document/noblankpage']:\n res.append(r\"\\ifodd\\pageno\\else\\emptyoutput \\fi\")\n res.append(r\"\\pageno={}\".format(self.dict['document/startpagenum']))\n resetPageDone = True\n if not self.asBool('document/ifshow1chbooknum') and \\\n self.asBool('document/ifshowchapternums', '%') and \\\n f in oneChbooks:\n res.append(r\"\\OmitChapterNumbertrue\")\n res.append(r\"\\ptxfile{{{}}}\".format(fname))\n res.append(r\"\\OmitChapterNumberfalse\")\n elif self.dict['document/diffcolayout'] and \\\n f in self.dict['document/diffcolayoutbooks']:\n cols = self.dict['paper/columns']\n res.append(r\"\\BodyColumns={}\".format('2' if cols == '1' else '1'))\n res.append(r\"\\ptxfile{{{}}}\".format(fname))\n res.append(r\"\\BodyColumns={}\".format(cols))\n else:\n res.append(r\"\\ptxfile{{{}}}\".format(fname))\n elif l.startswith(r\"%\\extrafont\") and self.dict[\"document/fontextraregular\"]:\n spclChars = re.sub(r\"\\[uU]([0-9a-fA-F]{4,6})\", lambda m:chr(int(m.group(1), 16)),\n self.dict[\"paragraph/missingchars\"])\n # print(spclChars.split(' '), [len(x) for x in spclChars.split(' ')])\n if self.dict[\"paragraph/ifusefallback\"] and len(spclChars):\n badlist = \"\\u2018\\u2019\\u201c\\u201d*#%\"\n specials = spclChars.replace(\" \", \"\").encode(\"raw_unicode_escape\").decode(\"raw_unicode_escape\")\n a = [\"\".join(chr(ord(c) + 16 if ord(c) < 58 else ord(c) - 23)\n for c in str(hex(ord(x)))[2:]).lower() for x in specials]\n b = [\"\".join((c) for c in str(hex(ord(x)))[2:]).lower() for x in specials]\n c = [x for x in zip(a,b) if chr(int(x[1],16)) not in badlist]\n if not len(c):\n continue\n res.append(r\"% for defname @active+ @+digit => 0->@, 1->a ... 9->i A->j B->k .. 
F->o\")\n res.append(r\"% 12 (size) comes from \\p size\")\n res.append(r'\\def\\extraregular{{\"{}\"}}'.format(self.dict[\"document/fontextraregular\"]))\n res.append(r\"\\catcode`\\@=11\")\n res.append(r\"\\def\\do@xtrafont{\\x@\\s@textrafont\\ifx\\thisch@rstyle\\undefined\\m@rker\\else\\thisch@rstyle\\fi}\")\n for a,b in c:\n res.append(r\"\\def\\@ctive{}{{\\leavevmode{{\\do@xtrafont {}{}}}}}\".format(a, '^'*len(b), b))\n res.append(r\"\\DefineActiveChar{{{}{}}}{{\\@ctive{}}}\".format( '^'*len(b), b, a))\n res.append(r\"\\@ctivate\")\n res.append(r\"\\catcode`\\@=12\")\n else:\n res.append(r\"% No special/missing characters specified for fallback font\")\n elif l.startswith(r\"%\\horiznotespacing\"):\n mins = float(self.dict[\"notes/horiznotespacemin\"])\n maxs = float(self.dict[\"notes/horiznotespacemax\"])\n tgts = mins + ((maxs - mins) / 3)\n minus = tgts - mins\n plus = maxs - tgts\n res.append(r\"\\internoteskip={}pt plus {}pt minus {}pt\".format(f2s(tgts), f2s(plus), f2s(minus)))\n elif l.startswith(r\"%\\optimizepoetry\"):\n bks = self.dict[\"document/clabelbooks\"]\n if self.dict[\"document/ifchaplabels\"] == \"%\" and len(bks):\n for bk in bks.split(\" \"):\n if bk in self.dict['project/bookids']:\n res.append((r\"\\setbookhook{{start}}{{{}}}{{\\gdef\\BalanceThreshold{{3}}\\clubpenalty=50\"\n + r\"\\widowpenalty=50}}\").format(bk))\n res.append((r\"\\setbookhook{{end}}{{{}}}{{\\gdef\\BalanceThreshold{{0}}\\clubpenalty=10000\"\n + r\"\\widowpenalty=10000}}\").format(bk))\n elif l.startswith(r\"%\\snippets\"):\n for k, c in sorted(self._snippets.items(), key=lambda x: x[1][2].order):\n if c[1] is None:\n v = self.asBool(k)\n else:\n v = c[1](self.dict[k])\n if v:\n fn = getattr(c[2], 'generateTex', None)\n if fn is not None:\n res.append(fn(v, self))\n elif c[2].processTex:\n res.append(c[2].texCode.format(**self.dict))\n else:\n res.append(c[2].texCode)\n script = self.dict[\"document/script\"]\n if len(script) > 0:\n sclass = getattr(scriptsnippets, script[8:].lower(), None)\n if sclass is not None:\n res.append(sclass.tex(self))\n res.append(\"\"\"\n\\\\catcode\"FDEE=1 \\\\catcode\"FDEF=2\n\\\\prepusfm\n\\\\def\\\\zcopyright\\uFDEE{project/copyright}\\uFDEF\n\\\\def\\\\zlicense\\uFDEE{project/license}\\uFDEF\n\"\"\".format(**self.dict))\n if \"diglot/copyright\" in self.dict:\n res.append(\"\\\\def\\\\zcopyrightR\\uFDEE{}\\uFDEF\".format(self.dict[\"diglot/copyright\"]))\n res.append(\"\\\\unprepusfm\")\n elif l.startswith(r\"%\\defzvar\"):\n for k in self.printer.allvars():\n res.append(r\"\\defzvar{{{}}}{{{}}}\".format(k, self.printer.getvar(k)))\n for k, e in (('toctitle', 'document/toctitle'),):\n res.append(r\"\\defzvar{{{}}}{{{}}}\".format(k, self.dict[e]))\n else:\n res.append(l.rstrip().format(**self.dict))\n return \"\\n\".join(res).replace(\"\\\\OmitChapterNumberfalse\\n\\\\OmitChapterNumbertrue\\n\",\"\")\n\n def _doperiph(self, m):\n if self.frontperiphs is None:\n frtfile = os.path.join(self.printer.settings_dir, self.printer.prjid, self.printer.getBookFilename(\"FRT\"))\n self.frontperiphs = {}\n if not os.path.exists(frtfile):\n return \"\"\n with open(frtfile, encoding=\"utf-8\") as inf:\n mode = 0 # default\n currperiphs = []\n currk = None\n for l in inf.readlines():\n ma = re.match(r'\\\\periph\\s+([^|]+)(?:\\|\\s*(?:id\\s*=\\s*\"([^\"]+)|(\\S+)))', l)\n if ma:\n if mode == 1: # already collecting so save\n self.frontperiphs[currk] = \"\\n\".join(currperiphs)\n currk = ma[2] or ma[3]\n if not currk:\n currk = self._periphids.get(m[1].lower(), m[1].lower())\n 
currperiphs = []\n mode = 1\n elif mode == 1:\n if r\"\\periph\" in l:\n mode = 0\n else:\n currperiphs.append(l.rstrip())\n if currk is not None:\n self.frontperiphs[currk] = \"\\n\".join(currperiphs)\n k = m[1]\n if k in self.frontperiphs:\n return self.frontperiphs[k]\n else:\n return \"\"\n\n def createFrontMatter(self, outfname):\n self.dict['project/frontfile'] = os.path.basename(outfname)\n infpath = self.printer.configFRT()\n bydir = os.path.join(pycodedir(), \"images\").replace(\"\\\\\", \"/\")\n fmt = self.dict['snippets/pdfoutput']\n cmyk = fmt in ('CMYK', 'PDF/X-1A', 'PDF/A-1')\n if os.path.exists(infpath):\n fcontent = []\n with open(infpath, encoding=\"utf-8\") as inf:\n seenperiph = False\n for l in inf.readlines():\n if l.strip().startswith(r\"\\periph\"):\n l = r\"\\pb\" if self.dict['project/periphpagebreak'] and seenperiph else \"\"\n seenperiph = True\n l = re.sub(r\"\\\\zperiphfrt\\s*\\|([^\\\\\\s]+)\\s*\\\\\\*\", self._doperiph, l)\n l = re.sub(r\"\\\\zbl\\s*\\|(\\d+)\\\\\\*\", lambda m: \"\\\\b\\n\" * int(m.group(1)), l)\n l = re.sub(r\"\\\\zccimg\\s*(.*?)(?:\\|(.*?))?\\\\\\*\",\n lambda m: r'\\fig |src=\"'+bydir+\"/\"+m.group(1)+(\"_cmyk\" if cmyk else \"\") \\\n + '.jpg\" copy=\"None\" ' + m.group(2)+ r'\\fig*', l)\n l = re.sub(r'(\\\\fig .*?src=\")(.*?)(\".*?\\\\fig\\*)', lambda m:m.group(1)+m.group(2).replace(\"\\\\\",\"/\")+m.group(3), l)\n fcontent.append(l.rstrip())\n with open(outfname, \"w\", encoding=\"utf-8\") as outf:\n outf.write(\"\\n\".join(fcontent))\n\n def flattenModule(self, infpath, outdir, usfm=None):\n outfpath = os.path.join(outdir, os.path.basename(infpath))\n doti = outfpath.rfind(\".\")\n if doti > 0:\n outfpath = outfpath[:doti] + \"-flat\" + outfpath[doti:]\n usfms = self.printer.get_usfms()\n try:\n mod = Module(infpath, usfms, usfm=usfm)\n res = mod.parse()\n except SyntaxError as e:\n return (None, e)\n if usfm is not None:\n return res\n with open(outfpath, \"w\", encoding=\"utf-8\") as outf:\n outf.write(sfm.generate(res))\n return outfpath\n\n def runConversion(self, infpath, outdir):\n outfpath = infpath\n if self.dict['project/processscript'] and self.dict['project/selectscript']:\n outfpath = os.path.join(outdir, os.path.basename(infpath))\n doti = outfpath.rfind(\".\")\n if doti > 0:\n outfpath = outfpath[:doti] + \"-conv\" + outfpath[doti:]\n cmd = [self.dict[\"project/selectscript\"], infpath, outfpath]\n checkoutput(cmd) # dont't pass cmd as list when shell=True\n return outfpath\n\n def runChanges(self, changes, bk, dat):\n for c in changes:\n logger.debug(\"Change: {}\".format(c))\n if c[0] is None:\n dat = c[1].sub(c[2], dat)\n elif isinstance(c[0], str):\n if c[0] == bk:\n dat = c[1].sub(c[2], dat)\n else:\n def simple(s):\n return c[1].sub(c[2], s)\n dat = c[0](simple, bk, dat)\n return dat\n\n def convertBook(self, bk, chaprange, outdir, prjdir, letterspace=\"\\uFDD0\"):\n printer = self.printer\n if self.changes is None:\n if self.asBool('project/usechangesfile'):\n # print(\"Applying PrntDrftChgs:\", os.path.join(prjdir, 'PrintDraftChanges.txt'))\n #cpath = self.printer.configPath(self.printer.configName())\n #self.changes = self.readChanges(os.path.join(cpath, 'changes.txt'), bk)\n self.changes = self.readChanges(os.path.join(printer.configPath(printer.configName()), 'changes.txt'), bk)\n else:\n self.changes = []\n draft = \"-\" + (printer.configName() or \"draft\")\n self.makelocalChanges(printer, bk, chaprange=chaprange)\n customsty = os.path.join(prjdir, 'custom.sty')\n if not os.path.exists(customsty):\n 
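# \"%\" comments the custom.sty stylesheet line out of the TeX template\n 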
self.dict[\"/nocustomsty\"] = \"%\"\n else:\n self.dict[\"/nocustomsty\"] = \"\"\n fname = printer.getBookFilename(bk)\n if fname is None:\n infpath = os.path.join(prjdir, bk) # assume module\n infpath = self.flattenModule(infpath, outdir)\n if isinstance(infpath, tuple) and infpath[0] is None:\n self.printer.doError(\"Failed to flatten module text (due to a Syntax Error?):\", \n secondary=str(infpath[1]), \n title=\"PTXprint [{}] - Canonicalise Text Error!\".format(self.VersionStr),\n show=not self.printer.get(\"c_quickRun\"))\n return None\n else:\n infpath = os.path.join(prjdir, fname)\n if self.dict['project/when2processscript'] == \"before\":\n infpath = self.runConversion(infpath, outdir)\n outfname = os.path.basename(infpath)\n # outfname = fname\n doti = outfname.rfind(\".\")\n if doti > 0:\n outfname = outfname[:doti] + draft + outfname[doti:]\n outfpath = os.path.join(outdir, outfname)\n codepage = self.ptsettings.get('Encoding', 65001)\n with universalopen(infpath, cp=codepage) as inf:\n dat = inf.read()\n\n doc = None\n if self.interlinear is not None:\n doc = self._makeUSFM(dat.splitlines(True), bk)\n linelengths = [len(x) for x in dat.splitlines(True)]\n if doc is not None:\n doc.calc_PToffsets()\n self.interlinear.convertBk(bk, doc, linelengths)\n if len(self.interlinear.fails):\n printer.doError(\"The following references need to be reapproved: \" + \" \".join(self.interlinear.fails),\n show=not printer.get(\"c_quickRun\"))\n self.interlinear.fails = []\n elif bk.lower().startswith(\"xx\"):\n doc = self._makeUSFM(dat.splitlines(True), bk)\n #import pdb; pdb.set_trace()\n doc.doc = self.flattenModule(infpath, outfpath, usfm=doc)\n\n if self.changes is not None and len(self.changes):\n if doc is not None:\n dat = str(doc)\n doc = None\n dat = self.runChanges(self.changes, bk, dat)\n #self.analyzeImageCopyrights(dat)\n\n if self.dict['project/canonicalise'] \\\n or not self.asBool(\"document/bookintro\") \\\n or not self.asBool(\"document/introoutline\")\\\n or self.asBool(\"document/hidemptyverses\"):\n if doc is None:\n doc = self._makeUSFM(dat.splitlines(True), bk)\n if doc is not None:\n if not self.asBool(\"document/bookintro\") or not self.asBool(\"document/introoutline\"):\n logger.debug(\"stripIntro\")\n doc.stripIntro(not self.asBool(\"document/bookintro\"), not self.asBool(\"document/introoutline\"))\n if self.asBool(\"document/hidemptyverses\"):\n logger.debug(\"stripEmptyChVs\")\n doc.stripEmptyChVs(ellipsis=self.asBool(\"document/elipsizemptyvs\"))\n\n if self.dict['fancy/endayah'] == \"\":\n if doc is None:\n doc = self._makeUSFM(dat.splitlines(True), bk)\n logger.debug(\"versesToEnd\")\n doc.versesToEnd()\n\n if doc is not None and getattr(doc, 'doc', None) is not None:\n dat = str(doc)\n\n if self.localChanges is not None:\n dat = self.runChanges(self.localChanges, bk, dat)\n\n with open(outfpath, \"w\", encoding=\"utf-8\") as outf:\n outf.write(dat)\n if self.dict['project/when2processscript'] == \"after\":\n bn = os.path.basename(self.runConversion(outfpath, outdir))\n else:\n bn = os.path.basename(outfpath)\n\n if '-conv' in bn:\n newname = re.sub(r\"(\\{}\\-conv|\\-conv\\{}|\\-conv)\".format(draft, draft), draft, bn)\n copyfile(os.path.join(outdir, bn), os.path.join(outdir, newname))\n os.remove(os.path.join(outdir, bn))\n return newname\n else:\n return bn\n \n def _makeUSFM(self, txtlines, bk):\n # import pdb; pdb.set_trace()\n syntaxErrors = []\n try:\n doc = Usfm(txtlines, self.sheets)\n while len(doc.doc) > 1:\n if isinstance(doc.doc[0], sfm.Text):\n 
doc.doc.pop(0)\n else:\n break\n if len(doc.doc) != 1:\n raise ValueError(\"Badly formed USFM. Probably missing a \\\\id line\")\n doc.normalise()\n except SyntaxError as e:\n syntaxErrors.append(\"{} {} line:{}\".format(self.prjid, bk, str(e).split('line', maxsplit=1)[1]))\n except Exception as e:\n syntaxErrors.append(\"{} {} Error({}): {}\".format(self.prjid, bk, type(e), str(e)))\n traceback.print_exc()\n if len(syntaxErrors):\n dlgtitle = \"PTXprint [{}] - USFM Text Error!\".format(self.VersionStr)\n # print(syntaxErrors[0])\n # logger.info(syntaxErrors[0])\n errbits = re.match(r\"(\\S+) (...) line: (\\d+),\\d+\\s*\\[(.*?)\\]: orphan marker (\\\\.+?)\", syntaxErrors[0])\n if errbits is not None:\n self.printer.doError(\"Syntax Error warning: \", \n secondary=_(\"Examine line {} in {} on the 'Final SFM' tab of the View+Edit \" + \\\n \"page to determine the cause of this issue related to marker: {} as found in the markers: {}.\").format(\n errbits[3], errbits[2], errbits[5], errbits[4]) + \\\n \"\\n\\n\"+_(\"This warning was triggered due to 'Auto-Correct USFM' being \" + \\\n \"enabled on the Advanced tab but is due to an orphaned marker. \" + \\\n \"It means the marker does not belong in that position, or it \" + \\\n \"is missing a valid parent marker.\"), title=dlgtitle,\n show=not self.printer.get(\"c_quickRun\"))\n else:\n prtDrft = _(\"And check if a faulty rule in changes.txt has caused the error(s).\") if self.asBool(\"project/usechangesfile\") else \"\"\n self.printer.doError(_(\"Failed to normalize texts due to a Syntax Error: \"), \n secondary=\"\\n\".join(syntaxErrors)+\"\\n\\n\"+_(\"Run the Basic Checks in Paratext to ensure there are no Marker errors \"+ \\\n \"in either of the diglot projects. If this error persists, try running the Schema Check in Paratext as well.\") + \" \" + prtDrft,\n title=dlgtitle, show=not self.printer.get(\"c_quickRun\"))\n \n return None\n else:\n return doc \n\n def make_contextsfn(self, bk, *changes):\n # functional programmers eat your hearts out\n def makefn(reg, currfn):\n if currfn is not None:\n def compfn(fn, b, s):\n def domatch(m):\n return currfn(lambda x:fn(x.group(0)), m.group(0))\n return reg.sub(domatch, s) if bk is None or b == bk else s\n else:\n def compfn(fn, b, s):\n return reg.sub(lambda m:fn(m.group(0)), s) if bk is None or b == bk else s\n return compfn\n return reduce(lambda currfn, are: makefn(are, currfn), reversed([c for c in changes if c is not None]), None)\n\n def readChanges(self, fname, bk):\n changes = []\n if not os.path.exists(fname):\n return []\n qreg = r'(?:\"((?:[^\"\\\\]|\\\\.)*?)\"|' + r\"'((?:[^'\\\\]|\\\\.)*?)')\"\n with universalopen(fname) as inf:\n for i, l in enumerate(inf.readlines()):\n l = l.strip().replace(u\"\\uFEFF\", \"\")\n l = re.sub(r\"\\s*#.*$\", \"\", l)\n if not len(l):\n continue\n contexts = []\n atcontexts = []\n m = re.match(r\"^\\s*include\\s+(['\\\"])(.*?)\\1\", l)\n if m:\n changes.extend(self.readChanges(os.path.join(os.path.dirname(fname), m.group(2)), bk))\n continue\n # test for \"at\" command\n m = re.match(r\"^\\s*at\\s+(.*?)\\s+(?=in|['\\\"])\", l)\n if m:\n atref = RefList.fromStr(m.group(1), context=AnyBooks)\n for r in atref.allrefs():\n if r.chap == 0:\n atcontexts.append((r.book, None))\n elif r.verse == 0:\n atcontexts.append((r.book, regex.compile(r\"(?<=\\\\c {}(?=\\D)).*?($|\\\\[cv] )\".format(r.chap), flags=regex.S)))\n else:\n atcontexts.append((r.book, regex.compile(r\"(?<=\\\\c {}(?=\\D)(?:.(?!\\\\c))*?)\\\\v {}[ -].*?($|\\\\[cv] )\".format(r.chap, r.verse), 
flags=regex.S)))\n l = l[m.end():].strip()\n else:\n atcontexts = [None]\n # test for 1+ \"in\" commands\n while True:\n m = re.match(r\"^\\s*in\\s+\"+qreg+r\"\\s*:\\s*\", l)\n if not m:\n break\n try:\n contexts.append(regex.compile(m.group(1) or m.group(2), flags=regex.M))\n except re.error as e:\n self.printer.doError(\"Regular expression error: {} in changes file at line {}\".format(str(e), i+1),\n show=not self.printer.get(\"c_quickRun\"))\n break\n l = l[m.end():].strip()\n # capture the actual change\n m = re.match(r\"^\"+qreg+r\"\\s*>\\s*\"+qreg, l)\n if m:\n for at in atcontexts:\n if at is None:\n context = self.make_contextsfn(None, *contexts) if len(contexts) else None\n elif len(contexts) or at[1] is not None:\n context = self.make_contextsfn(at[0], at[1], *contexts)\n else:\n context = at[0]\n try:\n changes.append((context, regex.compile(m.group(1) or m.group(2), flags=regex.M),\n m.group(3) or m.group(4) or \"\"))\n except re.error as e:\n self.printer.doError(\"Regular expression error: {} in changes file at line {}\".format(str(e), i+1),\n show=not self.printer.get(\"c_quickRun\"))\n break\n continue\n return changes\n\n def makelocalChanges(self, printer, bk, chaprange=None):\n script = self.dict[\"document/script\"]\n if len(script):\n sscript = getattr(scriptsnippets, script[8:].lower(), None)\n if sscript is not None:\n self.changes.extend(sscript.regexes(self))\n #self.changes.append((None, regex.compile(r\"(?<=\\\\[^\\\\\\s]+)\\*(?=\\S)\", flags=regex.S), \"* \"))\n if self.printer is not None and self.printer.get(\"c_tracing\"):\n print(\"List of changes.txt:-------------------------------------------------------------\")\n report = \"\\n\".join(\"{} -> {}\".format(p[1].pattern, p[2]) for p in self.changes)\n if getattr(self.printer, \"logger\", None) is not None:\n self.printer.logger.insert_at_cursor(report)\n else:\n try:\n print(report)\n except UnicodeEncodeError:\n print(\"Unable to print details of changes.txt\")\n self.localChanges = []\n if bk == \"GLO\" and self.dict['document/filterglossary']:\n self.filterGlossary(printer)\n if chaprange is not None:\n first, last = chaprange\n elif self.dict[\"project/bookscope\"] == \"single\":\n first = int(float(self.dict[\"document/chapfrom\"]))\n last = int(float(self.dict[\"document/chapto\"]))\n else:\n first, last = (-1, -1)\n \n # Fix things that other parsers accept and we don't\n self.localChanges.append((None, regex.compile(r\"(\\\\[cv] [^ \\\\\\r\\n]+)(\\\\)\", flags=regex.S), r\"\\1 \\2\"))\n \n # Remove empty \\h markers (might need to expand this list and loop through a bunch of markers)\n self.localChanges.append((None, regex.compile(r\"(\\\\h ?\\r?\\n)\", flags=regex.S), r\"\"))\n \n # This section handles PARTIAL books (from chapter X to chapter Y)\n if self.asBool(\"document/ifchaplabels\", true=\"%\"):\n clabel = self.dict[\"document/clabel\"]\n clbooks = self.dict[\"document/clabelbooks\"].split()\n # print(\"Chapter label: '{}' for '{}' with {}\".format(clabel, \" \".join(clbooks), bk))\n if len(clabel) and (not len(clbooks) or bk in clbooks):\n #self.localChanges.append((lambda fn,b,d: d if r'\\cl ' in d else fn(d),\n # regex.compile(r\"(\\\\c 1)(?=\\s*\\r?\\n|\\s)\", flags=regex.S), \"\\\\cl {}\\n\\\\1\".format(clabel)))\n # self.localChanges.append((None,\n # regex.compile(r\"(\\\\c 1)(?=\\s*\\r?\\n|\\s)\", flags=regex.S), \"\\\\cl {}\\n\\\\1\".format(clabel)))\n self.localChanges.append((None,\n regex.compile(r\"(\\\\c )\", flags=regex.S), \"\\\\cl {}\\n\\\\1\".format(clabel)))\n \n # if 
self.dict[\"project/bookscope\"] == \"single\":\n if first > 1:\n self.localChanges.append((None, regex.compile(r\"\\\\c 1 ?\\r?\\n.+(?=\\\\c {} ?\\r?\\n)\".format(first), flags=regex.S), \"\"))\n if last >=0 and last < int(chaps.get(bk, 999)):\n self.localChanges.append((None, regex.compile(r\"\\\\c {} ?\\r?\\n.+\".format(last+1), flags=regex.S), \"\"))\n\n # Throw out the known \"nonpublishable\" markers and their text (if any)\n self.localChanges.append((None, regex.compile(r\"\\\\(usfm|ide|rem|sts|restore|pubinfo)( .*?)?\\n(?=\\\\)\", flags=regex.M), \"\"))\n\n # If a printout of JUST the book introductions is needed (i.e. no scripture text) then this option is very handy\n if not self.asBool(\"document/ifmainbodytext\"):\n self.localChanges.append((None, regex.compile(r\"\\\\c 1 ?\\r?\\n.+\".format(first), flags=regex.S), \"\"))\n\n # Probably need to make this more efficient for multi-book and lengthy glossaries (cache the GLO & changes reqd etc.)\n if self.asBool(\"notes/glossaryfootnotes\"):\n self.makeGlossaryFootnotes(printer, bk)\n\n # Glossary Word markup: Remove the second half of the \\w word|glossary-form\\w* and apply chosen glossary markup\n v = self.dict[\"document/glossarymarkupstyle\"]\n gloStyle = self._glossarymarkup.get(v, v)\n if v is not None: # and v != \"no\":\n if gloStyle is not None and len(v) == 2: # otherwise skip over OLD Glossary markup definitions\n self.localChanges.append((None, regex.compile(r\"\\\\\\+?w ((?:.(?!\\\\\\+w\\*))+?)(\\|[^|]+?)?\\\\\\+?w\\*\", flags=regex.M), gloStyle))\n\n if self.asBool(\"notes/includexrefs\"): # This seems back-to-front, but it is correct because of the % if v\n self.localChanges.append((None, regex.compile(r'(?i)\\\\x .+?\\\\x\\*', flags=regex.M), ''))\n \n if self.asBool(\"document/ifinclfigs\") and bk in self._peripheralBooks:\n # Remove any illustrations which don't have a |p| 'loc' field IF this setting is on\n if self.asBool(\"document/iffigexclwebapp\"):\n self.localChanges.append((None, regex.compile(r'(?i)\\\\fig ([^|]*\\|){3}([aw]+)\\|[^\\\\]*\\\\fig\\*', flags=regex.M), '')) # USFM2\n self.localChanges.append((None, regex.compile(r'(?i)\\\\fig [^\\\\]*\\bloc=\"[aw]+\"[^\\\\]*\\\\fig\\*', flags=regex.M), '')) # USFM3\n def figtozfiga(m):\n a = self.printer.picinfos.getAnchor(m.group(1), bk)\n if a is None:\n return \"\"\n ref = re.sub(r\"^\\S+\\s+\", r\"\", a)\n return \"\\\\zfiga|{}\\\\*\".format(ref)\n self.localChanges.append((None, regex.compile(r'\\\\fig.*?src=\"([^\"]+?)\".*?\\\\fig\\*', flags=regex.M), figtozfiga))\n self.localChanges.append((None, regex.compile(r'\\\\fig(?: .*?)?\\|(.*?)\\|.*?\\\\fig\\*', flags=regex.M), figtozfiga))\n\n if self.asBool(\"document/iffighiderefs\"): # del ch:vs from caption \n self.localChanges.append((None, regex.compile(r\"(\\\\fig [^\\\\]+?\\|)([0-9:.\\-,\\u2013\\u2014]+?)(\\\\fig\\*)\", \\\n flags=regex.M), r\"\\1\\3\")) # USFM2\n self.localChanges.append((None, regex.compile(r'(\\\\fig .+?)(ref=\\\"\\d+[:.]\\d+([-,\\u2013\\u2014]\\d+)?\\\")(.*?\\\\fig\\*)', \\\n flags=regex.M), r\"\\1\\4\")) # USFM3\n else:\n # Strip out all \\figs from the USFM as an internally generated temp PicList will do the same job\n self.localChanges.append((None, regex.compile(r'\\\\fig[\\s|][^\\\\]+?\\\\fig\\*', flags=regex.M), \"\"))\n\n if not self.asBool(\"document/sectionheads\"): # Drop ALL Section Headings (which also drops the Parallel passage refs now)\n self.localChanges.append((None, regex.compile(r\"\\\\[sr] .+\", flags=regex.M), \"\"))\n\n if not 
self.asBool(\"document/parallelrefs\"): # Drop ALL Parallel Passage References\n self.localChanges.append((None, regex.compile(r\"\\\\r .+\", flags=regex.M), \"\"))\n\n if self.asBool(\"document/preventorphans\"): # Prevent orphans at end of *any* paragraph\n self.localChanges.append((None, regex.compile(r\"(\\\\q\\d?(\\s?\\r?\\n?\\\\v)?( \\S+)+( (?!\\\\)[^\\\\\\s]{,6})) ([\\S]{,9}\\s*\\n)\", \\\n flags=regex.M), r\"\\1\\u2000\\5\"))\n self.localChanges.append((None, regex.compile(r\"(?<=\\\\[^ctm][^\\\\]+)(\\s+[^ 0-9\\\\\\n\\u2000\\u00A0]{,6}) ([^ 0-9\\\\\\n\\u2000\\u00A0]{,8}\\n(?:\\\\[pmqsc]|$))\", flags=regex.S), r\"\\1\\u2000\\2\"))\n\n if self.asBool(\"document/preventwidows\"):\n # Push the verse number onto the next line (using NBSP) if there is\n # a short widow word (3 characters or less) at the end of the line\n self.localChanges.append((None, regex.compile(r\"(\\\\v \\d+([-,]\\d+)? [\\w]{1,3}) \", flags=regex.M), r\"\\1\\u00A0\")) \n\n # By default, HIDE chapter numbers for all non-scripture (Peripheral) books (unless \"Show... is checked)\n if not self.asBool(\"document/showxtrachapnums\") and bk in TexModel._peripheralBooks:\n self.localChanges.append((None, regex.compile(r\"(\\\\c \\d+ ?\\r?\\n)\", flags=regex.M), \"\"))\n\n if self.asBool(\"document/ch1pagebreak\"):\n self.localChanges.append((None, regex.compile(r\"(\\\\c 1 ?\\r?\\n)\", flags=regex.M), r\"\\pagebreak\\r\\n\\1\"))\n\n if self.asBool(\"document/glueredupwords\"): # keep reduplicated words together\n self.localChanges.append((None, regex.compile(r\"(?<=[ ])(\\w{3,}) \\1(?=[\\s,.!?])\", flags=regex.M), r\"\\1\\u2000\\1\")) \n \n if self.asBool(\"notes/addcolon\"): # Insert a colon between \\fq (or \\xq) and following \\ft (or \\xt)\n self.localChanges.append((None, regex.compile(r\"(\\\\[fx]q .+?):* ?(\\\\[fx]t)\", flags=regex.M), r\"\\1: \\2\")) \n\n # HELP NEEDED from Martin to fix this section up again.\n # Keep book number together with book name \"1 Kings\", \"2 Samuel\" within \\xt and \\xo\n self.localChanges.append((self.make_contextsfn(None, regex.compile(r\"(\\\\[xf]t\\s[^\\\\]+)\")),\n regex.compile(r\"(\\d)\\s(\\p{L})\"), r\"\\1\\u00A0\\2\"))\n \n if self.asBool(\"notes/keepbookwithrefs\"): # keep Booknames and ch:vs nums together within \\xt and \\xo\n self.localChanges.append((self.make_contextsfn(None, regex.compile(r\"(\\\\[xf]t\\s[^\\\\]+)\")),\n regex.compile(r\"(\\d?[^\\s\\d\\-\\\\,;]{3,}[^\\\\\\s]*?)\\s(\\d+[:.]\\d+(-\\d+)?)\"), r\"\\1\\u2000\\2\"))\n self.localChanges.append((self.make_contextsfn(None, regex.compile(r\"(\\\\[xf]t\\s[^\\\\]+)\")),\n regex.compile(r\"(\\s.) 
\"), r\"\\1\\u2000\")) # Ensure no floating single chars in note text\n \n # keep \\xo & \\fr refs with whatever follows (i.e the bookname or footnote) so it doesn't break at end of line\n self.localChanges.append((None, regex.compile(r\"(\\\\(xo|fr) (\\d+[:.]\\d+([-,]\\d+)?)) \"), r\"\\1\\u00A0\"))\n\n for c in (\"fn\", \"xr\"):\n # Force all footnotes/x-refs to be either '+ ' or '- ' rather than '*/#'\n if self.asBool(\"notes/{}override\".format(c)):\n t = \"+\" if self.asBool(\"notes/if{}autocallers\".format(c)) else \"-\"\n self.localChanges.append((None, regex.compile(r\"\\\\{} .\".format(c[0])), r\"\\\\{} {}\".format(c[0],t)))\n # Remove the [spare] space after a note caller if the caller is omitted AND if after a digit (verse number).\n if self.asBool(\"notes/{}omitcaller\".format(c)):\n self.localChanges.append((None, regex.compile(r\"(\\d )(\\\\[{0}] - .*?\\\\[{0}]\\*)\\s+\".format(c[0])), r\"\\1\\2\"))\n\n # Paratext marks no-break space as a tilde ~\n self.localChanges.append((None, regex.compile(r\"~\", flags=regex.M), r\"\\u00A0\")) \n\n # Paratext marks forced line breaks as //\n self.localChanges.append((None, regex.compile(r\"//\", flags=regex.M), r\"\\u2028\")) \n\n # Convert hyphens from minus to hyphen\n self.localChanges.append((None, regex.compile(r\"(?<!\\\\[fx]\\s)((?<=\\s)-|-(?=\\s))\", flags=regex.M), r\"\\u2011\"))\n\n if self.asBool(\"document/toc\") and self.asBool(\"document/multibook\"):\n # Only do this IF the auto Table of Contents is enabled AND there is more than one book\n for c in range(1,4): # Remove any \\toc lines that we don't want appearing in the ToC\n if not self.asBool(\"document/usetoc{}\".format(c)):\n self.localChanges.append((None, regex.compile(r\"(\\\\toc{} .+)\".format(c), flags=regex.M), \"\"))\n\n # Add End of Book decoration PDF to Scripture books only if FancyBorders is enabled and .PDF defined\n if self.asBool(\"fancy/enableborders\") and self.asBool(\"fancy/endofbook\") and bk not in self._peripheralBooks \\\n and self.dict[\"fancy/endofbookpdf\"].lower().endswith('.pdf'):\n self.localChanges.append((None, regex.compile(r\"\\Z\", flags=regex.M), r\"\\r\\n\\z\"))\n \n # Insert a rule between end of Introduction and start of body text (didn't work earlier, but might work now)\n # self.localChanges.append((None, regex.compile(r\"(\\\\c\\s1\\s?\\r?\\n)\", flags=regex.S), r\"\\\\par\\\\vskip\\\\baselineskip\\\\hskip-\\\\columnshift\\\\hrule\\\\vskip 2\\\\baselineskip\\n\\1\"))\n\n # Apply any changes specified in snippets\n for k, c in sorted(self._snippets.items(), key=lambda x: x[1][2].order):\n if self.printer is None:\n v = self.asBool(k) if c[1] is None else c[1](self.dict[k])\n elif c[1] is None:\n v = self.printer.get(c[0])\n self.dict[k] = \"true\" if v else \"false\"\n else:\n self.dict[k] = self.printer.get(c[0])\n v = c[1](self.dict[k])\n if v: # if the c_checkbox is true then extend the list with those changes\n if k == \"snippets/fancyintro\" and bk in self._peripheralBooks: # Only allow fancyIntros for scripture books\n pass\n else:\n self.localChanges.extend(c[2].regexes)\n\n ## Final tweaks\n # Strip out any spaces either side of an en-quad \n self.localChanges.append((None, regex.compile(r\"\\s?\\u2000\\s?\", flags=regex.M), r\"\\u2000\")) \n # Change double-spaces to singles\n self.localChanges.append((None, regex.compile(r\" {2,}\", flags=regex.M), r\" \")) \n # Escape special codes % and $ that could be in the text itself\n self.localChanges.append((None, 
regex.compile(r\"(?<!\\\\\\S*|\\\\[fx]\\s)([{}])(\\s?)\".format(\"\".join(self._specialchars)),\n flags=regex.M), lambda m:\"\\\\\"+self._specialchars[m.group(1)]+(\"\\\\space \" if m.group(2) else \" \"))) \n\n if self.printer is not None and self.printer.get(\"c_tracing\"):\n print(\"List of Local Changes:----------------------------------------------------------\")\n report = \"\\n\".join(\"{} -> {}\".format(p[1].pattern, p[2]) for p in self.localChanges)\n if getattr(printer, \"logger\", None) is not None:\n printer.logger.insert_at_cursor(v)\n else:\n print(report)\n return self.localChanges\n\n def base(self, fpath):\n doti = fpath.rfind(\".\")\n return os.path.basename(fpath[:doti])\n\n def codeLower(self, fpath):\n cl = re.match(self.printer.getPicRe()+\"$\", self.base(fpath))\n if cl:\n return cl[0].lower()\n else:\n return \"\"\n\n def newBase(self, fpath):\n clwr = self.codeLower(fpath)\n if len(clwr):\n return clwr\n else:\n return re.sub('[()&+,.;: ]', '_', self.base(fpath).lower())\n\n def makeGlossaryFootnotes(self, printer, bk):\n # Glossary entries for the key terms appearing like footnotes\n prjid = self.dict['project/id']\n prjdir = os.path.join(self.ptsettings.basedir, prjid)\n fname = printer.getBookFilename(\"GLO\", prjdir)\n infname = os.path.join(prjdir, fname)\n if os.path.exists(infname):\n with universalopen(infname, rewrite=True) as inf:\n dat = inf.read()\n ge = re.findall(r\"\\\\p \\\\k (.+)\\\\k\\* (.+)\\r?\\n\", dat) # Finds all glossary entries in GLO book (may need to add \\ili)\n if ge is not None:\n for g in ge:\n gdefn = re.sub(r\"\\\\xt (.+)\\\\xt\\*\", r\"\\1\", g[1])\n self.localChanges.append((None, regex.compile(r\"(\\\\w (.+\\|)?{} ?\\\\w\\*)\".format(g[0]), flags=regex.M), \\\n r\"\\1\\\\f + \\\\fq {}: \\\\ft {}\\\\f* \".format(g[0],gdefn)))\n\n def filterGlossary(self, printer):\n # Only keep entries that have appeared in this collection of books\n glossentries = []\n prjid = self.dict['project/id']\n prjdir = os.path.join(self.dict[\"/ptxpath\"], prjid)\n for bk in printer.getBooks():\n if bk not in TexModel._peripheralBooks:\n fname = printer.getBookFilename(bk, prjid)\n fpath = os.path.join(prjdir, fname)\n if os.path.exists(fpath):\n with universalopen(fpath) as inf:\n sfmtxt = inf.read()\n glossentries += re.findall(r\"\\\\w .*?\\|?([^\\|]+?)\\\\w\\*\", sfmtxt)\n fname = printer.getBookFilename(\"GLO\", prjdir)\n infname = os.path.join(prjdir, fname)\n if os.path.exists(infname):\n with universalopen(infname, rewrite=True) as inf:\n dat = inf.read()\n ge = re.findall(r\"\\\\p \\\\k (.+)\\\\k\\* .+\\r?\\n\", dat) # Finds all glossary entries in GLO book\n for delGloEntry in [x for x in ge if x not in list(set(glossentries))]:\n self.localChanges.append((None, regex.compile(r\"\\\\p \\\\k {}\\\\k\\* .+\\r?\\n\".format(delGloEntry), flags=regex.M), \"\"))\n\n def analyzeImageCopyrights(self):\n if self.dict['project/iffrontmatter'] == \"\":\n try:\n with open(self.printer.configFRT(), encoding=\"utf-8\") as inf:\n txt = inf.read()\n except FileNotFoundError:\n return\n else:\n txt = self.dict['project/colophontext']\n for m in re.findall(r\"(?i)\\\\(\\S+).*?\\\\zimagecopyrights([A-Z]{2,3})?\", txt):\n self.imageCopyrightLangs[m[1].lower() if m[1] else \"en\"] = m[0]\n return\n\n def generateEmptyImageCopyrights(self):\n self.analyzeImageCopyrights()\n res = [r\"\\def\\zimagecopyrights{}\"]\n for k in self.imageCopyrightLangs.keys():\n res.append(r\"\\def\\zimagecopyrights{}{{}}\".format(k))\n return \"\\n\".join(res)\n\n def 
generateImageCopyrightText(self):\n artpgs = {}\n mkr='pc'\n sensitive = self['document/sensitive']\n picpagesfile = os.path.join(self.docdir()[0], self['jobname'] + \".picpages\")\n crdts = []\n cinfo = self.printer.copyrightInfo\n self.analyzeImageCopyrights()\n if os.path.exists(picpagesfile):\n with universalopen(picpagesfile) as inf:\n dat = inf.read()\n\n # \\figonpage{304}{56}{cn01617.jpg}{tl}{© David C. Cook Publishing Co, 1978.}{x170.90504pt}\n rematch = r\"\\\\figonpage\\{(\\d+)\\}\\{\\d+\\}\\{(?:\" + self.printer.getPicRe() + \"|(.*?))\\.[^}]+\\}\\{.*?\\}\\{(.*?)?\\}\\{.+?\\}\"\n m = re.findall(rematch, dat)\n msngPgs = []\n customStmt = []\n if len(m):\n for f in m:\n if not len(f) or not f[0] or f[5] == \"None\":\n continue\n a = 'co' if f[1] == 'cn' else f[1] # merge Cook's OT & NT illustrations together\n if a == '' and f[5] != '':\n artpgs.setdefault(f[5], []).append(int(f[0]))\n elif a == '':\n artpgs.setdefault('zz', []).append(int(f[0]))\n msngPgs += [f[0]] \n else:\n artpgs.setdefault(a, []).append(int(f[0]))\n artistWithMost = \"\"\n if len(artpgs):\n artpgcmp = [a for a in artpgs if a != 'zz']\n if len(artpgcmp):\n artistWithMost = max(artpgcmp, key=lambda x: len(set(artpgs[x])))\n\n langs = set(self.imageCopyrightLangs.keys())\n langs.add(\"en\")\n for lang in sorted(langs):\n crdtsstarted = False\n if os.path.exists(picpagesfile):\n hasOut = False\n mkr = self.imageCopyrightLangs.get(lang, \"pc\")\n rtl = lang in cinfo['rtl']\n if rtl == (self.dict['document/ifrtl'] == \"false\"):\n mkr += \"\\\\begin\" + (\"R\" if rtl else \"L\")\n crdts.append(\"\\\\def\\\\zimagecopyrights{}{{%\".format(lang.lower()))\n crdtsstarted = True\n plstr = cinfo[\"plurals\"].get(lang, cinfo[\"plurals\"][\"en\"])\n cpytemplate = cinfo['templates']['imageCopyright'].get(lang,\n cinfo['templates']['imageCopyright']['en'])\n for art, pgs in artpgs.items():\n if art != artistWithMost and art != 'zz':\n if len(pgs):\n pgs = sorted(set(pgs))\n plurals = pluralstr(plstr, pgs)\n artinfo = cinfo[\"copyrights\"].get(art, {'copyright': {'en': art}, 'sensitive': {'en': art}})\n if artinfo is not None and (art in cinfo['copyrights'] or len(art) > 5):\n artstr = artinfo[\"copyright\"].get(lang, artinfo[\"copyright\"][\"en\"])\n if sensitive and \"sensitive\" in artinfo:\n artstr = artinfo[\"sensitive\"].get(lang, artinfo[\"sensitive\"][\"en\"])\n cpystr = multstr(cpytemplate, lang, len(pgs), plurals, artstr.replace(\"_\", \"\\u00A0\"))\n crdts.append(\"\\\\{} {}\".format(mkr, cpystr))\n else:\n crdts.append(_(\"\\\\rem Warning: No copyright statement found for: {} on pages {}\")\\\n .format(art.upper(), plurals))\n hasOut = True\n if len(msngPgs):\n plurals = pluralstr(plstr, msngPgs)\n template = cinfo['templates']['imageExceptions'].get(lang,\n cinfo['templates']['imageExceptions']['en'])\n exceptPgs = \" \" + multstr(template, lang, len(msngPgs), plurals)\n else:\n exceptPgs = \"\"\n\n if len(artistWithMost):\n artinfo = cinfo[\"copyrights\"].get(artistWithMost, \n {'copyright': {'en': artistWithMost}, 'sensitive': {'en': artistWithMost}})\n if artinfo is not None and (artistWithMost in cinfo[\"copyrights\"] or len(artistWithMost) > 5):\n pgs = artpgs[artistWithMost]\n plurals = pluralstr(plstr, pgs)\n artstr = artinfo[\"copyright\"].get(lang, artinfo[\"copyright\"][\"en\"])\n if sensitive and \"sensitive\" in artinfo:\n artstr = artinfo[\"sensitive\"].get(lang, artinfo[\"sensitive\"][\"en\"])\n if not hasOut:\n template = cinfo['templates']['allIllustrations'].get(lang,\n 
cinfo['templates']['allIllustrations']['en'])\n else:\n template = cinfo['templates']['exceptIllustrations'].get(lang,\n cinfo['templates']['exceptIllustrations']['en'])\n cpystr = template.format(artstr.replace(\"_\", \"\\u00A0\") + exceptPgs)\n crdts.append(\"\\\\{} {}\".format(mkr, cpystr))\n if self.dict['notes/ifxrexternalist']:\n if self.dict['notes/xrlistsource'] == \"standard\":\n msg = \"\\\\{} {}\".format(mkr, cinfo['templates']['openbible.info'].get(lang,\n cinfo['templates']['openbible.info']['en']).replace(\"_\", \"\\u00A0\"))\n else:\n msg = getattr(self, 'xrefcopyright', None)\n if msg is not None:\n if not crdtsstarted:\n crdts.append(\"\\\\def\\\\zimagecopyrights{}{{%\".format(lang.lower()))\n crdtsstarted = True\n crdts.append(msg)\n if crdtsstarted:\n crdts.append(\"}\")\n if len(crdts):\n crdts.append(\"\\\\let\\\\zimagecopyrights=\\\\zimagecopyrightsen\")\n return \"\\n\".join(crdts)\n\n def createXrefTriggers(self, bk, prjdir, outpath):\n if self.xrefs is None:\n cfilter = self.dict['notes/xrfilterbooks']\n if cfilter == \"pub\":\n bl = self.printer.get(\"ecb_booklist\", \"\").split()\n filters = set(bl)\n elif cfilter == \"prj\":\n filters = set(self.printer.getAllBooks().keys())\n elif cfilter == \"all\":\n filters = None\n elif cfilter == \"ot\":\n filters = allbooks[:39]\n elif cfilter == \"nt\":\n filters = allbooks[40:67]\n if filters is not None and len(filters) == 0:\n filters = None\n localfile = None\n if self.dict['strongsndx/localterms']:\n localfile = os.path.join(self.printer.settings_dir, self.printer.prjid, \"TermRenderings.xml\")\n if not os.path.exists(localfile):\n localfile = None\n self.xrefs = Xrefs(self, filters, prjdir,\n self.dict['project/selectxrfile'] if self.dict['notes/xrlistsource'] == 'custom' else None,\n self.dict['notes/xrlistsize'], self.dict['notes/xrlistsource'], localfile)\n self.xrefs.process(bk, outpath)\n\n","sub_path":"python/lib/ptxprint/texmodel.py","file_name":"texmodel.py","file_ext":"py","file_size_in_byte":96212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"480270764","text":"#10-7. Addition Calculator: Wrap your code from Exercise 10-6 \r\n#in a while loop so the user can continue entering numbers \r\n#even if they make a mistake and enter text instead of a number.\r\nprint(\"----------Addition program for two numbers----------\")\r\nprint(\"(Enter 'quit' to exit the program.)\")\r\n\r\nwhile True:\r\n\ttry:\r\n\t\tfirstNum = input(\"\\nEnter the first number: \")\r\n\t\tif firstNum == 'quit':\tbreak\r\n\t\tfirstNum = int(firstNum)\r\n\t\tsecondNum = input(\"Enter the second number: \")\r\n\t\tif secondNum == 'quit':\tbreak\r\n\t\tsecondNum = int(secondNum)\r\n\texcept ValueError:\r\n\t\tprint(\"\\n\\tYou entered text instead of a number, please try again!\\n\")\r\n\telse:\r\n\t\tsum = firstNum + secondNum\r\n\t\tprint(\"\\n\\tThe sum of the two numbers you have inputted is \" + str(sum) + \".\")\r\n\t\t\r\n\t\t\r\n\r\n\r\n","sub_path":"chapter_10/addition_calculator.py","file_name":"addition_calculator.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"167561358","text":"#!/usr/bin/python -u\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport subprocess\nimport sys\n\nif 'LDAP_PORT_389_TCP_ADDR' not in os.environ:\n print('error: container invoked without link to an ldap container')\n sys.exit(1)\n\nldap_url = 'ldap://%s:%s/' % (os.environ['LDAP_PORT_389_TCP_ADDR'],\n os.environ['LDAP_PORT_389_TCP_PORT'])\n\nos.environ['DOCKER_ENTRYPOINT'] = '1'\n\nsubprocess.check_call([\n '/usr/bin/python', '-u',\n '/usr/bin/ansible-playbook', 'docker-hgrb.yml',\n '-c', 'local',\n '-t', 'docker-startup',\n '-e', 'ldap_uri=%s' % ldap_url,\n ],\n cwd='/vct/ansible')\n\ndel os.environ['DOCKER_ENTRYPOINT']\n\nsubprocess.check_call(['/sbin/service', 'rsyslog', 'start'])\n\nos.execl(sys.argv[1], *sys.argv[1:])\n","sub_path":"ansible/roles/docker-hg-reviewboard/files/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"83188645","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/ben/Dropbox/studies/newpainmachine/software/FAB/fab_controller/settings.py\n# Compiled at: 2015-01-06 09:36:37\nimport os\nfrom collections import namedtuple\nimport random, pkg_resources\nFAB_VERSION = pkg_resources.require('fab_controller')[0].version\nHANDS = [\n 'left', 'right']\nPair = namedtuple('Pair', HANDS)\nBlock = namedtuple('Block', ['duration', 'grams'])\nLOG_INTERVAL = 0.5\nLOGFILE_DIR = os.path.expanduser('~/Documents/fab/logs/')\nREST_N_FROM_TOP = 500\nSTEP_DELAY = 0.0003\nTIGHT_LOOP_INTERVAL = 0.001\nALLOWABLE_DISCREPANCY = 20\nTWO_KG = Pair(0.4457, 0.4692)\nDASHBOARD_UPDATE_INTERVAL = 0.2\nSERVER_PORT = 8000\nSTEP_PIN = Pair(2, 3)\nDIRECTION_PIN = Pair(6, 7)\nHIGH_LIMIT_PIN = Pair(17, 18)\nLOW_LIMIT_PIN = Pair(15, 16)\nSENSOR_PIN = Pair(4, 5)\nUP = 0\nDOWN = 1\nMOVEMENT_LABELS = {UP: 'up', DOWN: 'down'}\nMOVEMENT = {v:k for k, v in list(MOVEMENT_LABELS.items())}\nSTEPS_PER_FULL_STEP = 8\nFULL_STEPS_PER_REV = 200\nSTEPS_PER_REV = FULL_STEPS_PER_REV * STEPS_PER_FULL_STEP\nMM_PER_REV = 5\nMM_MAX_TRAVEL = 15\nMAX_REVS = MM_MAX_TRAVEL / MM_PER_REV\nMAX_STEPS = MAX_REVS * STEPS_PER_REV\nSWITCH_CHECKING_WINDOW_LENGTH = 5\nSENSOR_MEASUREMENTS_WINDOW_LENGTH = 5","sub_path":"pycfiles/fab-controller-0.9.14.tar/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"297697645","text":"from datetime import datetime\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom dynamicfleet.reservas.validators import validate_start_date\n\n\nclass Reserve(models.Model):\n STATES_CHOICES = [\n ('provisoria', 'Provisória'),\n ('confirmada', 'Confirmada'),\n ('cancelada', 'Cancelada')\n ]\n\n vehicle = models.ForeignKey(\"veiculos.Vehicle\", \n on_delete=models.CASCADE, \n related_name='reserves')\n start = models.DateTimeField('Inicio', \n default=datetime.now, \n validators=[validate_start_date])\n end = models.DateTimeField('Fim', default=datetime.now,)\n state = models.CharField('Estado da Reserva', \n max_length=10, \n choices=STATES_CHOICES, \n default='provisoria')\n\n created = models.DateTimeField('Registrado em', auto_now_add=True)\n modified = models.DateTimeField('Modificado em', auto_now=True)\n\n def clean(self):\n 
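# model-level validation: a reserve must not end before it starts\n 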
super(Reserve, self).clean()\n \n if self.start > self.end:\n raise ValidationError('Data de fim da reserva inválida.', 'invalid value')\n\n class Meta:\n verbose_name = 'reserva'\n verbose_name_plural = 'reservas'\n\n def __str__(self):\n return '[ veiculo: {}, start: {}, end: {}, state: {}]'.format(\n self.vehicle.model,\n self.start,\n self.end,\n self.state)\n","sub_path":"dynamicfleet/reservas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"380817949","text":"\"\"\"\n--- Day 23: Crab Cups ---\nThe small crab challenges you to a game! The crab is going to mix up some cups, and you have to predict where they'll end up.\n\nThe cups will be arranged in a circle and labeled clockwise (your puzzle input). For example, if your labeling were 32415, \nthere would be five cups in the circle; going clockwise around the circle from the first cup, the cups would be labeled \n3, 2, 4, 1, 5, and then back to 3 again.\n\nBefore the crab starts, it will designate the first cup in your list as the current cup. The crab is then going to do 100 moves.\n\nEach move, the crab does the following actions:\n\nThe crab picks up the three cups that are immediately clockwise of the current cup. They are removed from the circle; cup spacing is adjusted as necessary to maintain the circle.\nThe crab selects a destination cup: the cup with a label equal to the current cup's label minus one. If this would select one of the cups that was just picked up, \n the crab will keep subtracting one until it finds a cup that wasn't just picked up. If at any point in this process the value goes below the lowest value on any cup's label, \n it wraps around to the highest value on any cup's label instead.\nThe crab places the cups it just picked up so that they are immediately clockwise of the destination cup. They keep the same order as when they were picked up.\nThe crab selects a new current cup: the cup which is immediately clockwise of the current cup.\n\nFor example, suppose your cup labeling were 389125467. If the crab were to do merely 10 moves, the following changes would occur:\n\n-- move 1 --\ncups: (3) 8 9 1 2 5 4 6 7 \npick up: 8, 9, 1\ndestination: 2\n\n-- move 2 --\ncups: 3 (2) 8 9 1 5 4 6 7 \npick up: 8, 9, 1\ndestination: 7\n\n-- move 3 --\ncups: 3 2 (5) 4 6 7 8 9 1 \npick up: 4, 6, 7\ndestination: 3\n\n-- move 4 --\ncups: 7 2 5 (8) 9 1 3 4 6 \npick up: 9, 1, 3\ndestination: 7\n\n-- move 5 --\ncups: 3 2 5 8 (4) 6 7 9 1 \npick up: 6, 7, 9\ndestination: 3\n\n-- move 6 --\ncups: 9 2 5 8 4 (1) 3 6 7 \npick up: 3, 6, 7\ndestination: 9\n\n-- move 7 --\ncups: 7 2 5 8 4 1 (9) 3 6 \npick up: 3, 6, 7\ndestination: 8\n\n-- move 8 --\ncups: 8 3 6 7 4 1 9 (2) 5 \npick up: 5, 8, 3\ndestination: 1\n\n-- move 9 --\ncups: 7 4 1 5 8 3 9 2 (6)\npick up: 7, 4, 1\ndestination: 5\n\n-- move 10 --\ncups: (5) 7 4 1 8 3 9 2 6 \npick up: 7, 4, 1\ndestination: 3\n\n-- final --\ncups: 5 (8) 3 7 4 1 9 2 6 \nIn the above example, the cups' values are the labels as they appear moving clockwise around the circle; the current cup is marked with ( ).\n\nAfter the crab is done, what order will the cups be in? Starting after the cup labeled 1, collect the other cups' labels clockwise into a \nsingle string with no extra characters; each number except 1 should appear exactly once. In the above example, after 10 moves, the cups \nclockwise from 1 are labeled 9, 2, 6, 5, and so on, producing 92658374. 
If the crab were to complete all 100 moves, \nthe order after cup 1 would be 67384529.\n\nUsing your labeling, simulate 100 moves. What are the labels on the cups after cup 1?\n\nYour puzzle input is 792845136.\n\n\n--- Part Two ---\nDue to what you can only assume is a mistranslation (you're not exactly fluent in Crab), you are quite surprised when the \ncrab starts arranging many cups in a circle on your raft - one million (1000000) in total.\n\nYour labeling is still correct for the first few cups; after that, the remaining cups are just numbered in an increasing \nfashion starting from the number after the highest number in your list and proceeding one by one until one million is reached. \n(For example, if your labeling were 54321, the cups would be numbered 5, 4, 3, 2, 1, and then start counting up from 6 until \none million is reached.) In this way, every number from one through one million is used exactly once.\n\nAfter discovering where you made the mistake in translating Crab Numbers, you realize the small crab isn't going to do \nmerely 100 moves; the crab is going to do ten million (10000000) moves!\n\nThe crab is going to hide your stars - one each - under the two cups that will end up immediately clockwise of cup 1. \nYou can have them if you predict what the labels on those cups will be when the crab is finished.\n\nIn the above example (389125467), this would be 934001 and then 159792; multiplying these together produces 149245887792.\n\nDetermine which two cups will end up immediately clockwise of cup 1. What do you get if you multiply their labels together?\n\"\"\"\n\n### IMPORTS ###\n\nimport collections\nimport cProfile\nimport datetime\nimport math\nimport numpy\nimport pickle\nimport pstats\nimport time\n\n\n### CONSTANTS ###\n\nINPUT_FILENAME = 'input0.txt'\n\ndo_profiling = False\n\n\n### FUNCTIONS ###\n\ndef parse_input( ):\n\tlines = open( INPUT_FILENAME, 'r' ).read( ).splitlines( )\n\n\treturn lines[ 0 ]\n\n\ndef main( labels ):\n\t\"\"\"\n\t\"\"\"\n\ttime_start = time.perf_counter( )\n\t\n\tcups = collections.deque( )\n\t\n\tfor l in labels:\n\t\tcups.append( int( l ) )\n\t\t\n\tmin_label = min( cups )\n\tmax_label = max( cups )\n\n\t# Add all numbers up to 1M\n\tfor i in range( max_label + 1, 1000000 + 1 ):\n\t\tcups.append( i )\n\t\n\tcur_cup = cups[ 0 ]\n\toffset = 0\n\tmove = 0\n\t\n\twhile move < 10000000:\n\t\tmove += 1\n\t\t\n\t\tif move % 1000 == 0:\n\t\t\telapsed_secs = time.perf_counter( ) - time_start\n\t\t\tsecs_per_move = elapsed_secs / move\n\t\t\thours_remaining = secs_per_move * ( 10000000 - move ) / 60.0 / 60.0\n\n\t\t\teta_time = ( datetime.datetime.now( ) + datetime.timedelta( hours = hours_remaining ) ).strftime( '%a %I:%M:%S %p' )\n\n\t\t\tprint( 'move {0} ({1:.02f}%), {2:.02f} hours remaining, ETA = {3}'.format( move, move / 10000000 * 100.0, hours_remaining, eta_time ) )\n\t\t\t\n\t\t\tif do_profiling:\n\t\t\t\tprofiler = cProfile.Profile( )\n\t\t\t\tprofiler.enable( )\n\t\t\t\n\t\t\n\t\t#print( '\\n-- move {0} --'.format( move ) )\n\t\t#print( 'cups: {0}'.format( ' '.join( [ str( c ) for c in list( cups )[ :50 ] ] ) ) )\n\t\t\t\n\t\tcups.rotate( -1 )\n\t\toffset = 999999\n\t\t\n\t\t# Remove 3 cups to right (clockwise) of current cup\n\t\tremoved = [\n\t\t cups.popleft( ),\n\t\t cups.popleft( ),\n\t\t cups.popleft( )\n\t\t]\n\t\t\n\t\t#print( 'pick up: {0}'.format( ' '.join( [ str( r ) for r in removed ] ) ) )\n\t\t\n\t\t# find destination cup\n\t\tdest_cup = None\n\t\ti = cur_cup - 1\n\t\t\n\t\twhile dest_cup is None:\n\t\t\tif i in 
cups:\n\t\t\t\tdest_cup = i\n\t\t\t\tcontinue\n\n\t\t\ti -= 1\n\t\t\tif i < min_label:\n\t\t\t\ti = max_label\n\t\t\n\t\t# Got our destination cup, so insert cups to right of it\n\t\tidx = cups.index( dest_cup )\n\t\t#idx = 10\n\t\t#while cups[ idx ] != dest_cup:\n\t\t\t#idx -= 1\n\t\t\t#if idx < 0:\n\t\t\t\t#idx = 999996\n\t\t\n\t\tprint( 'cur_cup =', cur_cup, 'dest_cup =', dest_cup, 'idx =', idx )\n\t\t\n\t\tif idx != len( cups ) - 1:\n\t\t\t# Have to do some rotating to get dest cup to end\n\t\t\tcups.rotate( ( idx + 1 ) * -1 )\n\t\t\t\n\t\t\t# Update cur_cup offset\n\t\t\toffset -= idx + 1\n\t\t\n\t\t# Add our removed cups\t\t\n\t\tfor r_cup in removed:\n\t\t\tcups.append( r_cup )\n\n\t\t# Update offset due to inserting those 3 again.\n\t\t# Since we added them to the end, offset drops by 3 again\n\t\toffset -= 3\n\t\t\n\t\t# Rotate to get current cup at head (first) again, then pick new current cup\n\t\tcups.rotate( len( cups ) - offset )\n\t\toffset = 0\n\n\t\tcur_cup = cups[ 1 ]\n\t\t\n\t\t# Rotate again so new cur_cup is at head (first)\n\t\tcups.rotate( -1 )\n\t\t\n\t\tif do_profiling:\n\t\t\tif move % 1000 == 0:\n\t\t\t\tprofiler.disable( )\n\t\t\t\tstats = pstats.Stats( profiler )\n\t\t\t\tstats.dump_stats( r'C:\\Users\\Home\\Dropbox (Personal)\\misc\\profile.pstats' )\t\n\n\t# Dump it out\n\tpickle.dump( cups, open( r'D:\\temp\\blah.pickle', 'wb' ) )\n\t\n\t#print( '\\n-- final --' )\n\t#print( 'cups: {0}'.format( ' '.join( [ str( c ) for c in cups ] ) ) )\n\t\t \n\t# print answer?\n\t#print( 'cups: {0}'.format( ' '.join( [ str( c ) for c in list( cups )[ :50 ] ] ) ) )\n\n\tidx = cups.index( 1 )\n\t\n\tidx1 = idx + 1\n\tif idx1 >= len( cups ):\n\t\tidx1 = idx1 - len( cups )\n\tidx2 = idx + 2\n\tif idx2 >= len( cups ):\n\t\tidx2 = idx2 - len( cups )\n\t\t\n\tval1 = cups[ idx1 ]\n\tval2 = cups[ idx2 ]\n\t#print( 'answer = {0} * {1} = {2}'.format( val1, val2, val1 * val2 ) )\n\n\t## Calculate score\n\t## Rotate so \"1\" is at end (on right)\n\t#idx = cups.index( 1 )\n\t#cups.rotate( len( cups ) - idx - 1 )\n\t\n\t#score = ''.join( [ str( c ) for c in cups ] )[ :-1 ]\n\tprint( 'values =', val1, val2 )\n\treturn val1 * val2\n\t\n# test answer = 67384529\n\n### CLASSES ###\n\n\n### MAIN ###\n\nif __name__ == \"__main__\":\n\ttime_start = time.perf_counter( )\n\n\tlabels = parse_input( )\n\t\n\tanswer = main( labels )\n\n\tprint( 'answer =', answer )\n\tprint( 'done in {0:.4f} secs'.format( time.perf_counter( ) - time_start ) )\n\n# 54 not right\n","sub_path":"2020/23/2020_day_23_2c_grow.py","file_name":"2020_day_23_2c_grow.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636265272","text":"#\n# Joel Labbe\n#\n# convert_v1.py\n#\n# Create a table showing Celsius to Fahrenheit temperatures every 10 degrees, from 0C to 100C\n#\n# Input: Temperature in Celsius from 0C to 100C\n#\n# Processing: 1. 
For Celsius temperature from 0C to 100C\n# (every 10 degrees):\n# Calculate corresponding Fahrenheit\n# fahrenheit = 9/5*Celsius+32\n# Display Result\n#\n# Output: Table of Equivalent Temperatures in Fahrenheit \n#\n\ndef main():\n # Display Heading\n print(\"Temperature Converter ...\")\n print()\n print(\"Celsius\\t\\tFahrenheit\")\n print(\"==========================\")\n\n # For celsius from 0C to 100C\n # Calculate & display fahrenheit\n celsius = 0\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 10\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 20\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 30\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 40\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 50\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 60\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 70\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 80\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 90\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\n celsius = 100\n fahrenheit = 9/5 * celsius + 32\n print (\" \",celsius,\"\\t\\t\", fahrenheit)\n\nmain()\n \n","sub_path":"Labs/Chapter 2/convert_v1.py","file_name":"convert_v1.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"347871921","text":"\n\n#class header\nclass _PIOUS():\n\tdef __init__(self,): \n\t\tself.name = \"PIOUS\"\n\t\tself.definitions = [u'strongly believing in religion, and living in a way that shows this belief: ', u'pretending to have sincere feelings: ', u'something that is unlikely to happen']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_pious.py","file_name":"_pious.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442971790","text":"import sys, StringIO\n\ndef replaceAll(s, r):\n for x in r:\n s = s.replace(x, ' ', 1)\n return s\n\ndef solution(s):\n r = []\n #find \"THREE\", \"NINE\"\n for i, n in [(8, 'GEIHT'), (6, 'XSI'), (0, 'ZERO'), (4,'UFOR'), (7, 'SEVEN'),\n (5, 'VFIE'), (2, 'WTO'), (1, 'ONE'), (3, 'THREE'), (9, 'NINE')]:\n while s.find(n[0])>-1:\n r.append(i)\n s = replaceAll(s, n)\n\n return ''.join([str(x) for x in sorted(r)])\n#solution\n\n\nif __name__ == '__main__':\n if len(sys.argv)>1:\n input = file(sys.argv[1])\n else:\n input = StringIO.StringIO(\"\"\"4\nOZONETOWER\nWEIGHFOXTOURIST\nOURNEONFOE\nETHER\"\"\")\n cases = int(input.readline())\n for case in range(cases):\n s = input.readline().strip()\n print(\"Case #%d: %s\" % (case+1, solution(s)))\n","sub_path":"solutions_5648941810974720_0/Python/Wingi/pa.py","file_name":"pa.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"588147580","text":"#!/usr/bin/env python\n\nimport sys\nimport rospy\nimport moveit_commander\nimport tf\n\nfrom time import time\nfrom math import radians, degrees, cos, sin\nfrom leap_motion.msg import Human, Hand\nfrom tf.transformations import quaternion_from_euler\nfrom geometry_msgs.msg import TwistStamped, Vector3\n\nALMOST_ZERO = 0.0001\n\n# [0.7783687709988791, -0.3197063419414436, 0.6000029657171599]\n\ndef constrain(value, a, b):\n max_value = max(a, b)\n min_value = min(a, b)\n return min(max_value, max(min_value, value))\n\ndef constrain_abs(value, diviation):\n return constrain(value, -diviation, diviation)\n\ndef apply_to_vector(vector, func):\n new_vec = Vector3()\n new_vec.x = func(vector.x)\n new_vec.y = func(vector.y)\n new_vec.z = func(vector.z)\n return new_vec\n\ndef is_open(hand):\n return hand.grab_strength < 0.5\n\ndef rotate_vector(x, y):\n theta = radians(45)\n cs = cos(theta)\n sn = sin(theta)\n\n px = x * cs - y * sn\n py = x * sn + y * cs\n\n return px, py\n\ndef add_vectors(a, b):\n new_vec = Vector3()\n new_vec.x = a.x + b.x\n new_vec.y = a.y + b.y\n new_vec.z = a.z + b.z\n return new_vec\n\ndef subtract_vectors(a, b):\n new_vec = Vector3()\n new_vec.x = a.x - b.x\n new_vec.y = a.y - b.y\n new_vec.z = a.z - b.z\n return new_vec\n\ndef negate_vector(vec):\n new_vec = Vector3()\n new_vec.x = -vec.x\n new_vec.y = -vec.y\n new_vec.z = -vec.z\n return new_vec\n\n\nclass LeapListener(object):\n def __init__(self):\n super(LeapListener, self).__init__()\n moveit_commander.roscpp_initialize(sys.argv)\n rospy.init_node(\"leap_commander\")\n self.centered_position = Vector3(0.68, -0.34, 0.59)\n\n self.tf_listener = tf.TransformListener()\n self.move_group = moveit_commander.MoveGroupCommander(\"right_arm\")\n self.previous_speed = Vector3()\n self.published_zero = False\n\n self.zeroed_hand_position = Vector3(0, 0, 0.25)\n self.zeroed_in_this_loop = True\n\n self.last_timestamp = time()\n\n\n self.body_pose_publisher = rospy.Publisher(\"jog_arm_server/delta_jog_cmds\", TwistStamped, queue_size=10)\n rospy.Subscriber(\"leap_motion/leap_device\", Human, self.on_leap_msg)\n rospy.spin()\n\n def get_end_effector_position(self):\n return self.tf_listener.lookupTransform('/base_link', self.move_group.get_end_effector_link(), rospy.Time(0))\n\n def on_leap_msg(self, msg):\n if msg.right_hand is not None and msg.right_hand.is_present and is_open(msg.right_hand):\n hand = msg.right_hand\n\n hand_position = Vector3(-hand.palm_center.z, -hand.palm_center.x, hand.palm_center.y)\n # hand_position = apply_to_vector(hand_position, lambda vec: constrain_abs(vec, 0.2))\n\n current_position = self.get_end_effector_position()[0]\n current_position = Vector3(current_position[0], current_position[1], current_position[2])\n\n if not self.zeroed_in_this_loop:\n self.zeroed_hand_position = hand_position\n self.centered_position = current_position\n self.zeroed_in_this_loop = True\n relative_hand_position = subtract_vectors(hand_position, self.zeroed_hand_position)\n\n # print \"relative\"\n # print relative_hand_position\n # print \"\"\n\n desired_position = add_vectors(relative_hand_position, self.centered_position)\n\n position = Vector3(ALMOST_ZERO, ALMOST_ZERO, ALMOST_ZERO)\n rotation = Vector3(ALMOST_ZERO, ALMOST_ZERO, ALMOST_ZERO)\n \n speed = subtract_vectors(desired_position, current_position)\n\n x, z = rotate_vector(-speed.y, speed.z)\n Kp = 3\n Kd = 0\n now = time()\n time_difference = now - self.last_timestamp\n self.last_timestamp = now\n position.y = speed.x * Kp - 
self.previous_speed.y * time_difference * Kd\n position.x = x * Kp - self.previous_speed.x * time_difference * Kd\n position.z = z * Kp - self.previous_speed.z * time_difference * Kd\n # print position\n # print \"\"\n self.previous_speed = position\n \n \n pose = TwistStamped()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = \"base_link\"\n pose.twist.linear = position\n pose.twist.angular = rotation\n\n self.body_pose_publisher.publish(pose)\n self.published_zero = False\n elif not self.published_zero:\n pose = TwistStamped()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = \"base_link\"\n pose.twist.linear = Vector3(ALMOST_ZERO, ALMOST_ZERO, ALMOST_ZERO)\n pose.twist.angular = Vector3(ALMOST_ZERO, ALMOST_ZERO, ALMOST_ZERO)\n self.body_pose_publisher.publish(pose)\n self.published_zero = True\n self.zeroed_in_this_loop = False\n else:\n self.zeroed_in_this_loop = False\n\n\nif __name__ == \"__main__\":\n LeapListener()\n","sub_path":"husky_leap/src/leap_motion_absolute_controller.py","file_name":"leap_motion_absolute_controller.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"443703496","text":"import xarray as xr\nimport traitlets as tl\n\nfrom podpac.core.utils import common_doc, cached_property\nfrom podpac.core.data.datasource import COMMON_DATA_DOC, DATA_DOC\nfrom podpac.core.data.file_source import BaseFileSource, FileKeysMixin, LoadFileMixin\n\n\n@common_doc(COMMON_DATA_DOC)\nclass Dataset(FileKeysMixin, LoadFileMixin, BaseFileSource):\n \"\"\"Create a DataSource node using xarray.open_dataset.\n\n Attributes\n ----------\n source : str\n Path to the dataset file.\n In addition to local paths, file://, http://, ftp://, and s3:// transport protocols are supported.\n dataset : xarray.Dataset\n Dataset object.\n coordinates : :class:`podpac.Coordinates`\n {coordinates}\n data_key : str\n data key, default 'data'\n lat_key : str\n latitude key, default 'lat'\n lon_key : str\n longitude key, default 'lon'\n time_key : str\n time key, default 'time'\n alt_key : str\n altitude key, default 'alt'\n crs : str\n Coordinate reference system of the coordinates\n extra_dim : dict\n In cases where the data contain dimensions other than ['lat', 'lon', 'time', 'alt'], these dimensions need to be selected.\n For example, if the data contains ['lat', 'lon', 'channel'], the second channel can be selected using `extra_dim=dict(channel=1)`\n \"\"\"\n\n # dataset = tl.Instance(xr.Dataset).tag(readonly=True)\n extra_dim = tl.Dict(allow_none=True).tag(attr=True)\n\n @tl.default(\"extra_dim\")\n def _default_extra_dim(self):\n return None\n\n # -------------------------------------------------------------------------\n # public api properties and methods\n # -------------------------------------------------------------------------\n\n def open_dataset(self, fp):\n return xr.open_dataset(fp)\n\n def close_dataset(self):\n super(Dataset, self).close_dataset()\n self.dataset.close()\n\n @cached_property\n def dims(self):\n \"\"\"dataset coordinate dims\"\"\"\n lookup = {self.lat_key: \"lat\", self.lon_key: \"lon\", self.alt_key: \"alt\", self.time_key: \"time\"}\n return [lookup[dim] for dim in self.dataset.dims]\n\n @cached_property\n def keys(self):\n return list(self.dataset.keys())\n\n @common_doc(COMMON_DATA_DOC)\n def get_data(self, coordinates, coordinates_index):\n \"\"\"{get_data}\"\"\"\n\n if not isinstance(self.data_key, list):\n data = self.dataset[self.data_key]\n 
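# put the dims in the dataset's canonical order so the positional\n # coordinates_index below lines up with the node's coordinates\n 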
data = data.transpose(*self.dataset.dims)\n else:\n data = self.dataset[self.data_key].to_array(dim=\"output\")\n tdims = tuple(self.dataset.dims) + (\"output\",)\n data = data.transpose(*tdims)\n\n return self.create_output_array(coordinates, data.data[coordinates_index])\n","sub_path":"podpac/core/data/dataset_source.py","file_name":"dataset_source.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"409083044","text":"'''\n最后一行是最后图片加1s,全部出车,且保留车辆pos信息\n'''\nimport cv2\nimport numpy as np\nimport os\nimport json\nfrom detect_utils import parking_line\nfrom detect_utils import show_parking_line\nfrom create import create_json_record\n\npath_img = r'E:\\home_label\\2000_02_02\\DDT2G1907ZMY00040SY' # 路径\n\nparking_space = 5 # 停车位个数 #需要手动改\n\n# #################不标车牌的IP##################\nno_plate_list = ['test_177', 'test_211', 'test_212', 'test_221', 'test_222', 'test_231',\n 'test_232', 'test_241', 'test_242', 'test_251', 'test_252', 'test_261', 'test_262']\n\n# #################创建参数和初始化##################\npath_txt = os.path.dirname(path_img) + '\\\\' + path_img.split('\\\\')[-1] + '_label.txt'\npath_json = os.path.dirname(path_img) + '\\\\' + path_img.split('\\\\')[-1] + '_label_v2.json'\nfont = cv2.FONT_HERSHEY_SIMPLEX # 使用默认字体\nchange_parking_state = 0\nshow_parking_num = 1\nchange_parking_num = 2\nframe_car_use_left_button = 3\nstate_mouse_init = 0\ndrag_start = None\nsel = (0, 0, 0, 0) # 鼠标用\nstate_num_key = change_parking_state\nstate_mouse = state_mouse_init\n\nparking_num_block = [] # 车位数据集合(选择跨车位停车时使用),是车位列表中的数字\"0,1\"\nparking_num = [] # 各停车空间的所占的停车位号,parking_num[idx]的内容是parking_num_block中的数字\nstate_loc = [] # 停车状态\npos_show = [] # 显示位置坐标\npos_show_bias = [] # 显示位置偏移后的坐标\ncolor_parking_num = [] # 停车状态颜色\ncolor_parking_space_num = []\ncar_frame_point = []\ncar_frame_point_abs = []\n\nact = 0 # 控制是否连续播放\nstate_time_label_overwrite = 0\ntime_label = ''\n\n\n# 为各项数值初始化\nfor idx, i in enumerate(range(parking_space)):\n parking_num.append([idx])\n state_loc.append(0)\n pos_show.append([0, 0])\n pos_show_bias.append([0, 0])\n color_parking_num.append((255, 0, 255))\n color_parking_space_num.append((255, 0, 0))\n car_frame_point.append([]) # 车辆框架点初始为空\n car_frame_point_abs.append([])\n\n# list_img = os.listdir(path_img)\n# for filename in list_img:\n# if not filename.endswith('jpg'):\n# list_img.remove(filename)\n# list_img.sort() # 排序\n\nfile_list = os.listdir(path_img)\nlist_img = [file for file in file_list if file.endswith('jpg')]\nlist_img.sort() # 排序\nimgs_list_only_time = ['_'.join(i.split('_')[:3]) + '.jpg' for i in list_img] # 以防文件名后为电压等\nos.chdir(path_img)\nim_data = cv2.imread(list_img[0])\n\n# ##############得到图像参数##################\nh, w = im_data.shape[0:2]\n# ##############字体大小和位置参数##################\nx_panel = int(w * 0.83)\ny_panel = int(h * 0.10)\ny_panel_bias = y_panel - int(0.05 * h)\nfont_size = w // 1000\nbias = int(w * 0.025) # 车位与状态标记在图片上的偏移\nbias_y = int(h * -0.05) # 车位idx号位置的偏移\nfont_width = int((h + w) / 1000)\n\n# ##################得到IP地址并人为规定显示位置##################\nimg_dir_name = os.path.basename(path_img)\n\nip_name = img_dir_name\n\nif ip_name in parking_line:\n parking_list_np = np.array(parking_line[ip_name]).astype(int) # 获得车位线\nelse:\n blank_list = []\n for i in range(parking_space):\n blank_list.append([[0, 0], [0, 0], [0, 0], [0, 0]]) # 待修改\n parking_list_np = np.array(blank_list)\n\nif ip_name == 'test_177':\n h_h = 600\n h_l = 1400\nelif ip_name == 
'test_175':\n h_h = 700\n h_l = 1700\nelif ip_name == 'test_176':\n h_h = 500\n h_l = 1400\nelif ip_name == 'test_178':\n h_h = 250\n h_l = 1050\nelif ip_name == 'test_170':\n h_h = 250\n h_l = 1300\nelif ip_name == 'test_211':\n h_h = 350\n h_l = 850\nelif ip_name == 'test_212':\n h_h = 350\n h_l = 850\nelif ip_name == 'test_221':\n h_h = 350\n h_l = 850\nelif ip_name == 'DDZ2G1907ZMY00002SY':\n h_h = 200 # 显示的最上边界\n h_l = 1300 # 显示的最下边界\nelif ip_name == 'DDT2G1907ZMY00008SY':\n h_h = 300 # 显示的最上边界\n h_l = 1400 # 显示的最下边界\nelif ip_name == 'DDT2G1907ZMY00016SY':\n h_h = 600 # 显示的最上边界\n h_l = 1450 # 显示的最下边界\nelse:\n h_h = 1000 # 显示的最上边界\n h_l = 1800 # 显示的最下边界\n\n# 标记窗口大小设置\n# 待修改\nif ip_name == 'test_177':\n # w_w = w//3\n # w_h = (h_l - h_h)//3\n w_w = (w // 7) * 3\n w_h = ((h_l - h_h) // 7) * 3\nelif ip_name == 'test_170':\n w_w = (w // 7) * 3\n w_h = ((h_l - h_h) // 7) * 3\nelif ip_name in ['test_211', 'test_212', 'test_221']:\n w_w = (w // 3) * 3\n w_h = ((h_l - h_h) // 3) * 3\nelse:\n w_w = (w // 5) * 2\n w_h = ((h_l - h_h) // 5) * 2\n\nimg_len = len(list_img)\nidx = 0\nh_0, m_0, s_0 = os.path.splitext(list_img[0])[0].split('_')[:3] # 排序后第一个时间\nh_1, m_1, s_1 = os.path.splitext(list_img[1])[0].split('_')[:3]\n# 两幅图片时间间隔\n# 如果时间间隔不确定,这个就对程序没意义了\ngap_sec = (int(h_1) - int(h_0)) * 3600 + (int(m_1) - int(m_0)) * 60 + (int(s_1) - int(s_0))\none_min = 60 // gap_sec\nthree_mins = 180 // gap_sec\nfive_mins = 300 // gap_sec\nif one_min == 0:\n one_min = 2\nif three_mins == 0:\n three_mins = 6\nif five_mins == 0:\n five_mins = 10\n\n# 得到初始时间秒值\ninitial_time_sec = int(h_0) * 3600 + int(m_0) * 60 + int(s_0)\n\n# 全部图片时间秒\nh_f, m_f, s_f = os.path.splitext(list_img[-1])[0].split('_')[:3]\nh_sum = int(h_f) - int(h_0)\nm_sum = int(m_f) - int(m_0)\ns_sum = int(s_f) - int(s_0)\nsec_sum = h_sum * 3600 + m_sum * 60 + s_sum\n\n# 计算图片最后一张加一秒的时_分_秒表达\nsec_all = int(h_f) * 3600 + int(m_f) * 60 + int(s_f)\nsec_all_plus_one_sec = sec_all + 1\nh_last = sec_all_plus_one_sec // 3600 # 时\nm_last = sec_all_plus_one_sec % 3600 // 60 # 分\ns_last = sec_all_plus_one_sec % 60 # 秒\nlast_time = str(h_last).zfill(2) + '_' + str(m_last).zfill(2) + '_' + str(s_last).zfill(2)\nimgs_last_time_plus_one = last_time # 为了减少改动\n\n\nimg_data = cv2.imread(list_img[0])\n# global record_list\nrecord_list = []\n\nclick_time = 0\n\n\ndef onmouse(event, x, y, flags, param):\n global drag_start, sel\n global click_time\n global pos_show\n global pos_show_bias\n if state_mouse == state_mouse_init:\n if event == cv2.EVENT_LBUTTONDOWN:\n if click_time < parking_space:\n pos_show[click_time] = [x, y]\n pos_show_bias[click_time] = [x + bias, y]\n click_time += 1\n print(x, y)\n if state_mouse == frame_car_use_left_button:\n if event == cv2.EVENT_LBUTTONDOWN:\n drag_start = x, y\n sel = 0, 0, 0, 0\n elif event == cv2.EVENT_LBUTTONUP:\n drag_start = None\n car_frame_point[id_parking_num_change] = [(sel[0], sel[1]), (sel[2], sel[3])]\n car_frame_point_abs[id_parking_num_change] = [(sel[0], sel[1] + h_h), (sel[2], sel[3] + h_h)]\n print('是否保留该边框?是:点击回车确认;否:直接重画')\n # state_mouse = frame_car_use_left_button\n elif drag_start:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n minpos = min(drag_start[0], x), min(drag_start[1], y)\n maxpos = max(drag_start[0], x), max(drag_start[1], y)\n sel = minpos[0], minpos[1], maxpos[0], maxpos[1]\n img = img_data.copy()\n cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0, 255, 255), 4)\n cv2.imshow(\"label\", img)\n\n else:\n print(\"selection is complete\")\n drag_start = None\n\n# 在数字键用来改变停车状态下的简单显示\n\n\ndef 
tmp_print(state_loc):\n for idx_state_loc, i in enumerate(state_loc):\n if idx_state_loc < parking_space - 1:\n print(i, end='\\t')\n else:\n print(i)\n\n# 加减一秒数值状态简单显示\n\n\ndef tmp_print_time_label(state_loc, time_label):\n for i in state_loc:\n print(i, end='\\t')\n print(time_label)\n\n# 加减一秒数值状态删除显示\n\n\ndef tmp_print_for_delete(tmp_np_array):\n for i in tmp_np_array:\n print(i, end='')\n print('<--X')\n\n# 加车牌函数\n\n\ndef add_plate_2_label_v2(data_raw): # 待修改\n parking_state_txt_record_state = []\n for i in range(parking_space):\n parking_state_txt_record_state.append('0') # 初始状态为0\n\n data_updata = [] # 第一行处理方法,见2加车牌\n for i in data_raw:\n data_updata.append(i[0])\n for idx_0 in range(parking_space):\n if (i[idx_0 + 1].split(':')[1] == '2') and (parking_state_txt_record_state[idx_0] == '0'):\n data_updata.append(i[idx_0 + 1] + ':蓝辽Axxxxx')\n else:\n data_updata.append(i[idx_0 + 1])\n if i[idx_0 + 1].split(':')[1] != '1': # 若无此判断,则012的2显示不了车牌\n parking_state_txt_record_state[idx_0] = i[idx_0 + 1].split(':')[1]\n\n data_updata = np.array(data_updata)\n data_updata = data_updata.reshape((-1, parking_space + 1))\n\n return data_updata\n\n\ndef save_json_label(record_list, parking_space, path_json, imgs_last_time_plus_one, imgs_list_only_time):\n record_for_json = create_json_record(record_list, parking_space, imgs_last_time_plus_one, imgs_list_only_time)\n file = open(path_json, 'w', encoding='utf-8')\n json.dump(record_for_json, file, ensure_ascii=False)\n file.close()\n print(f'save json at:{path_json}')\n\n\n# 补全记录(在记录的最后一条补全出车)\ndef complement_record_list(record_list, last_time, parking_space): # 这里的last_time就是最后图片加1s的时间\n record_list_tmp = np.array(record_list) # 得到记录\n record_list_tmp = record_list_tmp.reshape((-1, parking_space + 1)) # 排版\n record_list_last = record_list_tmp[-1] # 得到最后一行\n\n for i in range(parking_space + 1):\n if i == 0:\n info = last_time\n else:\n info_splits = record_list_last[i].split(':')\n if len(info_splits) == 2:\n info = info_splits[0] + ':' + '0'\n elif len(info_splits) == 3:\n info = info_splits[0] + ':' + '0' + ':' + info_splits[2]\n\n record_list.append(info)\n return record_list\n\n\ndef save_label_and_print(record_list, path_txt, parking_space):\n print('============原始数据如下===============')\n record_list = np.array(record_list)\n record_list = record_list.reshape((-1, parking_space + 1))\n for i in record_list:\n for idx_i, j in enumerate(i):\n if idx_i < parking_space:\n print(j, end=' ') # 原始数据加制表\n else:\n print(j) # 最后一个数据加回车\n # 对所有结果按时间由小到大排序\n tmp = record_list.T # np排序方法\n record_list_order = tmp[0].argsort() # 得出比较结果\n record_list_new_order = record_list[record_list_order] # 排序完毕\n # 去掉重复行\n tmp_f = record_list_new_order[0] # 为了去掉重复行\n data_raw = []\n for idx_i, j in enumerate(tmp_f): # 第一行的写法\n if idx_i < parking_space:\n data_raw.append(j)\n else:\n data_raw.append(j)\n\n for i in record_list_new_order[1:]: # 后面的写法\n if (i != tmp_f).any():\n for idx_i, j in enumerate(i):\n if idx_i < parking_space:\n data_raw.append(j)\n else:\n data_raw.append(j)\n tmp_f = i\n\n data_raw = np.array(data_raw)\n data_raw = data_raw.reshape((-1, parking_space + 1))\n\n if ip_name in no_plate_list: # ['177']\n record_list_new_order = data_raw\n else:\n record_list_new_order = add_plate_2_label_v2(data_raw)\n\n # 输出到txt\n path_txt = path_txt\n file = open(path_txt, 'a+', encoding='UTF-8')\n for i in record_list_new_order:\n for idx_i, j in enumerate(i):\n if idx_i < parking_space:\n file.write(j + ' ')\n else:\n file.write(j + '\\n')\n file.close()\n 
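# each saved row: HH_MM_SS then one '[slots]:state[:bbox]' field per space,\n # e.g. '10_23_05 [0]:1 [1]:0 [2,3]:2:[(512,300),(640,420)] [4]:0' (values illustrative only)\n 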
print('save txt at :{}'.format(path_txt))\n\n# 删除一行\n\n\ndef modify_record_list(record_list, list_img, idx):\n # 1.先找当前图片是否存在记录,无则显示无需修改,有则显示列表的idx\n # 2.取出相关记录,使用remove删除该图片名称下全部记录\n # if record_list is None:\n # record_list = []\n # print('记录里啥也没有,没东西可删')\n # return record_list\n # #return要返回值,否则就会把record变成none\n\n if len(record_list) == 0:\n print('记录里啥也没有,没东西可删')\n else:\n tmp_np_array = np.array(record_list)\n tmp_np_array = tmp_np_array.reshape(-1, parking_space + 1)\n delete_time = '_'.join(os.path.splitext(list_img[idx])[0].split('_')[:3])\n\n if (tmp_np_array.T[0] == delete_time).any(): # any:不加括号是bug\n idx_for_delete = [idx for idx, i in enumerate(tmp_np_array.T[0]) if i == delete_time] # 得到索引值\n idx_for_delete = idx_for_delete[::-1] # 从后往前删除,否则idx失效\n tmp_np_array_for_print = tmp_np_array[idx_for_delete] # 先取出要删除的\n for i in idx_for_delete:\n tmp_np_array = np.delete(tmp_np_array, i, 0) # 将要删除的删除\n tmp_print_for_delete(tmp_np_array_for_print) # 显示出已经删除的条目\n tmp_np_array = tmp_np_array.reshape(-1) # 恢复record\n record_list = tmp_np_array.tolist() # 恢复record\n else:\n print('记录中无当前时间条目')\n\n return record_list\n\n\ndef str_space_remove(content):\n temp = ''\n for letter in content:\n if letter != ' ':\n temp += letter\n return temp\n\n\ncv2.namedWindow('label', 0)\ncv2.namedWindow('next', 0)\ncv2.startWindowThread()\ncv2.resizeWindow('label', w_w, w_h) # 宽,高\ncv2.resizeWindow('next', w_w, w_h) # 宽,高\n\n\nwhile idx < img_len - 1:\n idx_next = idx + 1\n img_data = cv2.imread(list_img[idx])\n img_data_next = cv2.imread(list_img[idx_next])\n if img_data is None:\n idx += 1\n continue\n if img_data_next is None:\n if img_data is None:\n idx += 1\n continue\n else:\n idx_next += 1\n continue\n\n img_data = show_parking_line(img_data, parking_list_np, 4) # 画停车线\n img_data = img_data[h_h:h_l, ...]\n img_data_next = img_data_next[h_h:h_l, ...]\n\n h, m, s = os.path.splitext(list_img[idx])[0].split('_')[:3]\n h_p = int(h) - int(h_0)\n m_p = int(m) - int(m_0)\n s_p = int(s) - int(s_0)\n sec_pass = h_p * 3600 + m_p * 60 + s_p\n\n # 显示的是当前时间(使用图片文件名)\n img_data = cv2.putText(img_data, '_'.join([h.zfill(2), m.zfill(2), s.zfill(2)]), (x_panel, y_panel),\n font, font_size, (0, 0, 255), font_width) # 添加文字,字体大小,初始位置,颜色,粗细\n\n # 显示剩余时间\n img_data = cv2.putText(img_data, str(int(sec_sum - sec_pass) // 60) + 'm' + str(int(sec_sum - sec_pass) % 60) + 's', (x_panel, y_panel_bias),\n font, font_size, (0, 255, 0), font_width) # 添加文字,字体大小,初始位置,颜色,粗细\n\n if state_num_key == change_parking_num:\n if len(parking_num_block) > 1: # 车位数据集合(选择跨车位停车时使用)\n parking_num_block.sort()\n parking_num[id_parking_num_change] = parking_num_block # 写进去使命就完成了\n\n for i in range(parking_space):\n if len(car_frame_point[i]) == 0:\n pass\n else:\n # 画图时只使用当前的两点坐标,但是在记入label时,使用绝对值坐标\n img_data = cv2.rectangle(img_data, car_frame_point[i][0], car_frame_point[i][1], (0, 255, 255), 4)\n\n for i in range(parking_space):\n\n if state_num_key == show_parking_num:\n color_parking_space_num[i] = (0, 255, 255)\n elif state_num_key == change_parking_state:\n color_parking_space_num[i] = (255, 0, 0)\n elif state_num_key == change_parking_num:\n color_parking_space_num[i] = (255, 0, 0)\n\n if state_num_key == show_parking_num:\n color_parking_num[i] = (255, 0, 255)\n elif state_num_key == change_parking_state:\n color_parking_num[i] = (255, 0, 255)\n elif state_num_key == change_parking_num:\n color_parking_num[i] = (255, 0, 255)\n color_parking_num[id_parking_num_change] = (0, 255, 255)\n # 车位idx\n img_data = cv2.putText(img_data, 
str(i),\n (pos_show[i][0], pos_show[i][1] + bias_y), font, font_size, color_parking_space_num[i], font_width)\n\n # 车位\n img_data = cv2.putText(img_data, str(parking_num[i]),\n (pos_show[i][0], pos_show[i][1]), font, font_size, color_parking_num[i], font_width)\n\n # 状态标记\n img_data = cv2.putText(img_data, str(int(state_loc[i])),\n (pos_show_bias[i][0], pos_show_bias[i][1] + bias_y), font, font_size + 1, (0, 0, 255), font_width)\n\n # cv2.namedWindow('label',0)\n # cv2.resizeWindow('label',w_w ,w_h) #宽,高\n cv2.setMouseCallback('label', onmouse, 0) # 鼠标操作回调函数\n # cv2.startWindowThread()\n cv2.imshow('label', img_data)\n\n # cv2.namedWindow('next',0)\n # cv2.resizeWindow('next',w_w ,w_h) #宽,高\n cv2.imshow('next', img_data_next)\n\n key = cv2.waitKeyEx(1) # 等待按键\n if act == 1:\n idx += 1\n\n if key == 27: # ESC\n break\n elif key == ord(' '): # 空格控制act\n if act:\n act = 0\n else:\n act = 1\n elif key == ord('s') or key == ord('S'): # s 前进3min\n idx += three_mins\n elif key == ord('d') or key == ord('D'): # d前进5min\n idx += five_mins\n elif key == ord('a') or key == ord('A'): # a后退5min\n if idx > 0:\n idx -= five_mins\n else:\n idx = 0\n elif key == ord('w') or key == ord('W'): # w 后退3min\n if idx > 0:\n idx -= three_mins\n else:\n idx = 0\n elif key == 2490368: # 上\n if idx > 0:\n idx -= 1\n else:\n idx = 0\n\n elif key == 2621440: # 下\n idx += 1\n elif key == 2424832: # 左\n if idx > 10:\n idx -= one_min # 1min\n else:\n idx = 0\n elif key == 2555904: # 右\n idx += one_min # 1min\n elif key == 13: # p或回车\n if state_mouse == frame_car_use_left_button:\n # 待改进\n print(f'两点当前坐标为({sel[0]}, {sel[1]}), ({sel[2]}, {sel[3]})')\n print(f'两点绝对坐标为({sel[0]}, {sel[1]+h_h}), ({sel[2]}, {sel[3]+h_h})')\n state_mouse = state_mouse_init # 将状态返回为初始化\n continue\n\n if state_num_key == change_parking_num:\n if len(parking_num_block) == 0:\n parking_num_block = [] # 为其复位,归空\n print('请赋值')\n elif len(parking_num_block) == 1: # 列表里只有一个值\n state_num_key = change_parking_state\n # car_frame_point[parking_num_block[0]] = [] # 取出唯一的值作为索引,并将里面的值去掉,成为[],这样不对\n car_frame_point[id_parking_num_change] = [] # 取出唯一的值作为索引,并将里面的值去掉,成为[]\n parking_num_block = [] # 为其复位,归空\n print('赋值已完成,不需要画框了')\n elif len(parking_num_block) > 1:\n state_num_key = change_parking_state\n state_mouse = frame_car_use_left_button\n parking_num_block = [] # 为其复位,归空\n print(\"\\n赋值结束,请用鼠标左键为车画框\")\n elif state_num_key == change_parking_state:\n if state_time_label_overwrite == 0: # 如果秒数没有被改写\n # record_list.append(os.path.splitext(list_img[idx])[0]) # 先记录时间\n record_list.append('_'.join([h.zfill(2), m.zfill(2), s.zfill(2)])) # 先记录时间\n for idx_state_loc, i in enumerate(state_loc): # 再记录数据\n parking_num_str = str_space_remove(str(parking_num[idx_state_loc]))\n if len(car_frame_point[idx_state_loc]) == 0:\n record_list.append(parking_num_str + ':' + str(i))\n else:\n car_frame_point_abs_str = str_space_remove(str(car_frame_point_abs[idx_state_loc]))\n record_list.append(parking_num_str + ':' + str(i) + ':' + car_frame_point_abs_str)\n\n for i in state_loc:\n print(i, end='\\t')\n # print(os.path.splitext(list_img[idx])[0], end=' ')\n print('_'.join([h.zfill(2), m.zfill(2), s.zfill(2)]), end=' ')\n print('<---')\n\n else:\n record_list.append(time_label) # 先记录时间\n for idx_state_loc, i in enumerate(state_loc): # 再记录数据\n parking_num_str = str_space_remove(str(parking_num[idx_state_loc]))\n if len(car_frame_point[idx_state_loc]) == 0:\n record_list.append(parking_num_str + ':' + str(i))\n else:\n car_frame_point_abs_str = 
str_space_remove(str(car_frame_point_abs[idx_state_loc]))\n record_list.append(parking_num_str + ':' + str(i) + ':' + car_frame_point_abs_str)\n\n for i in state_loc:\n print(i, end='\\t')\n print(time_label, end=' ')\n print('<---')\n\n state_time_label_overwrite = 0 # 更改状态\n elif (key == ord('p')) or key == ord('P'): # p\n if state_num_key == change_parking_state:\n print(\"点击数字选择一个要更改的停车区域,从零开始\", end=' ')\n state_num_key = show_parking_num\n\n elif (key == ord('o')) or key == ord('O'): # o或O\n if state_num_key == show_parking_num:\n print(\"\\n状态恢复\")\n state_num_key = change_parking_state\n\n elif key == ord('f') or key == ord('F'): # f 时间标签增加1s\n tmp = sec_pass + initial_time_sec\n tmp += 1\n time_label = f'{tmp//3600:02d}_{tmp%3600//60:02d}_{tmp%60:02d}' # 上下都行\n state_time_label_overwrite = 1\n tmp_print_time_label(state_loc, time_label)\n\n elif key == ord('r') or key == ord('R'): # r 时间标签减少1s\n tmp = sec_pass + initial_time_sec\n tmp -= 1\n time_label = f'{tmp//3600:02d}_{tmp%3600//60:02d}_{tmp%60:02d}' # 上下都行\n state_time_label_overwrite = 1\n tmp_print_time_label(state_loc, time_label)\n\n elif key == ord('m') or key == ord('M'): # m 减去当前图片名下的一条数据\n record_list = modify_record_list(record_list, list_img, idx)\n\n elif key == ord('l') or key == ord('L'): # l leave 离开需要马上打印\n if len(record_list) != 0: \n record_list = complement_record_list(record_list, last_time, parking_space)\n save_label_and_print(record_list, path_txt, parking_space)\n else:\n print('there are no records to record !!!')\n # save_json_label(record_list, parking_space, path_json)\n\n elif key == 48 and parking_space > 0: # 0\n if state_num_key == change_parking_state:\n if state_loc[0] == 2:\n state_loc[0] = 0\n else:\n state_loc[0] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 0\n print('0')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(0)\n print('0', end=' ')\n\n elif key == 49 and parking_space > 1: # 1\n if state_num_key == change_parking_state: # 若数字键用来改变停车位状态\n if state_loc[1] == 2: # 修改的状态位置是0-1-2\n state_loc[1] = 0\n else:\n state_loc[1] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num: # 若数字键用来选择车位索引\n id_parking_num_change = 1 # 先确定索引的位置,若不同车在相同车位,这个索引也是不同的\n print('1')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num: # 若数字键用来标记车位\n parking_num_block.append(1)\n print('1', end=' ')\n\n elif key == 50 and parking_space > 2: # 2\n if state_num_key == change_parking_state:\n if state_loc[2] == 2:\n state_loc[2] = 0\n else:\n state_loc[2] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 2\n print('2')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(2)\n print('2', end=' ')\n\n elif key == 51 and parking_space > 3: # 3\n if state_num_key == change_parking_state:\n if state_loc[3] == 2:\n state_loc[3] = 0\n else:\n state_loc[3] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 3\n print('3')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(3)\n print('3', end=' ')\n\n elif key == 52 and parking_space > 4: # 4\n if state_num_key == change_parking_state:\n if state_loc[4] == 2:\n 
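# each digit key cycles its slot's state 0 -> 1 -> 2 -> 0\n 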
state_loc[4] = 0\n else:\n state_loc[4] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 4\n print('4')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(4)\n print('4', end=' ')\n\n elif key == 53 and parking_space > 5: # 5\n if state_num_key == change_parking_state:\n if state_loc[5] == 2:\n state_loc[5] = 0\n else:\n state_loc[5] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 5\n print('5')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(5)\n print('5', end=' ')\n\n elif key == 54 and parking_space > 6: # 6\n if state_num_key == change_parking_state:\n if state_loc[6] == 2:\n state_loc[6] = 0\n else:\n state_loc[6] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 6\n print('6')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(6)\n print('6', end=' ')\n\n elif key == 55 and parking_space > 7: # 7\n if state_num_key == change_parking_state:\n if state_loc[7] == 2:\n state_loc[7] = 0\n else:\n state_loc[7] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 7\n print('7')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(7)\n print('7', end=' ')\n\n elif key == 56 and parking_space > 8: # 8\n if state_num_key == change_parking_state:\n if state_loc[8] == 2:\n state_loc[8] = 0\n else:\n state_loc[8] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 8\n print('8')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(8)\n print('8', end=' ')\n\n elif key == 57 and parking_space > 9: # 9\n if state_num_key == change_parking_state:\n if state_loc[9] == 2:\n state_loc[9] = 0\n else:\n state_loc[9] += 1\n tmp_print(state_loc)\n elif state_num_key == show_parking_num:\n id_parking_num_change = 9\n print('9')\n print('按数字选择该空间所占车位,按回车确定', end=' ')\n state_num_key = change_parking_num\n elif state_num_key == change_parking_num:\n parking_num_block.append(9)\n print('9', end=' ')\n\n elif key == 57: # 9\n record_list = complement_record_list(record_list, last_time, parking_space)\n save_label_and_print(record_list, path_txt, parking_space)\n # save_json_label(record_list, parking_space, path_json, imgs_last_time_plus_one, imgs_list_only_time)\n\nif key != 27: # ESC没按输出结果到txt\n record_list = complement_record_list(record_list, last_time, parking_space)\n save_label_and_print(record_list, path_txt, parking_space)\n # save_json_label(record_list, parking_space, path_json, imgs_last_time_plus_one, imgs_list_only_time)\n\n\ncv2.destroyAllWindows()\nprint('程序结束')\n","sub_path":"label_tool_v3.py","file_name":"label_tool_v3.py","file_ext":"py","file_size_in_byte":30596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137652206","text":"import argparse\nimport logging\n\nfrom .tag_datasource_processor import TagDatasourceProcessor\n\n\nclass TagManagerCLI:\n\n @classmethod\n def run(cls, argv):\n cls.__setup_logging()\n\n args = 
cls._parse_args(argv)\n args.func(args)\n\n @classmethod\n def __setup_logging(cls):\n logging.basicConfig(level=logging.INFO)\n\n @classmethod\n def _parse_args(cls, argv):\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n\n subparsers = parser.add_subparsers()\n\n create_tags_parser = subparsers.add_parser('create-tags', help='Create Tags')\n create_tags_parser.add_argument('--csv-file', help='CSV file with Tags information', required=True)\n create_tags_parser.set_defaults(func=cls.__create_tags)\n\n return parser.parse_args(argv)\n\n @classmethod\n def __create_tags(cls, args):\n TagDatasourceProcessor().create_tags_from_csv(file_path=args.csv_file)\n","sub_path":"src/datacatalog_tag_manager/tag_manager_cli.py","file_name":"tag_manager_cli.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415911719","text":"# -*- coding: utf-8 -*-\r\n\r\nimport csv\r\nimport json\r\nimport logging\r\n\r\nfrom Cuisinier import Recipe, ClassifiedRecipe\r\nfrom CuisinierTFIDF import CuisinierTFIDF\r\n\r\nLOGGING_LEVEL = logging.INFO\r\nTRAINING_FILE = \"resources/train.json\"\r\nTEST_FILE = \"resources/test.json\"\r\n\r\n# Configure logging\r\nlogging.basicConfig(filename=\"log.txt\", filemode=\"w\", level=LOGGING_LEVEL)\r\n\r\n\r\ndef getClassifiedRecipes(file):\r\n f = open(file)\r\n recipes = json.loads(f.read())\r\n f.close()\r\n return [ClassifiedRecipe(recipe[\"id\"], recipe[\"cuisine\"],\r\n recipe[\"ingredients\"])\r\n for recipe in recipes]\r\n\r\n\r\ndef getRecipes(file):\r\n f = open(file)\r\n recipes = json.loads(f.read())\r\n f.close()\r\n return [Recipe(recipe[\"id\"], recipe[\"ingredients\"]) for recipe in recipes]\r\n\r\n\r\n# Writes a list of classified recipes to a given file in CSV format\r\ndef writeRecipesCSV(file, recipes):\r\n print((\"Writing recipes to \\\"\" + file + \"\\\"\"))\r\n with open(file, \"w\", newline=\"\") as fileToWrite:\r\n csv_writer = csv.writer(fileToWrite, delimiter=\",\", quotechar=\"|\",\r\n quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow([\"id\", \"cuisine\"])\r\n\r\n for recipe in recipes:\r\n csv_writer.writerow([recipe.id, recipe.cuisine])\r\n\r\n\r\n# Tests a Cuisinier against its own training data\r\ndef selfTest(cuisinier):\r\n # Read and parse JSON data\r\n recipes = getClassifiedRecipes(TRAINING_FILE)\r\n cuisinier.addRecipes(recipes)\r\n\r\n success = 0\r\n for recipe in recipes:\r\n result = cuisinier.classifyRecipe(Recipe(recipe.id,\r\n recipe.ingredients))\r\n if result.cuisine == recipe.cuisine:\r\n success += 1\r\n\r\n print((cuisinier.getAlgorithmType() + \" self-test accuracy: \" +\r\n str(success) + \"/\" + str(len(recipes)) +\r\n \" (\" + \"{0:.2f}\".format(success/len(recipes) * 100) + \"%)\"))\r\n\r\n\r\n# Run test data and output the results\r\ndef generateSubmission(cuisinier):\r\n # Read and parse JSON data\r\n trainingRecipes = getClassifiedRecipes(TRAINING_FILE)\r\n testRecipes = getRecipes(TEST_FILE)\r\n\r\n cuisinierType = cuisinier.getAlgorithmType()\r\n cuisinier.addRecipes(trainingRecipes)\r\n print((\"Classifying \" + str(len(testRecipes)) + \" recipes with \" +\r\n cuisinierType))\r\n writeRecipesCSV(\"results\" + cuisinierType + \".csv\",\r\n cuisinier.classifyRecipes(testRecipes))\r\n print((cuisinierType + \" classification complete\"))\r\n\r\n\r\ndef test():\r\n recipesToClassify = getRecipes(TEST_FILE)\r\n recipes = 
getClassifiedRecipes(TRAINING_FILE)\r\n cuisinier = Cuisinier()\r\n cuisinier.addRecipes(recipes)\r\n\r\n with open('submissionData.csv', 'wb') as fileToWrite:\r\n csv_writer = csv.writer(fileToWrite)\r\n csv_writer = csv.writer(fileToWrite, delimiter=',',\r\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow([\"id\",\"cuisine\"])\r\n\r\n success = 0\r\n for recipe in recipesToClassify:\r\n result = cuisinier.classifyRecipe(Recipe(recipe.id,\r\n recipe.ingredients))\r\n csv_writer.writerow([recipe.id, result.cuisine])\r\n\r\n\r\ndef main():\r\n cuisinier = CuisinierTFIDF()\r\n # Calculate initial accuracy\r\n selfTest(cuisinier)\r\n generateSubmission(cuisinier)\r\n\r\nmain()\r\n","sub_path":"data/external/repositories_2to3/248568/cuisinier-master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"278696149","text":"\nimport random\nimport time\nimport pygame\npygame.init()\nrelogio = pygame.time.Clock()\npygame.display.set_caption(\"Alfabetizando\")\n\nicon = pygame.image.load(\"assets/icon.jpg\")\npygame.display.set_icon(icon)\nlargura = 800\naltura = 600\ndisplay = pygame.display.set_mode((largura, altura))\nbackground = pygame.image.load(\"assets/sky.png\")\nlapis = pygame.image.load(\"assets/lapis.png\")\nlapisLargura = 260\nlapisPosicaoX = 360\nlapisPosicaoY = 470\nlapisMovimento = 0\nlapisVelocidade = 10\nmissel = pygame.image.load(\"assets/missile.png\")\nmisselLargura = 50\nmisselAltura = 250\nmisselPosicaoX = 360\nmisselPosicaoY = 10 - misselAltura\nmisselMovimento = 0\nvelocidadeMissel = 5\npygame.mixer.music.load('assets/ironsound.mp3')\npygame.mixer.music.play(-1)\nmisselSound = pygame.mixer.Sound(\"assets/missile.wav\")\nmisselSound.set_volume(0.1)\npygame.mixer.Sound.play(misselSound)\npygame.mixer.music.set_volume(0.1)\nexplosaoSound = pygame.mixer.Sound(\"assets/explosao.wav\")\nexplosaoSound.set_volume(0.2)\n\ndef escrevendoPlacar(contador):\n font = pygame.font.SysFont(None, 25)\n texto = font.render(\"Desvios: \"+str(contador), True, (255, 255, 255))\n display.blit(texto, (10, 10))\ndef dead():\n pygame.mixer.Sound.play(explosaoSound)\n pygame.mixer.music.stop()\n font = pygame.font.SysFont(None, 150)\n texto = font.render(\"Você Morreu!\", True, (0, 0, 0))\n display.blit(texto, (100, 200))\n pygame.display.update()\n time.sleep(5)\ncontador = 0\nwhile True:\n # Trabalhar com Background\n display.fill((255, 255, 255))\n display.blit(background, (0, 0))\n # devolve uma lista de eventos da tela []\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n pygame.quit()\n quit()\n if evento.type == pygame.KEYDOWN:\n if evento.key == pygame.K_LEFT:\n lapisMovimento = lapisVelocidade * -1\n elif evento.key == pygame.K_RIGHT:\n lapisMovimento = lapisVelocidade\n if evento.type == pygame.KEYUP:\n lapisMovimento = 0\n lapisPosicaoX = lapisPosicaoX + lapisMovimento\n if lapisPosicaoX < 0:\n lapisPosicaoX = 0\n elif lapisPosicaoX > largura - lapisLargura:\n lapisPosicaoX = largura - lapisLargura\n display.blit(lapis, (lapisPosicaoX, lapisPosicaoY))\n misselPosicaoY = misselPosicaoY + velocidadeMissel\n escrevendoPlacar(contador)\n # controlando o míssel novo\n if misselPosicaoY > altura:\n misselPosicaoY = 10 - misselAltura\n velocidadeMissel = velocidadeMissel + 1\n pygame.mixer.Sound.play(misselSound)\n misselPosicaoX = random.randrange(0, largura)\n contador += 1 # contator = contator + 1\n display.blit(missel, 
(misselPosicaoX, misselPosicaoY))\n # Verificando Colisões\n if lapisPosicaoY < misselPosicaoY + misselAltura:\n if lapisPosicaoX < misselPosicaoX and lapisPosicaoX + lapisLargura > misselPosicaoX or misselPosicaoX+misselLargura > lapisPosicaoX and misselPosicaoX+misselLargura < lapisPosicaoX+lapisLargura:\n dead()\n velocidadeMissel = 5\n misselPosicaoY = 0 - misselAltura\n contador = 0\n pygame.display.update()\n relogio.tick(60)\nprint(\"Volte Sempre....\")\n","sub_path":"codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"601346077","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom flask import Flask\nimport os\nimport logging\nfrom logging import Formatter\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask.ext.login import LoginManager, login_user, logout_user, current_user, login_required\nfrom newlife.view import front\nfrom newlife.view import admin\nfrom newlife.view import install\nfrom newlife.model import Admin\n\n\ndef creat_app(config):\n app = Flask(__name__)\n app.debug = True\n app.config.from_pyfile(config)\n configure_logging(app)\n temple_filter(app)\n app.register_blueprint(front.blueprint)\n app.register_blueprint(admin.blueprint)\n app.register_blueprint(install.blueprint)\n\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = \"admin.login\"\n\n @login_manager.user_loader\n def load_user(userid):\n admin = Admin()\n admin.get_user_id(userid)\n return admin\n return app\n\n\ndef configure_logging(app):\n mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config[\n 'MAIL_SENDER'], app.config['MAIL_RECEIVER'], 'YourApplication Failed')\n\n mail_handler.setLevel(logging.ERROR)\n mail_handler.setFormatter(Formatter('''\n Message type: %(levelname)s\n Location: %(pathname)s:%(lineno)d\n Module: %(module)s\n Function: %(funcName)s\n Time: %(asctime)s\n\n Message:\n\n %(message)s\n '''))\n app.logger.addHandler(mail_handler)\n\n file_log = os.path.join(app.root_path,\n app.config['FILE_LOG'])\n\n file_handler = RotatingFileHandler(\n file_log, maxBytes=100000, backupCount=10)\n\n file_handler.setLevel(logging.WARNING)\n file_handler.setFormatter(Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n ))\n app.logger.addHandler(file_handler)\n\n\ndef temple_filter(app):\n @app.template_filter()\n def monthformat(datetime):\n return datetime.strftime(\"%b\")\n\n @app.template_filter()\n def dayformat(datetime):\n return datetime.strftime(\"%d\")\n\n @app.template_filter()\n def dateformat(datetime):\n return datetime.strftime(\"%B %d\")\n\n @app.template_filter()\n def dateformatY(datetime):\n return datetime.strftime(\"%B %d %Y\")\n\n @app.template_filter()\n def limit_file(size):\n return int(size/1024/1024)\n","sub_path":"newlife/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255388362","text":"# # 8*8 체스판 추출하기\nm, n = map(int, input().split())\na = [input() for _ in range(m)]\nprint()\n\n# 왜틀림\n# res = None\n# for i in range(m-7): # 세로줄 8개씩 더 탐색할 반복수\n# for k in range(n -7): # 가로줄 8개씩 더 탐색할 반복수\n# # print(f'k: {a[j][k:]}', end = ' ')\n# # print(f'k: {k}')\n# pat = {0: 'W', 1:'B'} if a[i][0] == 'W' else {0: 'B', 1:'W'} # 여기가 재선언 되는 문제를 막아야함\n# # print(f'start: {k}, {i}')\n# temp = 0\n# for j in range(i, 8 + 
i): # 세로줄 탐색 시작수\n# # print(f'j: {j}')\n# # print(a[j])\n# t1 = []\n# cnt = 0\n# for l in range(k, 8+k): # 가로줄 탐색 시작인덱스\n# # print(f'l: {l}')\n# # print(a[j][l], end='')\n# # print(pat[l % 2], end='')\n# t1.append(pat[cnt % 2])\n# if a[j][l] != pat[cnt % 2]:\n# temp+=1\n# cnt +=1\n# # print()\n# # print(''.join(t1))\n# # print(temp)\n# pat[0], pat[1] = pat[1], pat[0]\n# # print('----------------')\n# # print(f'temp: {temp}, res: {res}')\n# if res is None : res = temp\n# elif res > temp : res = temp\n# # print('=============\\n')\n# print(res)\n\n# 결국 풀이를 봤습니다........\nz = []\nfor i in range(m-7): # 세로줄 8개씩 더 탐색할 반복수\n for k in range(n -7): # 가로줄 8개씩 더 탐색할 반복수\n w , b = 0, 0 # w로 시작할때랑 b로 시작할때 칠해야할부분 \n for j in range(i, 8 + i): # 세로줄 탐색 시작수\n for l in range(k, 8+k): # 가로줄 탐색 시작인덱스\n if (j + l) % 2 == 0 : # 시작위치 다음부터의 홀짝순서를 정할 수 있음.. 0,0 0,1 0,2 ... 1,0 1,1 1,2 각위치를 더하면 짝홀짝, 홀짝홀 이래되니\n if a[j][l] == 'W': w +=1 # 짝수가 W로 시작했다면\n else: b+=1 # B로시작했음\n else:\n if a[j][l] == 'B': w +=1 # 홀수는 B일때 세줘야함\n else: b+=1 # 홀수가 W일때 계산.\n z.append(w)\n z.append(b)\n\nprint(min(z)) # 즉, 이 배열은 w,b 중 어느 하나로 시작했을 모든 경우를 담아두는 배열임. 그러니 최소값만 구하면 되겠네\n # print('=============\\n')\n # print(z)\n# print(res)","sub_path":"step/level 11 (무차별 대입{brute force})/1018_체스판 다시 칠하기.py","file_name":"1018_체스판 다시 칠하기.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116387160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 31 18:43:42 2018\n\n@author: sana\n\"\"\"\n\n'''Generating input and data file for second relax to the system '''\n'''lastData.xyz contains coordinates and atom type of the relaxed system at low temperature'''\n\n\n\nimport numpy as np\n\n\n\n\n\n\n'''conversion from real to lj unit'''\n\nsigma = 2.5 #bead size in angstrom unit\nepsilon = 0.2 # in kcal/mol\n\nepsilonH = 1.0/epsilon # strength of hydrophobic interaction \n\nmassReal = 96.0\nmassReduced = 96.0/massReal # mass is 1.0 is reduced unit. It is taken each bead is of same mass i.e. 96 Da i.e. 96 gram/mole\n\n#Bond coefficients\n\nl0 = 3.0/sigma # equilibrium bond length 3.0 angstrom unit\n#kb = round((1./2.)*100.*epsilonH/l0**2.,3) # 1/2 100 epsilonH /a^2\nkb = 171.0 * (sigma**2)/epsilon # harmonic constant for the harmonic potential. value is 171 kcal/mol/Angstrom^2 ( energy/distance^2)kb\n\n\n#Angle coefficients\n\n#kTheta = (1./2.)*20.*epsilonH # in unit (energy/radian^2)\nkTheta = 50./epsilon # in unit of energy----Our angle potential is cosine/squared K(costheta - costheta0)^2\ntheta0 = 105. 
# in degrees\n\n\n#Dihedral coefficients \n\n#diheA = 1.0*epsilonH\n#diheB = 1.6*epsilonH\n#diheC = 1.0*epsilonH\n\n\n#dielectric constant\ndielectric = 80.0\n\n#pair potential coordinates\n\nsigmalj = 3.0\nepsilonlj = 0.2\nrlj = 2.5 # global cutoff for the lj potential is taken as 2.5*sigma\nrlocal= ((2.0)**(1.0/6.0))*sigmalj/sigma # local cutoff for lj interaction to include only repulsive part---r at minimum well in the potential plot\n\n\nsaltConc = 0.6 # salt concentration 0.6 moles/ltr\nkappa = round(0.33*np.sqrt(saltConc)*sigma,3) # debye screening length in reduced unit---for dielectric 80 and at temp 25 degree Celsius, kappa = 0.33(sqrt(I)) in per Angstrom unit\nrcoul = 10.0/kappa # to calculate cutoff for coulomb interaction, usually, kappa*cutoff = 10.\n\n\n\n\n\n\n'''for incorporating hydrogen bonding, I tried to modify gauss potential\nof the form E = -A exp(-Br^2)\nA is in unit of energy, B in unit of per distance square and we need cutoff... I choose it to be 2 sigma\nFor hydrogen bonding, energy would be 2-12 kBT or 6-30 KJpermole\nI choose A to be 5.68kBT and B to be 0.1/sigma square '''\n\n\n\n\ngaussA = 6.612/epsilon # 1 KBT (20 degree C) is 0.58kcal/mole---For now I choose 10kBT at the point where there is cutoff for excluded volume\ngaussB = 0.1 # 0.1/sigma square in reduced unit will be 0.1\ngaussCut = (15*sigma)/sigma\n\n\n\n\n'''time conversion'''\ntimeConversion = 4200*1000/(10**(-10))**2 # conversion from kcal/Angstrom^2 to gram meter^2 /sec^2 meter^2\nljUnitTime = ((epsilon/(massReal* (sigma)**2)*timeConversion)**(1.0/2.0))/10**15 # conversion factor for time is 0.0003742 (epsilon/(mass* (sigma)^2))^(1/2) in unit per second \ntau = 10000 * ljUnitTime # tau is 10 picosecond --the value mentioned here is in femtosecond unit 1 pico second = 1000 femtosecond\ndt = 0.00005*tau # dt = 0.5 femtosecond\n\n\n\n\n'''charge conversion'''\npivalue = np.pi \npermittivity = 8.85*10**(-12) # Its unit is (C^2 sec^2)/(kg meter^3)\navogadoNum = 6.023 * 10**(23)\nconversion = 4200*10**(-10)\nljUnitcharge = np.sqrt(4*pivalue*epsilon*sigma*permittivity*conversion/avogadoNum) #charge conversion (4 pi permittivity sigma epsilon)^(1/2)\nchargeValue = round((1.6*10**(-19))/ljUnitcharge, 3) # value of 1 e charge in reduced unit \n\n\n\nkBT = 0.58 # Value in kcal/mole in 293 K i.e. 20 C temperature \ntempReduced = kBT/epsilon\n\n\n\n\n'''Pore dimensions'''\n\n\nradius = 10.0/(2.0*sigma) # Diameter of beta barrel in ahl is 1.4nm\nheight = 50.0/sigma # Height of pore is chosen to be 5nm \nzvalue = int(height/2.)+1\n\n\n\n'''I choose x and y range to go from -20 sigma unit to +20 sigma unit'''\nxStart = -20\nxEnd = 20\nyStart = -20\nyEnd = 20\n\n'''(0,0,0) lies in the Up wall'''\n\nzUp = height/2. 
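\n# wall planes sit at z = +height/2 and z = -height/2 in reduced units; dz below is their separation converted back to Angstroms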
\nzDown = -height/2.\ndz = (zUp-zDown)*sigma #in real units\n\nresType = 26\nporeResType = resType\n\n\n\n\n\n\n\n\n\n\n'''Define parameters needed for the data file'''\n\nnumResidue = 72\nnResidue = 3*numResidue\n\n#nTail = 10 \nnMobile = nResidue\n\n\n\nnBonds = 3*numResidue-1\nnAngle = 3*numResidue -2 # Each residue has 2 backbone angles and one side angle ---the first one has 2 and the last one has 2\n#nDihedral= 2*(numResidue-1) -1 # Each residue has 2 backbone dihedrals (Phos-Sugar-Phos-Sugar, Sugar-Phos-Sugar-Phos); the last residue does not start one, the second-to-last has only PSPS \nnDihedral= 0 \nnImproper = 0\n\n\nnAtomType = 25+1\nnBondType = 1\nnangleType = 2\n#ndiheType = 1\nndiheType = 0\nimproType = 0\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''Read lastData.xyz and get coordinates'''\n\n\nreaddata= open('lastData.xyz','r')\nfirstline = readdata.readline()\nsecondline = readdata.readline()\ninputdata = readdata.readlines() #coordinates data file\n\nnTotal = int(firstline) \n\n'''Generate data file for the translocation simulation'''\n\noutputatoms = open('relax.data','w')\n\noutputatoms.write(\"LAMMPS Description\\n\\n\")\n\noutputatoms.write('{0} {1}\\n'.format(nTotal, \"atoms\"))\noutputatoms.write('{0} {1}\\n'.format(int(nBonds), \"bonds\"))\noutputatoms.write('{0} {1}\\n'.format(int(nAngle), \"angles\"))\noutputatoms.write('{0} {1}\\n'.format(int(nDihedral), \"dihedrals\"))\noutputatoms.write('{0} {1}\\n\\n'.format(int(nImproper), \"impropers\"))\n\noutputatoms.write('{0} {1} {2}\\n'.format(int(nAtomType), \"atom\", \"types\"))\noutputatoms.write('{0} {1} {2}\\n'.format(int(nBondType), \"bond\", \"types\"))\noutputatoms.write('{0} {1} {2}\\n'.format(int(nangleType), \"angle\", \"types\"))\noutputatoms.write('{0} {1} {2}\\n'.format(int(ndiheType), \"dihedral\", \"types\"))\noutputatoms.write('{0} {1} {2}\\n\\n'.format(int(improType), \"improper\", \"types\"))\n\n\noutputatoms.write('{0} {1} {2} {3}\\n'.format(\"-40.0\", \"40.0\", \"xlo\", \"xhi\" ))\noutputatoms.write('{0} {1} {2} {3}\\n'.format(\"-40.0\", \"40.0\", \"ylo\", \"yhi\" ))\noutputatoms.write('{0} {1} {2} {3}\\n\\n'.format(\"-140.0\", \"140.0\", \"zlo\", \"zhi\" ))\n\n\noutputatoms.write(\"Masses\\n\\n\")\nfor j in range(nAtomType):\n outputatoms.write('{0} {1}\\n'.format(int(j+1), massReduced))\n\noutputatoms.write(\"\\nAtoms\\n\\n\")\n\n'''split contents of lines from the third line of the xyz file'''\n\nchargePhos = -1.*chargeValue\n\ncoordAll=np.zeros((nTotal,6))\nresidueType=np.zeros(nTotal)\ncounter = 1\n\nfor i in range(nTotal):\n if i<int(nResidue):\n residueType[i]=int(round((i)/3) +1)\n else:\n residueType[i] = int(numResidue+1)\n \n \n\nfor idx,lines in enumerate(inputdata):\n everyline = lines.split() \n if everyline[0]==\"1\":\n coordAll[idx][0]=counter\n coordAll[idx][1]=float(everyline[0])\n coordAll[idx][2]=round(chargePhos,3)\n coordAll[idx][3]=float(everyline[1])\n coordAll[idx][4]=float(everyline[2])\n coordAll[idx][5]=float(everyline[3]) \n else:\n coordAll[idx][0]=counter\n coordAll[idx][1]=float(everyline[0])\n coordAll[idx][2]=0.0\n coordAll[idx][3]=float(everyline[1])\n coordAll[idx][4]=float(everyline[2])\n coordAll[idx][5]=float(everyline[3])\n counter = counter+1\n\nfor index in range(nTotal):\n outputatoms.write(\"{} {} {} {} {} {} {}\\n\".format(int(coordAll[index][0]), int(residueType[index]), int(coordAll[index][1]), coordAll[index][2], coordAll[index][3], coordAll[index][4], coordAll[index][5])) \n \n \n'''Generate bonds and angles'''\n\noutputatoms.write('\\n Bonds \\n\\n')\n\n\n\nbondType = \"1\" \nnum = 0\nfor index in range(numResidue-1):\n num = num +1\n outputatoms.write('{0} {1} {2} {3}\\n'.format(int(num), bondType, int((index+1)*3 - 2), int(index+1)*3 -1))\n num = num +1\n outputatoms.write('{0} {1} {2} {3}\\n'.format(int(num), bondType, int((index+1)*3 - 1), int(index+1)*3 -0))\n num = num+1\n outputatoms.write('{0} {1} {2} {3}\\n'.format(int(num), bondType, int((index+1)*3 - 1), int(index+1)*3 +1))\n\n'''last bond'''\nindex = int((index+1)*3 +1)\nnum = num+1\noutputatoms.write('{0} {1} {2} {3}\\n'.format(int(num), bondType, int(index), int(index+1) ))\nnum = num +1\noutputatoms.write('{0} {1} {2} {3}\\n'.format(int(num), bondType, int(index+1), int(index+2) ))\n\n\n\n\n\n\n'''Generate list of angles'''\n\noutputatoms.write(\"\\nAngles\\n\\n\")\nnum =0\n\n'''backbone angles --Phosphate-Sugar-Phosphate, Sugar-Phosphate-Sugar'''\n\nangleType = \"1\"\n\n'''first angle'''\nnum =num+1\noutputatoms.write('{} {} {} {} {}\\n'.format( int(num), angleType, \"1\", \"2\", \"4\"))\n'''in between'''\nfor index in range(numResidue-2):\n num =num+1\n outputatoms.write('{} {} {} {} {}\\n'.format( int(num), angleType, int((index+1)*3 - 1), int((index+1)*3 +1), int((index+1)*3+2) ))\n num = num+1\n outputatoms.write('{} {} {} {} {}\\n'.format( int(num), angleType, int((index+1)*3 + 1), int((index+1)*3 +2), int((index+1)*3+4) ))\n\n\n'''last angle'''\n\nindex = int((index+1)*3 +4)\nnum = num+1\noutputatoms.write('{} {} {} {} {}\\n'.format( int(num), angleType, int(index-2), int(index), int(index+1) ))\n\n\n\n'''side angles ---Phos-Sugar-Base for each nucleotide'''\n\nangleType =\"2\"\nfor index in range(numResidue):\n num = num +1\n outputatoms.write('{0} {1} {2} {3} {4}\\n'.format(int(num), angleType, int((index+1)*3 - 2), int((index+1)*3 -1), int((index+1)*3-0)))\n\n\n\n\n'''Generate the input file for the simulation'''\nrepeatline = \"##---------------------------------\\n\"\noutInFile = open('relax.in','w')\noutInFile.write(\"################ RNA with tail ##########\\n\")\noutInFile.write(\"clear\\n\\n\")\n\noutInFile.write(\"##Initialization\\n\")\noutInFile.write(repeatline)\n\noutInFile.write('{0} {1}\\n'.format(\"units\", \"lj\"))\noutInFile.write('{0} {1}\\n'.format(\"dimension\", \"3\"))\noutInFile.write('{0} {1}\\n'.format(\"atom_style\", \"full\"))\noutInFile.write('{0} {1} {2} {3}\\n\\n'.format(\"boundary\", \"f\", \"f\", \"f\"))\n\n\noutInFile.write(\"##Atom definition\\n\")\noutInFile.write(repeatline)\noutInFile.write('{0} {1}\\n\\n'.format(\"read_data\", \"relax.data\"))\n\n\noutInFile.write(\"## Force definition\\n\")\noutInFile.write(repeatline)\n\noutInFile.write(\"## Bond definition\\n\")\noutInFile.write('{0} {1}\\n'.format(\"bond_style\", \"harmonic\"))\noutInFile.write('{0} {1} {2} {3}\\n\\n'.format(\"bond_coeff\", bondType, kb, l0))\n\noutInFile.write(\"## Angle definition\\n\")\noutInFile.write('{0} {1}\\n'.format(\"angle_style\", \"harmonic\"))\noutInFile.write('{0} {1} {2} {3}\\n'.format(\"angle_coeff\", bondType, kTheta, theta0))\noutInFile.write('{0} {1} {2} {3}\\n\\n'.format(\"angle_coeff\", angleType, kTheta, theta0))\n\noutInFile.write(\"special_bonds\tlj/coul 0 1 1 angle yes dihedral yes\\n\")\noutInFile.write('{0} {1}\\n'.format(\"dielectric\", dielectric))\n\noutInFile.write(\"## Pair definition\\n\")\noutInFile.write(repeatline)\n\noutInFile.write('{0} {1} {2} {3} {4} {5} {6:0.3f} {7} {8}\\n'.format(\"pair_style\", \"hybrid/overlay\", \"lj/cut\", rlj, \"coul/debye\", kappa, rcoul, \"gauss\", gaussCut ))\n\noutInFile.write(\"pair_modify\tshift 
yes\\n\")\n\n'''pair coefficients for lj'''\n\nfor i in range(nAtomType):\n for j in range(nAtomType):\n if i<j:\n break\n else:\n \n if i==j and i>=3:\n outInFile.write('{0} {1} {2} {3} {4} {5} {6:0.3f}\\n'.format(\"pair_coeff\", j+1, i+1, \"lj/cut\", float(epsilonlj/epsilon), float(sigmalj/sigma), rlocal ))\n outInFile.write(\"pair_coeff {} {} gauss {} {}\\n\".format( j+1, i+1, gaussA, gaussB))\n else:\n outInFile.write('{0} {1} {2} {3} {4} {5} {6:0.3f}\\n'.format(\"pair_coeff\", j+1, i+1, \"lj/cut\", float(epsilonlj/epsilon), float(sigmalj/sigma), rlocal ))\n\n'''pair coefficient for coulomb/debye'''\noutInFile.write('{0} {1} {2} {3}\\n'.format(\"pair_coeff\", 1, 1, \"coul/debye\"))\n\n\nrun = 3000000\n\nendpartStr =\"\\n#Group Definition \\n#---------------------------------------------------------\\n\"\nendpartStr +=\"group\t\tmobile molecule <> 1 {}\\n\".format(numResidue)\nendpartStr +=\"group\t\tpore molecule <> {} {}\\n\".format(numResidue+1, numResidue+1)\nendpartStr += \"group initial id {}\\n\\n\".format(nMobile-2)\n\nendpartStr +=\"#Neighbor Modify \\n#-------------------------------------------------------\\n\"\nendpartStr +=\"neigh_modify\texclude type {} {}\\n\\n\".format(poreResType, poreResType)\n\n\nendpartStr += \"\\n#Timestep etc\\n\"\nendpartStr += \"#-----------------------------------------------\\n\"\nendpartStr +=\"timestep {}\\n\".format(dt)\nendpartStr +=\"run_style verlet\\n\"\nendpartStr +=\"#velocity mobile create {} 5748\\n\".format(tempReduced)\nendpartStr += \"velocity \tinitial zero linear\\n\\n\"\n\n\nendpartStr+=\"#Fix\\n#------------------------------------------\\n\"\nendpartStr+=\"fix 2 mobile nve molecule\\n\"\nendpartStr+=\"fix 3 all langevin {} {} 1.0 74354\\n\".format(tempReduced, tempReduced)\nendpartStr+=\"fix\t4 initial setforce 0.0 0.0 0.0\\n\\n\"\n\n\n\n\nendpartStr+=\"#Dump\\n#----------------------------------------------\\n\"\nendpartStr+=\"thermo_style custom step temp evdwl ecoul ebond eangle pe ke etotal \\n\"\nendpartStr+=\"thermo 1000\\n\"\nendpartStr+=\"dump 1 mobile custom 5000 relax.lammpstrj id mol type x y z \\n\"\nendpartStr+=\"dump 5 all xyz {} dump.xyz\\n\".format(run)\n\nendpartStr+=\"run {} \".format(run)\nendpartStr+=\"#----------End-----------\\n\"\n\noutInFile.write(endpartStr)\n\n\n\n\n\n\n\n\n\n\nreaddata.close()\noutputatoms.close()\noutInFile.close()\n","sub_path":"buildSystem/6-16HP/relax-II.py","file_name":"relax-II.py","file_ext":"py","file_size_in_byte":13346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115217542","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n# version: Python3.X\n''' oxo_data is the data module for a tic-tac-toe (or OXO) game.\nIt saves and restores a game board. 
The functions are:\n save_game(game) -> None\n restore_game() -> game\nNote that no limits are placed on the size of data.\nThe game implementation is responsible for validating all data in and out.\n'''\n__author__ = '__L1n__w@tch'\n\nimport os.path\n\ngame_file = \".oxogame.dat\"\n\n\n# By convention, functions not meant to be called by users of the module get a leading underscore\ndef _get_path():\n \"\"\"\n Returns a valid path for the data file.\n Tries to use the user's home folder, defaults to cwd\n :return: string\n \"\"\"\n # Use the os module to try to get the user's home directory; fall back to the current directory on failure\n try:\n game_path = os.environ[\"HOMEPATH\"] or os.environ[\"HOME\"]\n if not os.path.exists(game_path):\n game_path = os.getcwd()\n except (KeyError, TypeError):\n game_path = os.getcwd()\n return game_path\n\n\ndef save_game(game):\n \"\"\"\n saves a game object in the data file in the user's home folder.\n No checking is done on the input, which is expected to be a list of characters\n :param game:\n :return: None\n \"\"\"\n path = os.path.join(_get_path(), game_file)\n with open(path, \"w\") as gf:\n game_str = \"\".join(game)\n gf.write(game_str)\n\n\ndef restore_game():\n \"\"\"\n Restores a game from the data file.\n The game object is a list of characters\n :return: game\n \"\"\"\n path = os.path.join(_get_path(), game_file)\n with open(path) as gf:\n game_str = gf.read()\n return list(game_str)\n\n\ndef test():\n print(\"Path = \", _get_path())\n save_game(list(\"XO XO OX\"))\n print(restore_game())\n\n\ndef main():\n test()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python项目开发实战/第4章 创建桌面应用/4.5 用一些对话框让命令行界面变得生动/oxo_data.py","file_name":"oxo_data.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"358867888","text":"from heapq import heappush, heappop, _heapify_max\nfrom math import ceil\nfrom operator import itemgetter\nfrom sys import argv\n\n\ndef split(spaces):\n result = {}\n # top = heappop(spaces)\n top = spaces[0][0]\n mid = ceil(top / 2.0)\n ls = mid - 1\n rs = top - ls - 1\n result['max'] = max([ls, rs])\n result['min'] = min([ls, rs])\n\n return result\n\n\ndef compute(n_stalls, n_users, spaces, start):\n result = {}\n\n for j in range(start, n_users+1):\n result = split(spaces)\n\n if spaces[0][1] == 1:\n del spaces[0]\n else:\n spaces[0][1] = spaces[0][1] - 1\n\n if result['max'] != 0:\n added = False\n for idx, v in enumerate(spaces):\n if result['max'] == v[0]:\n spaces[idx][1] = spaces[idx][1] + 1\n added = True\n break\n\n if not added:\n spaces.append([result['max'], 1])\n # heappush(spaces, result['max'])\n\n if result['min'] != 0:\n added = False\n for idx, v in enumerate(spaces):\n if result['min'] == v[0]:\n spaces[idx][1] = spaces[idx][1] + 1\n added = True\n break\n\n if not added:\n spaces.append([result['min'], 1])\n # heappush(spaces, result['min'])\n\n spaces = sorted(spaces, key=itemgetter(0), reverse=True)\n # print(spaces)\n # _heapify_max(spaces)\n\n return [result, spaces]\n\n\nif __name__ == '__main__':\n test_cases = open(argv[1])\n t = int(test_cases.readline())\n inputs = []\n outputs = []\n\n for i in range(0, t):\n input = test_cases.readline().split(' ')\n n_stalls = int(input[0])\n n_users = int(input[1].replace('\\n', ''))\n inputs.append([n_stalls, n_users, i+1])\n\n inputs = sorted(inputs, key=itemgetter(0, 1))\n heap = []\n prev = [-1, -1, -1]\n\n for x in inputs:\n if prev[0] != x[0]:\n heap = [[x[0], 1]]\n start = 1\n else:\n start = prev[1] + 1\n\n out = compute(x[0], x[1], heap, start)\n result = out[0]\n heap = out[1]\n prev = x\n 
outputs.append([x[2], result['max'], result['min']])\n\n outputs = sorted(outputs, key=itemgetter(0))\n\n for out in outputs:\n print(f\"Case #{out[0]}: {out[1]} {out[2]}\")\n\n test_cases.close()\n","sub_path":"solutions_python/Problem_201/2348.py","file_name":"2348.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"17099106","text":"words_set_res = ('a')\n\nprint(f'words_set_res = {words_set_res}')\n\nstr2 = '''def addition(a, b):\n for word in words_set_res:\n for i in range(len(l)):'''\n\nl = str2.split('\\n')\nfor i in range(len(l)):\n print(i, l[i])\n\nvariables = {}\nfor el in words_set_res:\n variables[el] = []\nprint(variables)\n\n# THE PROGRAM ONLY FINDS A WORD IN A LINE ONCE; IT NEEDS TO BE MADE TO FIND IT MULTIPLE TIMES\nfor word in words_set_res:\n for i in range(len(l)):\n if word in l[i]:\n\n m = -1\n for j in range(2):\n\n m = l[i].index(word, m + 1)\n print('i', i, 'm ', m, 'word ', word)\n\n # inx_before = l[i].index(word) - 1\n # inx_after = l[i].index(word) + len(word)\n # if word == 'a':\n # print(i, inx_before, l[i][inx_before], inx_after, l[i][inx_after])\n # if not l[i][inx_before].isalpha():\n # if not l[i][inx_after].isalpha():\n # coordinates = [i, l[i].index(word)]\n # variables[word].append(coordinates)\n\n\nfor key in variables:\n print(key, variables[key])\n\n","sub_path":"lessons_with_tutor/miscellaneous/c22.py","file_name":"c22.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"178458020","text":"# coding=utf-8\n# Copyright (c) 2021, EleutherAI contributors\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file has been modified from its original version\n#\n\n\"\"\"Pretrain utilities.\"\"\"\nfrom datetime import datetime\nfrom functools import partial\n\nimport math\nimport sys\n\nimport torch\n\nfrom megatron.utils import Timers, init_wandb\nfrom megatron import print_rank_0\n\nfrom megatron import mpu\n\nfrom megatron.model import GPT2ModelPipe\nfrom megatron.checkpointing import load_checkpoint, save_checkpoint\nfrom megatron.data.gpt2_dataset import build_train_valid_test_datasets\n\nfrom megatron.initialize import initialize_megatron\nfrom megatron.learning_rates import AnnealingLR\nfrom megatron.model import get_params_for_weight_decay_optimization\nfrom megatron.utils import check_adlr_autoresume_termination\nfrom megatron.utils import make_data_loader\nfrom megatron.utils import report_memory\nfrom megatron.utils import tb_wandb_log\nfrom megatron.gradient_noise_scale import GradientNoiseScale\n\nfrom megatron.fp16 import fp32_to_fp16\nfrom megatron.model.gpt2_model import cross_entropy\nfrom megatron.utils import get_ltor_masks_and_position_ids\nfrom megatron.utils import reduce_losses\n\nimport deepspeed\n\n\ndef pretrain(neox_args):\n \"\"\"Main training program.\n\n This function will run the following in the order provided:\n 1) initialize Megatron.\n 2) setup model, optimizer and lr schedule\n 3) call train_val_test_data_provider to get train/val/test datasets.\n 4) train the model.\n\n Arguments:\n neox_args: an instance of NeoXArgs containing the configuration for pretrain\n\n \"\"\"\n # setup logging and timers\n init_wandb(neox_args=neox_args)\n timers = Timers(use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n # Initialize and get arguments, timers, and Tensorboard writer.\n initialize_megatron(neox_args=neox_args)\n\n # Model, optimizer, and learning rate.\n timers('model and optimizer').start()\n model, optimizer, lr_scheduler = setup_model_and_optimizer(neox_args=neox_args, inference=False, get_key_value=True)\n timers('model and optimizer').stop()\n\n # Data stuff.\n timers('train/valid/test data iterators').start()\n train_data_iterator, valid_data_iterator, test_data_iterator = build_train_valid_test_data_iterators(neox_args=neox_args)\n timers('train/valid/test data iterators').stop()\n\n # Print setup timing.\n print_rank_0('done with setups ...')\n timers.log(['model and optimizer', 'train/valid/test data iterators'])\n print_rank_0('training ...')\n\n iteration = 0\n if neox_args.do_train and neox_args.train_iters > 0:\n iteration = train(\n neox_args=neox_args, \n timers=timers,\n model=model, \n optimizer=optimizer, \n lr_scheduler=lr_scheduler,\n train_data_iterator=train_data_iterator, \n valid_data_iterator=valid_data_iterator\n )\n\n if neox_args.do_valid:\n prefix = 'the end of training for val data'\n evaluate_and_print_results(\n neox_args=neox_args, \n prefix=prefix, \n forward_step_func=forward_step,\n data_iterator=valid_data_iterator, \n 
model=model, \n iteration=iteration, \n verbose=False\n )\n\n if neox_args.save and iteration != 0:\n save_checkpoint(neox_args=neox_args, iteration=iteration, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler)\n\n if neox_args.do_test:\n # Run on test data.\n prefix = 'the end of training for test data'\n evaluate_and_print_results(\n neox_args=neox_args, \n prefix=prefix, \n forward_step_func=forward_step,\n data_iterator=test_data_iterator, \n model=model, \n iteration=0, # iteration 0 in order to always use full test data\n verbose=True\n )\n\ndef _get_batch(neox_args, tokenizer, keys, data, datatype):\n \"\"\"Support function for get_batch / get_batch pipe (to avoid code repetition)\"\"\"\n data_b = mpu.broadcast_data(keys, data, datatype)\n\n # Unpack.\n tokens_ = data_b['text'].long()\n labels = tokens_[:, 1:].contiguous()\n tokens = tokens_[:, :-1].contiguous()\n\n # Get the masks and position ids.\n attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\n tokens,\n tokenizer.eod,\n neox_args.reset_position_ids,\n neox_args.reset_attention_mask,\n neox_args.eod_mask_loss)\n\n return tokens, labels, loss_mask, attention_mask, position_ids\n\n\ndef get_batch(neox_args, data_iterator):\n \"\"\"Generate a batch\"\"\"\n\n # Items and their type.\n keys = ['text']\n datatype = torch.int64\n\n # Broadcast data.\n if data_iterator is not None:\n data = next(data_iterator)\n else:\n data = None\n return _get_batch(neox_args=neox_args, tokenizer=neox_args.tokenizer, keys=keys, data=data, datatype=datatype)\n\ndef get_batch_pipe(data, neox_args):\n \"\"\"A modification of get_batch() to work with the latest batch instead of an iterator. \"\"\"\n \n # Items and their type.\n keys = ['text']\n datatype = torch.int64\n\n tokens, labels, loss_mask, attention_mask, position_ids = _get_batch(neox_args, neox_args.tokenizer, keys, data, datatype)\n # unpack data\n if neox_args.precision == \"fp16\":\n # cast to fp16 because pipeline parallelism skips the FP16 wrapper.\n return fp32_to_fp16((tokens, position_ids, attention_mask)), fp32_to_fp16((labels, loss_mask))\n else:\n return (tokens, position_ids, attention_mask), (labels, loss_mask)\n\n\ndef forward_step(neox_args, timers, data_iterator, model):\n \"\"\"Forward step.\"\"\"\n\n # Get the batch.\n timers('batch generator').start()\n tokens, labels, loss_mask, attention_mask, position_ids = get_batch(neox_args=neox_args, data_iterator=data_iterator)\n timers('batch generator').stop()\n\n outputs = model((tokens, position_ids, attention_mask))\n loss = cross_entropy(outputs, (labels, loss_mask), _fp16=neox_args.fp16_lm_cross_entropy)\n\n # Reduce loss for logging.\n reduced_loss = reduce_losses([loss])\n\n return loss, {'lm loss': reduced_loss[0]}\n\ndef get_model(neox_args, inference=False, get_key_value=True):\n \"\"\"Build the model.\"\"\"\n\n print_rank_0('building GPT2 model ...')\n\n # Build model on cpu.\n model = GPT2ModelPipe(neox_args=neox_args, num_tokentypes=0, parallel_output=True, topology=mpu.get_topology(), inference=inference, get_key_value=get_key_value)\n if not neox_args.is_pipe_parallel:\n # Export PipeParallel model to nn.Sequential model to avoid the overhead of deepspeed's pipe parallel training\n model = model.to_sequential()\n else:\n # This is a hack to give us a reference to get_batch_pipe from within training.py\n # We need to call model.set_batch_fn after deepspeed.initialize\n model._megatron_batch_fn = partial(get_batch_pipe, neox_args=neox_args) \n\n if neox_args.deepspeed:\n # 
DeepSpeed handles CUDA, FP16, and DDP components.\n return model\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n\n\ndef get_optimizer(model, neox_args):\n \"\"\"Set up the optimizer.\"\"\"\n if neox_args.no_load_optim:\n return None, None\n # Build parameter groups (weight decay and non-decay).\n param_groups = get_params_for_weight_decay_optimization(model, neox_args)\n print_rank_0(f'Configuring Optimizer type: {neox_args.optimizer_type} with params: {neox_args.optimizer[\"params\"]}')\n # Add model parallel attribute if it is not set.\n for param_group in param_groups:\n for param in param_group['params']:\n if not hasattr(param, 'model_parallel'):\n param.model_parallel = False\n\n if neox_args.optimizer_type.lower() in [\"cpu_adam\", \"cpu_torch_adam\"]:\n if neox_args.optimizer_type.lower() == \"cpu_torch_adam\":\n cpu_adam_optimizer = torch.optim.Adam\n else:\n from deepspeed.ops.adam import DeepSpeedCPUAdam\n cpu_adam_optimizer = DeepSpeedCPUAdam\n optimizer = cpu_adam_optimizer(param_groups,\n weight_decay=neox_args.weight_decay,\n **neox_args.optimizer[\"params\"])\n elif neox_args.optimizer_type.lower() == \"onebitadam\":\n assert neox_args.deepspeed\n optimizer = None\n # onebitadam needs to be instantiated within the deepspeed engine to work :|\n elif neox_args.optimizer_type.lower() == \"sm3\":\n from .optimizers import SM3\n optimizer = SM3(\n param_groups,\n **neox_args.optimizer[\"params\"])\n elif neox_args.optimizer_type.lower() == \"adam\":\n # Use Adam\n try:\n # default to apex as it's slightly faster\n from apex.optimizers import FusedAdam as Adam\n except ImportError:\n # if apex isn't installed, use deepspeed's FusedAdam\n print(\"WARNING: APEX not installed - defaulting to deepspeed's fused adam\")\n from deepspeed.ops.adam import FusedAdam as Adam\n optimizer = Adam(param_groups,\n weight_decay=neox_args.weight_decay,\n **neox_args.optimizer[\"params\"])\n else:\n raise ValueError(f\"Optimizer type {neox_args.optimizer_type} not recognized\")\n\n if neox_args.deepspeed:\n # fp16 wrapper is not required for DeepSpeed.\n return optimizer, param_groups\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n\n\ndef get_learning_rate_scheduler(optimizer, neox_args):\n \"\"\"Build the learning rate scheduler.\"\"\"\n if neox_args.no_load_optim:\n # TODO: this should be configured as a separate arg\n return None\n if neox_args.deepspeed and neox_args.optimizer_type.lower() == \"onebitadam\":\n print_rank_0(\"WARNING: onebitadam requires the lr scheduler be built by deepspeed - \"\n \"Make sure one is added to your deepspeed config\")\n return None\n\n # Add linear learning rate scheduler.\n if neox_args.lr_decay_iters is not None:\n num_iters = neox_args.lr_decay_iters\n else:\n num_iters = neox_args.train_iters\n num_iters = max(1, num_iters)\n init_step = 0\n warmup_iter = neox_args.warmup * num_iters\n lr_scheduler = AnnealingLR(\n optimizer,\n start_lr=neox_args.lr,\n warmup_iter=warmup_iter,\n total_iters=num_iters,\n decay_style=neox_args.lr_decay_style,\n last_iter=init_step,\n min_lr=neox_args.min_lr,\n use_checkpoint_lr_scheduler=neox_args.use_checkpoint_lr_scheduler,\n override_lr_scheduler=neox_args.override_lr_scheduler)\n\n return lr_scheduler\n\n\ndef setup_model_and_optimizer(neox_args, inference=False, get_key_value=True):\n \"\"\"Setup model and optimizer.\"\"\"\n model = get_model(neox_args=neox_args, inference=inference, get_key_value=get_key_value)\n optimizer, param_groups = get_optimizer(model=model, neox_args=neox_args)\n 
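# Note (added): for optimizer_type \"onebitadam\", get_optimizer deliberately returns None and\n    # the raw param_groups are handed to deepspeed.initialize below so the engine can construct\n    # the optimizer itself (see the _model_params handling further down in this function).\n    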
lr_scheduler = get_learning_rate_scheduler(optimizer=optimizer, neox_args=neox_args)\n\n if neox_args.deepspeed:\n print_rank_0(\"DeepSpeed is enabled.\")\n if neox_args.no_load_optim:\n assert optimizer is None\n _model_params = None\n _lr_scheduler = None\n else:\n _model_params = param_groups if optimizer is None else None\n _lr_scheduler = lr_scheduler\n\n model, optimizer, _, lr_scheduler = deepspeed.initialize(\n model=model,\n optimizer=optimizer,\n args=neox_args,\n lr_scheduler=_lr_scheduler,\n dist_init_required=False,\n model_parameters=_model_params,\n config_params=neox_args.deepspeed_config,\n mpu=mpu if not neox_args.is_pipe_parallel else None\n )\n model.total_params = get_total_params(model.module)\n print_rank_0(f' > total params: {\"{:,}\".format(model.total_params)}')\n\n if neox_args.is_pipe_parallel:\n model.set_batch_fn(model.module._megatron_batch_fn)\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n\n if neox_args.load is not None:\n neox_args.iteration = load_checkpoint(neox_args=neox_args, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler)\n print_rank_0(f'Loading checkpoint and starting from iteration {neox_args.iteration}')\n else:\n neox_args.iteration = 0\n\n return model, optimizer, lr_scheduler\n\n\ndef backward_step(neox_args, timers, optimizer, model, loss):\n \"\"\"Backward step.\"\"\"\n\n # Backward pass.\n timers('backward-backward').start()\n if neox_args.deepspeed:\n model.backward(loss)\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n timers('backward-backward').stop()\n\n if neox_args.deepspeed:\n # DeepSpeed backward propagation already addressed all reduce communication.\n # Reset the timer to avoid breaking timer logs below.\n timers('backward-allreduce').reset()\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n\n\ndef train_step(neox_args, timers, data_iterator, model, optimizer, lr_scheduler):\n \"\"\"Single training step.\"\"\"\n\n # Pipeline parallelism schedules forward/backward/step\n if neox_args.is_pipe_parallel:\n return train_step_pipe(neox_args=neox_args, timers=timers, model=model, data_iterator=data_iterator)\n\n # Forward model for one step.\n timers('forward').start()\n loss, loss_reduced = forward_step(neox_args=neox_args, timers=timers, data_iterator=data_iterator, model=model)\n timers('forward').stop()\n\n # Calculate gradients, reduce across processes, and clip.\n timers('backward').start()\n backward_step(neox_args=neox_args, timers=timers, optimizer=optimizer, model=model, loss=loss)\n timers('backward').stop()\n\n # Update parameters.\n skipped_iter = 0\n timers('optimizer').start()\n if neox_args.deepspeed:\n model.step()\n else:\n raise ValueError(\"Must be using deepspeed to run neox\")\n timers('optimizer').stop()\n\n return loss_reduced, skipped_iter\n\n\ndef train_step_pipe(neox_args, timers, model, data_iterator):\n \"\"\"Single training step with DeepSpeed's pipeline parallel engine. 
\"\"\"\n\n assert neox_args.deepspeed\n loss = model.train_batch(data_iter=data_iterator)\n loss_dict = {'lm loss': loss}\n if neox_args.precision == \"fp16\" and model.optimizer.overflow:\n skipped_iter = 1\n else:\n skipped_iter = 0\n # Don't break Megatron's timers because we changed code paths.\n for t in ['forward', 'backward', 'allreduce', 'optimizer', 'batch generator', 'data loader']:\n timers(t).reset()\n return loss_dict, skipped_iter\n\n\ndef training_log(neox_args, timers, loss_dict, total_loss_dict, learning_rate, iteration,\n loss_scale, report_memory_flag, skipped_iter, model, optimizer, noise_scale_logger):\n \"\"\"Log training information such as losses, timing, etc.\"\"\"\n\n # Update losses.\n skipped_iters_key = 'skipped iterations'\n total_loss_dict[skipped_iters_key] = total_loss_dict.get(\n skipped_iters_key, 0) + skipped_iter\n got_nan_key = 'got nan'\n\n got_nan = False\n for key in loss_dict:\n if not skipped_iter:\n total_loss_dict[key] = total_loss_dict.get(key, 0.) + loss_dict[key]\n else:\n value = loss_dict[key].float().sum().item()\n is_nan = value == float('inf') or \\\n value == -float('inf') or \\\n value != value\n got_nan = got_nan or is_nan\n\n total_loss_dict[got_nan_key] = total_loss_dict.get(\n got_nan_key, 0) + int(got_nan)\n\n # Logging.\n timers_to_log = []\n\n def add_to_logging(name):\n if name in timers.timers:\n timers_to_log.append(name)\n\n if not neox_args.is_pipe_parallel:\n add_to_logging('forward')\n add_to_logging('backward')\n add_to_logging('backward-backward')\n add_to_logging('backward-allreduce')\n add_to_logging('backward-master-grad')\n add_to_logging('backward-clip-grad')\n add_to_logging('optimizer')\n add_to_logging('batch generator')\n\n # Log timer info to tensorboard and wandb\n normalizer = iteration % neox_args.log_interval\n if normalizer == 0:\n normalizer = neox_args.log_interval\n if torch.distributed.get_rank() == 0:\n timers.write(names=timers_to_log, iteration=iteration, normalizer=normalizer)\n else:\n # with pipeline parallel, the megatron timers are overridden by the deepspeed ones.\n # Try to grab timer values from model engine. Only recently added to deeperspeed, so check that the engine\n # has that attribute first\n if hasattr(model, 'timer_values') and model.timer_values is not None:\n if model.wall_clock_breakdown() and model.global_steps % model.steps_per_print() == 0:\n timer_values = model.timer_values\n # deepspeed already logs to tensorboard / prints values, so just log to wandb\n if neox_args.use_wandb and torch.distributed.get_rank() == 0:\n for key in timer_values:\n tb_wandb_log(f\"timers/{key}\", timer_values[key], iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n # write losses, lr, etc. 
every step\n tb_wandb_log('train/learning_rate', learning_rate, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n for key in loss_dict:\n tb_wandb_log(f'train/{key.replace(\" \", \"_\")}', loss_dict[key], iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n if neox_args.fp16:\n tb_wandb_log(f'train/loss_scale', loss_scale, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n # log gradient noise scale\n if neox_args.log_gradient_noise_scale:\n if noise_scale_logger.noise_scale is not None:\n tb_wandb_log(f'train/noise_scale', noise_scale_logger.noise_scale, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n # (optional) Log optimizer states to wandb / tb every step\n if neox_args.log_optimizer_states:\n for k, v in optimizer.state_dict()['optimizer_state_dict']['state'].items():\n for ki, vi in v.items(): # step, module\n if ki != 'step':\n opt_state_norm = torch.norm(vi) if hasattr(vi, 'dim') else vi\n tb_wandb_log(f'optimizer_state_norms/{k}_{ki}', opt_state_norm, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n # (optional) Log grad/param norms to wandb / tb every step\n if neox_args.log_grad_norm or neox_args.log_param_norm:\n for name, param in model.module.named_parameters():\n if neox_args.log_grad_norm:\n if param.grad is not None:\n tb_wandb_log(f'gradient_norms/{name}', torch.norm(param.grad), iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n if neox_args.log_param_norm:\n tb_wandb_log(f'parameter_norms/{name}', torch.norm(param), iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n if iteration % neox_args.log_interval == 0:\n # log other stuff every neox_args.log_interval iters\n elapsed_time = timers('interval time').elapsed()\n iteration_time = elapsed_time / neox_args.log_interval\n samples_per_sec = get_global_batch_size(neox_args) / iteration_time\n log_string = ' samples/sec: {:.3f} |'.format(samples_per_sec)\n tb_wandb_log('runtime/samples_per_sec', samples_per_sec, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n tb_wandb_log('runtime/iteration_time', iteration_time, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n log_string += ' iteration {:8d}/{:8d} |'.format(iteration, neox_args.train_iters)\n log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(\n elapsed_time * 1000.0 / neox_args.log_interval)\n log_string += ' learning rate: {:.3E} |'.format(learning_rate)\n num_iterations = max(\n 1, neox_args.log_interval - total_loss_dict[skipped_iters_key])\n\n # log tflop / gpu\n flops_per_s_per_gpu = get_flops(neox_args=neox_args, model=model, iter_time_s=iteration_time)\n log_string += f' approx flops per GPU: {human_readable_flops(flops_per_s_per_gpu)} |'\n tb_wandb_log('runtime/flops_per_sec_per_gpu', flops_per_s_per_gpu, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n for key in total_loss_dict:\n if key not in [skipped_iters_key, got_nan_key]:\n v = total_loss_dict[key].item() if hasattr(total_loss_dict[key], 'item') else total_loss_dict[key]\n avg = v / float(num_iterations)\n log_string += ' {}: {:.6E} |'.format(key, avg)\n total_loss_dict[key] = 0.0\n if neox_args.precision == \"fp16\":\n log_string += ' loss scale: {:.1f} 
|'.format(loss_scale)\n log_string += ' number of skipped iterations: {:3d} |'.format(\n total_loss_dict[skipped_iters_key])\n log_string += ' number of nan iterations: {:3d} |'.format(\n total_loss_dict[got_nan_key])\n total_loss_dict[skipped_iters_key] = 0\n total_loss_dict[got_nan_key] = 0\n print_rank_0(log_string)\n if report_memory_flag:\n report_memory('after {} iterations'.format(iteration))\n report_memory_flag = False\n\n timers.log(timers_to_log, normalizer=neox_args.log_interval)\n\n return report_memory_flag\n\n\ndef train(neox_args, timers, model, optimizer, lr_scheduler,\n train_data_iterator, valid_data_iterator):\n \"\"\"Train the model function.\"\"\"\n\n # Turn on training mode which enables dropout.\n model.train()\n\n # Tracking loss.\n total_loss_dict = {}\n\n # Iterations.\n iteration = neox_args.iteration\n\n timers('interval time').start()\n report_memory_flag = True\n\n if neox_args.log_gradient_noise_scale:\n if neox_args.zero_stage >= 1:\n raise NotImplementedError('Gradient Noise Scale logging does not work with zero stage 2+, as the '\n 'gradients are distributed across ranks.')\n noise_scale_logger = GradientNoiseScale(\n model=model,\n batch_size_small=neox_args.train_batch_size,\n n_batches=neox_args.gradient_noise_scale_n_batches,\n cpu_offload=neox_args.gradient_noise_scale_cpu_offload,\n neox_args=neox_args,\n mpu=mpu)\n else:\n noise_scale_logger = None\n\n while iteration < neox_args.train_iters:\n loss_dict, skipped_iter = train_step(\n neox_args=neox_args, \n timers=timers,\n data_iterator=train_data_iterator,\n model=model, \n optimizer=optimizer, \n lr_scheduler=lr_scheduler\n )\n iteration += 1\n if neox_args.log_gradient_noise_scale:\n noise_scale_logger.update()\n # Logging.\n loss_scale = None\n if neox_args.precision == \"fp16\":\n loss_scale = optimizer.cur_scale\n report_memory_flag = training_log(\n neox_args=neox_args, \n timers=timers, \n loss_dict=loss_dict, \n total_loss_dict=total_loss_dict, \n learning_rate=optimizer.param_groups[0]['lr'], \n iteration=iteration,\n loss_scale=loss_scale, \n report_memory_flag=report_memory_flag, \n skipped_iter=skipped_iter, \n model=model, \n optimizer=optimizer, \n noise_scale_logger=noise_scale_logger\n )\n\n # Autoresume\n if neox_args.adlr_autoresume and \\\n (iteration % neox_args.adlr_autoresume_interval == 0):\n check_adlr_autoresume_termination(neox_args=neox_args, iteration=iteration, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler)\n\n # Checkpointing\n if neox_args.save and neox_args.save_interval and iteration % neox_args.save_interval == 0:\n save_checkpoint(neox_args=neox_args, iteration=iteration, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler)\n\n # Evaluation\n if neox_args.eval_interval and iteration % neox_args.eval_interval == 0 and neox_args.do_valid:\n prefix = 'iteration {}'.format(iteration)\n evaluate_and_print_results(\n neox_args=neox_args, \n prefix=prefix, \n forward_step_func=forward_step,\n data_iterator=valid_data_iterator, \n model=model, \n iteration=iteration, \n verbose=False\n )\n\n if neox_args.exit_interval and iteration % neox_args.exit_interval == 0:\n torch.distributed.barrier()\n time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n rank = torch.distributed.get_rank()\n print_rank_0('rank: {} | time: {} | exiting the program at iteration {}'.format(rank, time_str, iteration))\n sys.exit()\n\n return iteration\n\n\ndef evaluate(neox_args, forward_step_fn, data_iterator, model, verbose=False):\n \"\"\"Evaluation.\"\"\"\n # Turn on 
evaluation mode which disables dropout.\n model.eval()\n\n total_loss_dict = {}\n\n with torch.no_grad():\n iteration = 0\n while iteration < neox_args.eval_iters:\n iteration += 1\n if verbose and iteration % neox_args.log_interval == 0:\n print_rank_0('Evaluating iter {}/{}'.format(iteration, neox_args.eval_iters))\n # Forward evaluation.\n _, loss_dict = forward_step_fn(data_iterator, model)\n\n # When contiguous memory optimizations are enabled, the buffers\n # allocated by the optimizations are deallocated during the backward pass;\n # in the absence of a backward pass the buffers should be reset after each\n # forward pass\n if neox_args.deepspeed and neox_args.deepspeed_activation_checkpointing:\n deepspeed.checkpointing.reset()\n\n # Reduce across processes.\n for key in loss_dict:\n total_loss_dict[key] = total_loss_dict.get(key, 0.) + loss_dict[key]\n \n # Move model back to the train mode.\n model.train()\n\n for key in total_loss_dict:\n total_loss_dict[key] /= neox_args.eval_iters\n\n return total_loss_dict\n\n\ndef evaluate_and_print_results(neox_args, prefix, forward_step_func, data_iterator, model, iteration, verbose=False):\n \"\"\"Helper function to evaluate and dump results on screen.\"\"\"\n\n # Pipeline parallelism needs eval_batch() instead of a simple forward().\n if neox_args.is_pipe_parallel:\n def _eval_helper(data_iter, _):\n loss = model.eval_batch(data_iter)\n return None, {'lm loss': loss}\n forward_step_func = _eval_helper\n\n total_loss_dict = evaluate(neox_args=neox_args, forward_step_fn=forward_step_func, data_iterator=data_iterator, model=model, verbose=verbose)\n string = ' validation loss at {} | '.format(prefix)\n for key in total_loss_dict:\n string += '{} value: {:.6E} | '.format(key, total_loss_dict[key].item())\n ppl = math.exp(min(20, total_loss_dict[key].item()))\n string += '{} PPL: {:.6E} | '.format(key, ppl)\n tb_wandb_log(f\"validation/{key.replace(' ', '_')}\", total_loss_dict[key].item(), iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n tb_wandb_log(f\"validation/{key.replace(' ', '_')}_ppl\", ppl, iteration, use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer)\n\n length = len(string) + 1\n print_rank_0('-' * length)\n print_rank_0(string)\n print_rank_0('-' * length)\n\n\ndef build_train_valid_test_data_iterators(neox_args):\n \"\"\"Build the train, validation, and test data iterators and set the do_train/do_valid/do_test flags on neox_args.\"\"\"\n\n (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)\n\n print_rank_0('> building train, validation, and test datasets ...')\n\n # Ensure only the first/last pipeline stages have data loaders\n if neox_args.is_pipe_parallel:\n is_first_stage = mpu.get_pipe_parallel_rank() == 0\n is_last_stage = mpu.get_pipe_parallel_rank() == mpu.get_pipe_parallel_world_size() - 1\n pipe_load = is_first_stage or is_last_stage\n else:\n pipe_load = True\n\n # Data loader only on rank 0 of each model parallel group.\n if mpu.get_model_parallel_rank() == 0 and pipe_load:\n # Rank, size, and global batch size.\n data_parallel_size = mpu.get_data_parallel_world_size()\n global_batch_size = neox_args.batch_size * data_parallel_size * neox_args.gas\n\n # Number of train/valid/test samples.\n train_iters = neox_args.train_iters\n eval_iters = (train_iters // neox_args.eval_interval + 1) * neox_args.eval_iters\n test_iters = neox_args.eval_iters\n train_val_test_num_samples = [train_iters * global_batch_size,\n eval_iters * global_batch_size,\n test_iters * global_batch_size]\n print_rank_0(' > datasets target sizes (minimum size):')\n 
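# Editor's note: a worked example of the arithmetic above -- with train_iters=1000,\n        # eval_interval=100, eval_iters=10 and global_batch_size=32 this requests\n        # 1000*32 = 32000 train, (1000//100 + 1)*10*32 = 3520 validation and 10*32 = 320 test samples.\n        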
print_rank_0(' train: {}'.format(train_val_test_num_samples[0]))\n print_rank_0(' validation: {}'.format(train_val_test_num_samples[1]))\n print_rank_0(' test: {}'.format(train_val_test_num_samples[2]))\n\n # Build the datasets.\n\n print_rank_0('> building train, validation, and test datasets for GPT2 ...')\n train_ds, valid_ds, test_ds = build_train_valid_test_datasets(\n data_prefix=neox_args.data_path,\n data_impl=neox_args.data_impl,\n splits_string=neox_args.split,\n train_valid_test_num_samples=train_val_test_num_samples,\n seq_length=neox_args.seq_length,\n seed=neox_args.seed,\n skip_warmup=(not neox_args.mmap_warmup)\n )\n print_rank_0(\"> finished creating GPT2 datasets ...\")\n\n # Build dataloaders.\n train_dataloader = make_data_loader(train_ds, neox_args=neox_args)\n valid_dataloader = make_data_loader(valid_ds, neox_args=neox_args)\n test_dataloader = make_data_loader(test_ds, neox_args=neox_args)\n\n # Flags to know if we need to do training/validation/testing.\n do_train = train_dataloader is not None and neox_args.train_iters > 0\n do_valid = valid_dataloader is not None and neox_args.eval_iters > 0\n do_test = test_dataloader is not None and neox_args.eval_iters > 0\n # Pack the flags so they can be broadcast to all ranks.\n flags = torch.cuda.LongTensor(\n [int(do_train), int(do_valid), int(do_test)])\n else:\n flags = torch.cuda.LongTensor([0, 0, 0])\n\n # Broadcast the do-train/valid/test flags.\n if neox_args.is_pipe_parallel:\n # Only first/last pipeline stages have data loaders, so pipeline parallelism should\n # broadcast globally instead of just the model parallel group.\n torch.distributed.broadcast(flags, src=0)\n else:\n torch.distributed.broadcast(flags,\n mpu.get_model_parallel_src_rank(),\n group=mpu.get_model_parallel_group())\n neox_args.do_train = flags[0].item()\n neox_args.do_valid = flags[1].item()\n neox_args.do_test = flags[2].item()\n\n # Shift the start iterations.\n if train_dataloader is not None:\n train_dataloader.batch_sampler.start_iter = neox_args.iteration % \\\n len(train_dataloader)\n print_rank_0('setting training data start iteration to {}'.\n format(train_dataloader.batch_sampler.start_iter))\n if valid_dataloader is not None:\n start_iter_val = (neox_args.iteration // neox_args.eval_interval) * \\\n neox_args.eval_iters\n valid_dataloader.batch_sampler.start_iter = start_iter_val % \\\n len(valid_dataloader)\n print_rank_0('setting validation data start iteration to {}'.\n format(valid_dataloader.batch_sampler.start_iter))\n\n # Build iterators.\n if train_dataloader is not None:\n train_data_iterator = iter(train_dataloader)\n else:\n train_data_iterator = None\n\n if valid_dataloader is not None:\n valid_data_iterator = iter(valid_dataloader)\n else:\n valid_data_iterator = None\n\n if test_dataloader is not None:\n test_data_iterator = iter(test_dataloader)\n else:\n test_data_iterator = None\n\n return train_data_iterator, valid_data_iterator, test_data_iterator\n\n\ndef get_total_params(model):\n # Print number of parameters.\n if mpu.get_data_parallel_rank() == 0:\n params = sum([p.nelement() for p in model.parameters()])\n print(' > number of parameters on model parallel rank {}: {}'.format(\n mpu.get_model_parallel_rank(), params), flush=True)\n else:\n params = 0\n\n total_n_parameters = torch.tensor([params]).cuda(torch.cuda.current_device())\n torch.distributed.all_reduce(total_n_parameters)\n total_n_parameters = total_n_parameters.item()\n return total_n_parameters\n\n\ndef human_readable_flops(num):\n for unit in ['', 'KFLOPS', 'MFLOPS', 
'GFLOPS', 'TFLOPS', 'PFLOPS', 'EFLOPS', 'ZFLOPS']:\n if abs(num) < 1000.0:\n return \"%3.1f%s\" % (num, unit)\n num /= 1000.0\n return \"%.1f%s\" % (num, 'Yi')\n\n\ndef get_global_batch_size(neox_args):\n return neox_args.batch_size * mpu.get_data_parallel_world_size() * neox_args.gas\n\n\ndef get_flops(neox_args, model, iter_time_s):\n\n world_size = torch.distributed.get_world_size()\n global_batch_size = get_global_batch_size(neox_args)\n\n ff = model.total_params * 6\n attn = neox_args.seq_length * neox_args.hidden_size * neox_args.num_layers * 60\n flops = global_batch_size * neox_args.seq_length * (ff + attn) / (iter_time_s * world_size)\n\n return flops\n","sub_path":"megatron/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":35165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"559927018","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n \n def traverse(self):\n if self.head is None:\n print(\"List has no elements\")\n return\n else:\n n = self.head\n while n is not None:\n print(n.data,\" \")\n n=n.next\n \n def insert_at_start(self, data):\n new_node = Node(data)\n new_node.next = self.head\n self.head= new_node\n\n\nif __name__=='__main__':\n items =LinkedList()\n \n items.insert_at_start('40')\n items.insert_at_start('50')\n\n items.traverse()","sub_path":"data_structures/linked_lists/1_insert_start_LL/Solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"226104827","text":"class qryWSGiroBI:\r\n def __init__(self, formObj, parentForm) :\r\n self.app = formObj.ClientApplication\r\n self.qry = formObj.GetPanelByName('qData')\r\n self.oqlText = \\\r\n \"select from RECON_WSGiroBI \"\\\r\n \"( \"\\\r\n \" Tanggal \"\\\r\n \" , status \"\\\r\n \" , f_rtgs_out \"\\\r\n \" , f_rtgs_in \"\\\r\n \" , f_tom \"\\\r\n \" , f_bsk_debet \"\\\r\n \" , f_bsk_kredit_retur \"\\\r\n \" , f_girobi \"\\\r\n \" , j_rtgs_out \"\\\r\n \" , j_rtgs_in \"\\\r\n \" , j_tom \"\\\r\n \" , j_bsk_debet \"\\\r\n \" , j_bsk_kredit_retur \"\\\r\n \" , j_lain \"\\\r\n \" , j_total \"\\\r\n \" , ID_WSGiroBI \"\\\r\n \" , self \"\\\r\n \") then order by Tanggal;\"\r\n\r\n def setQueryText(self, oqltext):\r\n formObj = self.FormObject\r\n self.qry.OQLText = oqltext\r\n \r\n def showQuery(self):\r\n self.qry.DisplayData()\r\n\r\n def Show(self):\r\n formObj = self.FormObject\r\n uipNoData = formObj.GetUIPartByName('uipNoData')\r\n\r\n self.setQueryText(self.oqlText)\r\n self.showQuery()\r\n\r\n self.FormContainer.Show()\r\n return\r\n\r\n","sub_path":"dialogs/reconcilerak/PeragaanData/qryWSGiroBI_intr.py","file_name":"qryWSGiroBI_intr.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"536349408","text":"#!/usr/bin/python\n#coding:utf8\n#Author: Zifan Liu\n#Date: Feb 29, 2016\n#Project interpreter: 3.x\nimport os,sys,socket\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.append(base_dir)\nfrom day7.FTP.server.user_info.register import *\nfrom day7.FTP.client.function_list import *\n\n\n#main\nif __name__ == '__main__':\n bufsiz = 1024\n addr = ('127.0.0.1',7777)\n tcp_sock = socket.socket() #1.socket\n tcp_sock.connect(addr) #2.connect\n print(\"Try to connect 
server....\")\n while 1:\n #register user\n username,status = user_register()\n if status:\n print(\"OK, signup&login successful\".center(80,'#'))\n print(\"You can use these command in this terminal:\\n\\t\\t0.ls\\n\\t\\t1.cd ..\\n\\t\\t2.cd xxx\\n\\t\\t3.mkdir xxx\\n\\t\\t4.rmdir xxx\\n\\t\\t5.upload C:\\\\450.jpg\\n\\t\\t6.download C:\\\\450.jpg\".center(80,'='))\n tcp_sock.sendall(bytes(username,encoding='utf-8')) #3.send\n while 1:\n user_input_command = input(\"[%s@localhost] >> \" % username)\n tcp_sock.sendall(bytes(user_input_command,encoding='utf8'))\n server_reply = str(tcp_sock.recv(bufsiz),'utf-8')\n print(\"\\t\\t%s\" % server_reply)\n tcp_sock.close() #5.close\n else:print(\"signup&login failed\")","sub_path":"day7/FTP/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"257917017","text":"from .data import GCP_PRIVATE_KEY\nfrom googleapiclient.http import RequestMockBuilder as GoogleApiClientRequestMockBuilder\nfrom httplib2 import Response as HttpLib2Response\nfrom journalpump.senders.google_cloud_logging import GoogleCloudLoggingSender\nfrom typing import Dict, List\nfrom unittest import mock\n\nimport json\n\n\nclass TestGoogleCloudLoggingSender:\n\n CONFIG = {\n \"google_cloud_logging_project_id\": \"project-id\",\n \"google_cloud_logging_log_id\": \"log-id\",\n \"google_cloud_logging_resource_labels\": {\n \"location\": \"us-east-1\",\n \"node_id\": \"my-test-node\",\n },\n \"google_service_account_credentials\": {\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"abcdefg\",\n \"private_key\": GCP_PRIVATE_KEY,\n \"client_email\": \"test@project-id.iam.gserviceaccount.com\",\n \"client_id\": \"123456789\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://oauth2.googleapis.com/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/test%40project-id.iam.gserviceaccount.com\", # pylint:disable=line-too-long\n },\n }\n\n def _generate_request_builder(self, entries: List[Dict[str, str]], error=None) -> GoogleApiClientRequestMockBuilder:\n \"\"\"Generate typical response body with patched `entries` key\"\"\"\n\n expected_body = {\n \"logName\": \"projects/project-id/logs/log-id\",\n \"resource\": {\n \"type\": \"generic_node\",\n \"labels\": {\"location\": \"us-east-1\", \"node_id\": \"my-test-node\"},\n },\n \"entries\": entries,\n }\n return GoogleApiClientRequestMockBuilder(\n {\n \"logging.entries.write\": (error, \"{}\", expected_body),\n },\n check_unexpected=True,\n )\n\n def test_message_string(self):\n \"\"\"Check that MESSAGE as plain text was sent\"\"\"\n\n request_builder = self._generate_request_builder([{\"jsonPayload\": {\"MESSAGE\": \"Hello\"}}])\n\n sender = GoogleCloudLoggingSender(\n name=\"googlecloudlogging\",\n reader=mock.Mock(),\n stats=mock.Mock(),\n field_filter=None,\n config=self.CONFIG,\n googleapiclient_request_builder=request_builder,\n )\n sender.send_messages(messages=[b'{\"MESSAGE\": \"Hello\"}'], cursor=None)\n assert sender._sent_count == 1 # pylint: disable=protected-access\n\n def test_missing_message(self):\n \"\"\"Check that missing `MESSAGE` key is fine\"\"\"\n\n request_builder = self._generate_request_builder([{\"jsonPayload\": {\"not_message\": \"Hello\"}}])\n\n sender = GoogleCloudLoggingSender(\n 
name=\"googlecloudlogging\",\n reader=mock.Mock(),\n stats=mock.Mock(),\n field_filter=None,\n config=self.CONFIG,\n googleapiclient_request_builder=request_builder,\n )\n sender.send_messages(messages=[b'{\"not_message\": \"Hello\"}'], cursor=None)\n assert sender._sent_count == 1 # pylint: disable=protected-access\n\n def test_message_json(self):\n \"\"\"Check that MESSAGE as object was decoded and sent\"\"\"\n\n expected_json_message = {\"test_key\": \"test_value\"}\n request_builder = self._generate_request_builder([{\"jsonPayload\": {\"MESSAGE\": expected_json_message}}])\n\n sender = GoogleCloudLoggingSender(\n name=\"googlecloudlogging\",\n reader=mock.Mock(),\n stats=mock.Mock(),\n field_filter=None,\n config=self.CONFIG,\n googleapiclient_request_builder=request_builder,\n )\n\n message = json.dumps({\"MESSAGE\": json.dumps(expected_json_message)})\n sender.send_messages(messages=[message.encode()], cursor=None)\n assert sender._sent_count == 1 # pylint: disable=protected-access\n\n def test_bad_request_did_not_marked_sent(self):\n \"\"\"Check that the message was not marked as sent if the Google API returns an error\"\"\"\n request_builder = self._generate_request_builder(\n [{\"jsonPayload\": {\"MESSAGE\": \"Hello\"}}],\n error=HttpLib2Response({\"status\": \"400\"}),\n )\n\n sender = GoogleCloudLoggingSender(\n name=\"googlecloudlogging\",\n reader=mock.Mock(),\n stats=mock.Mock(),\n field_filter=None,\n config=self.CONFIG,\n googleapiclient_request_builder=request_builder,\n )\n sender.send_messages(messages=[b'{\"MESSAGE\": \"Hello\"}'], cursor=None)\n assert sender._sent_count == 0 # pylint: disable=protected-access\n\n def test_correct_timestamp(self):\n \"\"\"Check severity mapping is converted correctly and timestamp is being sent.\"\"\"\n\n request_builder = self._generate_request_builder(\n [\n {\n \"timestamp\": \"2020-06-25T06:24:13.787255Z\",\n \"severity\": \"EMERGENCY\",\n \"jsonPayload\": {\"MESSAGE\": \"Hello\"},\n }\n ]\n )\n\n sender = GoogleCloudLoggingSender(\n name=\"googlecloudlogging\",\n reader=mock.Mock(),\n stats=mock.Mock(),\n field_filter=None,\n config=self.CONFIG,\n googleapiclient_request_builder=request_builder,\n )\n sender.send_messages(\n messages=[b'{\"MESSAGE\": \"Hello\", \"PRIORITY\": 0, \"timestamp\": \"2020-06-25T06:24:13.787255\"}'],\n cursor=None,\n )\n assert sender._sent_count == 1 # pylint: disable=protected-access\n","sub_path":"test/test_google_cloud_logging.py","file_name":"test_google_cloud_logging.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"291120167","text":"class Solution(object):\n def hasPath(self, maze, start, destination):\n m,n = len(maze), len(maze[0])\n i0,j0 = start[0], start[1]\n i1,j1 = destination[0], destination[1]\n if maze[i0][j0]==1 or maze[i1][j1]==1:\n return False\n visited = set()\n def dfs(i,j):\n if i==i1 and j==j1:\n return True\n visited.add((i,j))\n for d in [1,-1]:\n k=i\n while k+d>=0 and k+d<m and maze[k+d][j]==0:\n k+=d\n \n if k!=i and (k,j) not in visited:\n if dfs(k,j):\n return True\n l=j\n while l+d>=0 and l+d<n and maze[i][l+d]==0:\n l+=d\n \n if l!=j and (i,l) not in visited:\n if dfs(i,l):\n return True\n return False\n return dfs(i0,j0)\n#Time-complexity: O(m.n)\n#space-complexity: O(m.n)","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"37017113","text":"from 
math import *\nimport sys\n\nfactor = 100\n\ndef createGaussian(sigma):\n a = 1.0 / sqrt(2 * pi * sigma * sigma)\n b = 2.0 * sigma * sigma\n def gaussian(x):\n return a * exp(-(x * x)/b)\n return gaussian\n\ndef parts(li, n):\n return [li[i:i + n] for i in range(0, len(li), n)]\n\ndef generateCode(weights):\n sideLen = int(ceil(sqrt(len(weights))))\n chunks = parts(weights, sideLen)\n joinedWeights = [', '.join(row) for row in chunks]\n return \"static const float weights [{0}] = {{\\n{1}\\n}};\".format(\n len(weights), ', '.join([str(w) for w in joinedWeights])\n )\n\ndef main(n = 256, sigma = 128):\n gaussian = createGaussian(sigma)\n weights = [factor * gaussian(i) for i in range(0, n)]\n weights = [str(w) for w in weights]\n code = generateCode(weights)\n print(code)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) == 3:\n main(int(args[1]), int(args[2]))\n elif len(args) == 2:\n main(int(args[1]))\n else:\n main()","sub_path":"Assets/Scripts/gaussian_weight_gen.py","file_name":"gaussian_weight_gen.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"479851623","text":"#6- Write an algorithm that reads the variables 'c' and 'n', respectively the code and the number of hours worked by a\r\n#worker. Compute the salary knowing that he earns R$ 10.00 per hour. When the number of hours exceeds 50, compute\r\n#the excess pay, storing it in the variable 'e'. Otherwise set that variable to zero. Each hour of overtime\r\n#is worth R$ 20.00. At the end of processing, print the total salary and the overtime pay.\r\n\r\nn=float(input('Insira o número de horas trabalhadas: '))\r\nc=0\r\n\r\nif (n)>50:\r\n salário_normal=float(500)\r\n salário_extra=(n-50)*20\r\n salário_total=(salário_normal+salário_extra)\r\n print(salário_normal,'mais',salário_extra)\r\n print(salário_total)\r\nelse:\r\n salário_total=n*10.0\r\n print(salário_total,'mais',0.0)\r\n","sub_path":"Seção 6 - Estruturas de decisão/Exercício 6.py","file_name":"Exercício 6.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"417694118","text":"from domain_SO.Memory import Memory\nfrom domain_SO.Disc import Disc\nfrom domain_SO.PcbTable import PcbTable\nfrom queue import Queue\nfrom domain_SO.Loader import Loader\nfrom domain_SO.CPU import CPU\nfrom domain_SO.Instruction import Instruction,InstructionType\nfrom userExceptions_SO.ProgramNotInDiscException import ProgramNotInDiscException\nfrom domain_SO.interruptions.InterruptionManager import InterruptionManager\n\nclass Kernel:\n\n def __init__(self,memory):\n self.pcbTable = PcbTable()\n self.memory = memory\n self.disc = Disc()\n self.cpu = CPU()\n self.programLoader = Loader(self.memory,self.disc)\n self.readyQueue = Queue()\n self.interruptionManager = InterruptionManager()\n\n def run(self,cmd,programName):\n try:\n #tested\n program = self.programLoader.getFromDisc(programName)\n #tested\n pcb = self.pcbTable.isPcbOf(program)\n #pass the pcb to set the basedir\n if(pcb is None):\n #new NewInterruption\n instructionNew = Instruction(InstructionType.New)\n newPcb = self.pcbTable.createPcbFor(self,program)\n self.interruptionManager.enqueueInterruption(instructionNew,newPcb)\n else:\n self.programLoader.loadToMemory(program,pcb)\n if(self.readyQueue.empty()):\n self.cpu.setPCB(pcb)\n else:\n self.readyQueue.put(pcb)\n except ProgramNotInDiscException as p:\n print(p.getMessage())\n\n\n#-going from running to ready -> a timeout interrupt. The alert goes to the kernel
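\n# Editor's sketch (added; hypothetical, not part of the original project): the timeout path\n# described in these notes could look roughly like the following, assuming the Pcb exposes a\n# setState() method -- setState() is an assumption, the other names reuse attributes defined above.\n# def handleTimeoutInterruption(self, pcb):\n#     pcb.setState('ready')                       # running -> ready on the timeout interrupt\n#     self.readyQueue.put(pcb)                    # back of the round-robin queue\n#     self.cpu.setPCB(self.readyQueue.get())      # the scheduler hands the CPU its next pcb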
ready -> a timeout interruption. The alert goes to the kernel\n#-switching from running to terminated -> fire a kill interruption when I am at the last instruction.\n#some component of yours preempts it, cleans the memory and removes the pcb\n#and the scheduler assigns a new pcb to the cpu\n#-interruption for "I finished doing IO"\n\n\n\n#if the ready queue is empty it goes straight to the cpu\n#the cpu notifies the kernel that there is an interruption\n#the clock sends a tick and the cpu fetches an instruction\n\n#still missing: setting states on the pcbs\n\n#user mode - the cpu executes instructions\n#kernel mode - OS tasks. The Clock cannot generate ticks\n\n#while an interruption is being handled no ticks can happen, and therefore no instruction fetches\n#schedule by priority with round robin. When you create the kernel you configure which scheduling policy it should work with\n\n\n\n\n#################\n\n#from threading import Thread\n#class Impresor(Thread)\n\n #def __init__(self,name):\n # Thread.__init__(self)\n # self.name = name\n\n #def run\n#sstart()\n\n#the CPU is one thread, i/o is another thread\n\n#the I/O queue is a structure shared between two different Threads. The CPU writes to it and I/O reads from it\n#the ready queue as well, because it is read by the scheduler and written by the kernel. Interruption handler\n\n#CPU Thread\n#while the OS is running\n#do a fetch","sub_path":"domain_SO/Kernel.py","file_name":"Kernel.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"79378794","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Enemy(Sprite):\r\n def __init__(self,screen,settings,ball):\r\n super(Enemy,self).__init__()\r\n\r\n self.screen_player = screen\r\n self.settings_enemy=settings\r\n self.ball=ball\r\n #draw center line\r\n self.midfield=pygame.Rect(0,0,settings.sidewidth,settings.screenheight)\r\n\r\n #Draw side\r\n self.main_paddle=pygame.Rect(0,0,settings.sidewidth, settings.sideheight)\r\n self.screen_rect= self.screen_player.get_rect()\r\n\r\n self.midfield.centerx=self.screen_rect.centerx\r\n\r\n #Draw bottom and top\r\n\r\n self.top_paddle=pygame.Rect(0,0,settings.bottomwidth,settings.bottomheight)\r\n self.bottom_paddle=pygame.Rect(0,0,settings.bottomwidth,settings.bottomheight)\r\n\r\n #Put paddles to the left of the screen\r\n self.main_paddle.centery=self.screen_rect.centery\r\n self.main_paddle.midright=self.screen_rect.midright\r\n\r\n\r\n self.top_paddle.centerx=self.screen_rect.centerx\r\n self.top_paddle.topright=self.screen_rect.topright\r\n\r\n self.bottom_paddle.centerx=self.screen_rect.centerx\r\n self.bottom_paddle.bottomright=self.screen_rect.bottomright\r\n\r\n\r\n self.center=float(self.main_paddle.centery)\r\n self.centertop=float(self.top_paddle.centerx)\r\n self.centerbottom=float(self.bottom_paddle.centerx)\r\n\r\n #Variables to update enemy paddles\r\n self.AI_main=float(0)\r\n self.AI_top = float(0)\r\n self.AI_bottom = float(0)\r\n\r\n self.main_paddle.centery=self.center\r\n self.top_paddle.centerx=self.centertop\r\n self.bottom_paddle.centerx=self.centerbottom\r\n\r\n self.movingright=False\r\n self.movingleft=False\r\n def main_ai(self):\r\n if self.main_paddle.centery > self.ball.rect.centery:\r\n self.AI_main=-1 # go down\r\n elif self.main_paddle.centery < self.ball.rect.centery:\r\n self.AI_main = 1#go up\r\n\r\n def top_ai(self):\r\n\r\n if self.top_paddle.centerx > self.ball.rect.centerx:\r\n self.movingleft = True\r\n #self.AI_top=-1 # go left\r\n if self.top_paddle.centerx < 
self.ball.rect.centerx:\r\n self.movingright = True\r\n #self.AI_top =1 # go right\r\n\r\n def bottom_ai(self):\r\n if self.bottom_paddle.centerx > self.ball.rect.centerx:\r\n self.movingleft=True\r\n # self.AI_bottom = -1 # go left\r\n if self.bottom_paddle.centerx < self.ball.rect.centerx:\r\n self.movingright = True\r\n #self.AI_bottom = 1 # go right\r\n\r\n def update(self):\r\n self.movingright = False\r\n self.movingleft = False\r\n #check positions\r\n self.main_ai()\r\n self.top_ai()\r\n self.bottom_ai()\r\n #move paddles\r\n self.main_paddle.centery += self.settings_enemy.enemyspeed * self.AI_main\r\n if self.movingleft and self.top_paddle.left > self.settings_enemy.screenwidth/2:\r\n self.top_paddle.centerx -= self.settings_enemy.enemyspeed\r\n if self.movingleft and self.bottom_paddle.left > self.settings_enemy.screenwidth / 2:\r\n self.bottom_paddle.centerx -= self.settings_enemy.enemyspeed\r\n if self.movingright:\r\n self.top_paddle.centerx += self.settings_enemy.enemyspeed\r\n if self.movingright:\r\n self.bottom_paddle.centerx += self.settings_enemy.enemyspeed\r\n\r\n def drawplayer(self):\r\n pygame.draw.rect(self.screen_player,self.settings_enemy.enemycolor,self.main_paddle)\r\n pygame.draw.rect(self.screen_player, self.settings_enemy.enemycolor, self.top_paddle)\r\n pygame.draw.rect(self.screen_player, self.settings_enemy.enemycolor, self.bottom_paddle)\r\n\r\n","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242795770","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom doctor.items import HospitalItem,DoctorItem,DepartmentItem\nimport re\nfrom lxml import etree \nimport ast\n\nclass DoctorcastSpider(scrapy.Spider):\n\tdef __init__(self):\n\t\tsuper(DoctorcastSpider,self).__init__()\n\t\tself.remaining_links = len(self.start_urls)\n\tname = 'doctorcast'\n\n\tallowed_domains = ['haodf.com']\n\tstart_urls = 
(\n\t\t'https://www.haodf.com/yiyuan/beijing/list.htm',\n\t\t'https://www.haodf.com/yiyuan/shanghai/list.htm',\n\t\t'https://www.haodf.com/yiyuan/guangdong/list.htm',\n\t\t'https://www.haodf.com/yiyuan/guangxi/list.htm',\n\t\t'https://www.haodf.com/yiyuan/jiangsu/list.htm',\n\t\t'https://www.haodf.com/yiyuan/zhejiang/list.htm',\n\t\t'https://www.haodf.com/yiyuan/anhui/list.htm',\n\t\t'https://www.haodf.com/yiyuan/jiangxi/list.htm',\n\t\t'https://www.haodf.com/yiyuan/fujian/list.htm',\n\t\t'https://www.haodf.com/yiyuan/shandong/list.htm',\n\t\t'https://www.haodf.com/yiyuan/sx/list.htm',\n\t\t'https://www.haodf.com/yiyuan/hebei/list.htm',\n\t\t'https://www.haodf.com/yiyuan/henan/list.htm',\n\t\t'https://www.haodf.com/yiyuan/tianjin/list.htm',\n\t\t'https://www.haodf.com/yiyuan/liaoning/list.htm',\n\t\t'https://www.haodf.com/yiyuan/heilongjiang/list.htm',\n\t\t'https://www.haodf.com/yiyuan/jilin/list.htm',\n\t\t'https://www.haodf.com/yiyuan/hubei/list.htm',\n\t\t'https://www.haodf.com/yiyuan/hunan/list.htm',\n\t\t'https://www.haodf.com/yiyuan/sichuan/list.htm',\n\t\t'https://www.haodf.com/yiyuan/chongqing/list.htm',\n\t\t'https://www.haodf.com/yiyuan/shanxi/list.htm',\n\t\t'https://www.haodf.com/yiyuan/gansu/list.htm',\n\t\t'https://www.haodf.com/yiyuan/yunnan/list.htm',\n\t\t'https://www.haodf.com/yiyuan/xinjiang/list.htm',\n\t\t'https://www.haodf.com/yiyuan/neimenggu/list.htm',\n\t\t'https://www.haodf.com/yiyuan/hainan/list.htm',\n\t\t'https://www.haodf.com/yiyuan/guizhou/list.htm',\n\t\t'https://www.haodf.com/yiyuan/qinghai/list.htm',\n\t\t'https://www.haodf.com/yiyuan/ningxia/list.htm',\n\t\t'https://www.haodf.com/yiyuan/xizang/list.htm',\n\t\t)\n\trules = [\n\t\tRule(LinkExtractor(allow=[r'/yiyuan/.+/\\.list.htm']),callback='parse_yiyuan'),\n\t\tRule(LinkExtractor(allow=[r'/hospital/.+\\.htm']),callback='parse_hospital'),\n\t\tRule(LinkExtractor(allow=[r'/faculty/.+\\.htm']),callback='parse_faculty'),\n\t\tRule(LinkExtractor(allow=[r'/doctor/.+\\.htm']),callback='parse_doctor'),\n\t]\n\t\n\tdef parse(self, response):\n\t\tself.remaining_links -= 1\n\t\tself.logger.info('parsing hospital list page -- %s ,剩余链接数:%d' % (response.url,self.remaining_links))\n\t\thospital_links = response.xpath(\"//div[@class=\\\"m_ctt_green\\\"]/ul/li/a/@href\") \t\n\t\tfor link in hospital_links:\n\t\t\tself.remaining_links += 1\n\t\t\tyield scrapy.Request(response.urljoin(link.extract()),callback=self.parse_hospital,dont_filter=True,priority=900)\n\n\tdef parse_hospital(self, response):\n\t\tself.remaining_links -= 1\n\t\tself.logger.info('parsing hospital page -- %s ,剩余链接数:%d'%(response.url,self.remaining_links))\n\t\titem = HospitalItem()\n\t\titem['link'] = response.url\n\t\titem['name'] = response.xpath('//h1[@class=\"hospital-name\"]/text()').extract_first()\n\t\tself.logger.info('get Hospital item -- ' + item['name'])\n\t\tyield item\n\t\tfaculty_links = response.xpath(\"//a[@class=\\\"f-l-i-s-i-w-name\\\"]/@href\") \n\t\tfor link in faculty_links:\n\t\t\tself.remaining_links += 1\n\t\t\tyield scrapy.Request(response.urljoin(link.extract()),callback=self.parse_faculty,dont_filter=True,priority=800)\n\n\tdef parse_faculty(self, response):\n\t\tself.remaining_links -= 1\n\t\tself.logger.info('parsing faculty page -- %s ,剩余链接数:%d'%(response.url,self.remaining_links))\n\t\titem = DepartmentItem()\n\t\titem['link'] = response.url\n\t\titem['name'] = response.xpath('//div[@class=\"content\"]/a/text()').extract_first()\n\t\tself.logger.info('get faculty item -- ' + item['name'])\n\t\tyield 
item\n\t\tdoctor_links = response.xpath(\"//a[@class=\\\"name\\\"]/@href\") \n\t\tfor link in doctor_links:\n\t\t\tself.remaining_links += 1\n\t\t\tyield scrapy.Request(response.urljoin(link.extract()),callback=self.parse_doctor,dont_filter=True,priority=700)\n\t\tpnum_links = response.xpath(\"//a[@class=\\\"p_num\\\"]/@href\")\n\t\tfor link in pnum_links:\n\t\t\tself.remaining_links += 1\n\t\t\tyield scrapy.Request(response.urljoin(link.extract()),callback=self.parse_faculty,dont_filter=True,priority=800)\n\t\t\n\t\t\n\tdef parse_doctor(self, response):\n\t\tself.remaining_links -= 1\n\t\tself.logger.info('parsing doctor page -- %s ,剩余链接数:%d'%(response.url,self.remaining_links))\n\t\tdata = repr(ast.literal_eval(response.xpath('//script[1]')[1].extract()[55:-11])['content'].replace('\\n','').replace('\\t','')).replace(r'\\\\',\"\")[1:-1]\n\t\thtml = etree.HTML(data)\n\t\titem = DoctorItem()\n\t\titem['link'] = response.url\n\t\titem['name'] = html.xpath(\"//div[@class='luj']/a[6]\")[0].text\n\t\titem['hospital'] = html.xpath(\"//div[@class='luj']/a[4]\")[0].text\n\t\titem['department'] = html.xpath(\"//div[@class='luj']/a[5]\")[0].text\n\t\tyield item\n\t\t\n","sub_path":"doctor/doctor/spiders/doctorcast.py","file_name":"doctorcast.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286476635","text":"from django.contrib import admin\nfrom django.urls import path, include\n\n\nurlpatterns = [\n path('admin/', admin.site.urls,name='admin'),\n path('', include('cliente.urls'),name='cliente'),\n path('restaurantes/', include('restaurantes.urls'),name='restaurantes'),\n path('api/', include('api.urls'),name='api'),\n]\n\n","sub_path":"diamanfood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"200090964","text":"import os, sys \nimport string\nimport random\nimport hashlib\n\nfrom time import gmtime, strftime\nfrom random import random\n\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import render_template\nfrom flask import Blueprint\n\nfrom .db import db_get\nfrom .auth import login_required\n\n\nbp = Blueprint(\"peer\", __name__)\n\n################################################\n### Utility\n################################################\ndef key_generate(user):\n time_now = strftime(user + \"%a, %d %b %Y %H:%M:%S +0000\", gmtime())\n time_now = \"%s-%f\" % (time_now, random())\n md = hashlib.md5(time_now.encode())\n md_digest = md.hexdigest()\n return md_digest[-6:]\n\n################################################\n##### peer service #################\n@bp.route('/peer/register', methods = ['POST'])\ndef peer_register():\n #print (\"Register >> peer_register >>>ENTER \")\n db = db_get()\n code = key_generate('user')\n shareinfo = 'mqtt/mac012'\n db.execute(\n \"INSERT INTO invitation (code, shareinfo) VALUES (?, ?)\",\n (code, shareinfo),\n )\n db.commit()\n\n return code, 200\n\n@bp.route('/peer/get/<string:peer_id>', methods = ['GET'])\ndef peer_get(peer_id):\n db = db_get()\n share_info = db.execute(\n 'SELECT shareinfo FROM invitation WHERE code = \"%s\"' % (peer_id)\n ).fetchone()\n\n if share_info:\n return share_info['shareinfo'], 200\n else:\n return \"NONE\", 200\n \n\n################################################\n### view page \n@bp.route('/')\n@login_required\ndef 
index():\n db = db_get()\n invitations = db.execute(\n \"SELECT code, shareinfo, created\"\n \" FROM invitation\"\n \" ORDER BY created DESC\"\n ).fetchall()\n\n return render_template('invite/index.html', invitations=invitations)\n\n@bp.route('/peer/delete/<string:peer_id>', methods = ('POST', 'GET'))\ndef peer_delete(peer_id):\n # Get form data\n db = db_get()\n sql_string = \"DELETE FROM invitation WHERE code = \\\"%s\\\"\" % (peer_id)\n #print ( \"peer_delete >>> \" + sql_string + \" peer_delete >>>\")\n db.execute(sql_string)\n db.commit()\n\n return redirect(url_for(\"peer.index\"))\n","sub_path":"invitation/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221554463","text":"#coding=utf-8\n\n# Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport sys\nfrom familia_wrapper import InferenceEngineWrapper\n\nif sys.version_info < (3,0):\n input = raw_input\n\nif __name__ == '__main__':\n if len(sys.argv) < 4:\n sys.stderr.write(\"Usage:python {} {} {} {}.\\n\".format(\n sys.argv[0], \"model_dir\", \"conf_file\", \"emb_file\"))\n exit(-1)\n\n # get the command-line arguments\n model_dir = sys.argv[1]\n conf_file = sys.argv[2]\n emb_file = sys.argv[3]\n # create the InferenceEngineWrapper object\n inference_engine_wrapper = InferenceEngineWrapper(model_dir, conf_file, emb_file)\n while True:\n # read the two input texts\n words = input(\"Enter Keywords: \").strip()\n doc = input(\"Enter Document: \").strip()\n # tokenize the document\n seg_list = inference_engine_wrapper.tokenize(doc)\n items = inference_engine_wrapper.cal_keywords_twe_similarity(words, \" \".join(seg_list))\n # print the results\n print('----------------------------')\n for item in items:\n print(item[0] + '\\t' + str(item[1]))\n","sub_path":"python/demo/document_twe_keywords_demo.py","file_name":"document_twe_keywords_demo.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"317815399","text":"# Using the workspace:\r\n\r\n# Here are a few tips orienting you to this kind of workspace.\r\n\r\n# In the top panel is a code editor where you can edit the Python file user_input_numlist.py. Scroll up and down in this panel to see all the code. You can also expand or shrink this panel by clicking and dragging its bottom border.\r\n\r\n# In the bottom panel, you can execute your Python file by clicking on New Terminal and entering python user_input_numlist.py on the command line.\r\n\r\n# The solutions have been provided on the next page, but I encourage you to try figuring out where the bug is in the code, and fixing it yourself.\r\n\r\n# Sample Output: This is what the output should look like.\r\n\r\n# >>> user_list: [23, 24, 25, 26, 27, 28, 29, 30, 31, 22]\r\n# >>> The sum of the even numbers in user_list is: 130.\r\n\r\n# initiate empty list to hold user input and sum value of zero\r\nuser_list = []\r\nlist_sum = 0\r\n\r\n# seek user input for ten numbers \r\nfor i in range(10):\r\n userInput = input(\"Enter any 2-digit number: \")\r\n \r\n# check to see if number is even and if yes, add to list_sum\r\n# print incorrect value warning when ValueError exception occurs\r\n try:\r\n number = userInput\r\n user_list.append(number)\r\n if number % 2 == 0:\r\n list_sum += number\r\n except ValueError:\r\n print(\"Incorrect value. 
That's not an int!\")\r\n ","sub_path":"practice_debugging.py","file_name":"practice_debugging.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66874503","text":"from selenium import webdriver\nimport time\n\nbrowser = webdriver.Chrome()\nbrowser.get(\"http://suninjuly.github.io/wait1.html\")\n\t\ntry:\n\t# tell WebDriver to wait up to 5 seconds when looking for each element\n\tbrowser.implicitly_wait(5)\n\tbutton = browser.find_element_by_id(\"verify\")\n\tbutton.click()\n\tmessage = browser.find_element_by_id(\"verify_message\")\n\n\tassert \"successful\" in message.text\n\tmtext = message.text\n\tprint(mtext)\n\t\nexcept Exception as error:\n print(f'{error}')\n #print(welcome_text)\n\nfinally:\n # leaves us 30 seconds to copy the code\n #time.sleep(5)\n # close the browser after all the manipulations\n browser.quit()\n \n# don't forget to leave an empty line at the end of the file","sub_path":"lesson2.4_step3.py","file_name":"lesson2.4_step3.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"590002958","text":"from datetime import datetime\nfrom time import mktime\n\ndate = 'Today is Monday, December 23th 2013'\n\nunixTime= datetime.strptime(date, 'Today is %A, %B %dth %Y')\n\nconverted = mktime(unixTime.timetuple())\n\nprint(converted)\n\n#converts the current date\nconverted = mktime(datetime.now().timetuple())\nprint(converted)\n\n","sub_path":"Python 101/datetimeParaUnixTimeStamp.py","file_name":"datetimeParaUnixTimeStamp.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234895894","text":"from .canonical_linear import CanonicalLinearConstraint, canlinear_colloc_to_interpolate\nfrom ..constraint import DiscretizationType\nimport numpy as np\n\n\nclass JointAccelerationConstraint(CanonicalLinearConstraint):\n \"\"\"Joint Acceleration Constraint.\n\n A joint acceleration constraint is given by\n\n .. math ::\n\n \\ddot{\\mathbf{q}}_{min} & \\leq \\ddot{\\mathbf q} &\\leq \\ddot{\\mathbf{q}}_{max} \\\\\\\\\n \\ddot{\\mathbf{q}}_{min} & \\leq \\mathbf{q}'(s_i) u_i + \\mathbf{q}''(s_i) x_i &\\leq \\ddot{\\mathbf{q}}_{max}\n\n where :math:`u_i, x_i` are respectively the path acceleration and\n path velocity square at :math:`s_i`. For more detail see :ref:`derivationKinematics`.\n\n Rearranging the above pair of vector inequalities into the form\n required by :class:`CanonicalLinearConstraint`, we have:\n\n - :code:`a[i]` := :math:`\\mathbf q'(s_i)`\n - :code:`b[i]` := :math:`\\mathbf q''(s_i)`\n - :code:`F` := :math:`[\\mathbf{I}, -\\mathbf I]^T`\n - :code:`h` := :math:`[\\ddot{\\mathbf{q}}_{max}^T, -\\ddot{\\mathbf{q}}_{min}^T]^T`\n\n Parameters\n ----------\n alim: array\n Shape (dof, 2). The lower and upper acceleration bounds of the\n j-th joint are alim[j, 0] and alim[j, 1] respectively.\n\n discretization_scheme: :class:`.DiscretizationType`\n Can be either Collocation (0) or Interpolation\n (1). 
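A minimal numpy sketch of the rearrangement described in the docstring above, for a made-up dof=2 case: stacking I and -I into F and the acceleration bounds into g so that F @ qdd <= g encodes alim[:, 0] <= qdd <= alim[:, 1]. The alim and qdd values here are illustrative only.

import numpy as np

alim = np.array([[-2.0, 2.0], [-1.0, 1.0]])  # per-joint [min, max] acceleration bounds
dof = alim.shape[0]
F = np.vstack([np.eye(dof), -np.eye(dof)])   # F := [I, -I]^T
g = np.hstack([alim[:, 1], -alim[:, 0]])     # g := [qdd_max^T, -qdd_min^T]^T
qdd = np.array([1.5, -0.5])                  # a sample joint acceleration
assert np.all(F @ qdd <= g)                  # holds exactly when qdd is within the bounds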
Interpolation gives more accurate results with slightly\n higher computational cost.\n\n \"\"\"\n def __init__(self, alim, discretization_scheme=DiscretizationType.Collocation):\n super(JointAccelerationConstraint, self).__init__()\n self.alim = np.array(alim, dtype=float)\n self.dof = self.alim.shape[0]\n self.set_discretization_type(discretization_scheme)\n assert self.alim.shape[1] == 2, \"Wrong input shape.\"\n self._format_string = \" Acceleration limit: \\n\"\n for i in range(self.alim.shape[0]):\n self._format_string += \" J{:d}: {:}\".format(i + 1, self.alim[i]) + \"\\n\"\n self.identical = True\n\n def compute_constraint_params(self, path, gridpoints, scaling):\n if path.get_dof() != self.get_dof():\n raise ValueError(\"Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})\".format(\n self.get_dof(), path.get_dof()\n ))\n ps = path.evald(gridpoints / scaling) / scaling\n pss = path.evaldd(gridpoints / scaling) / scaling ** 2\n N = gridpoints.shape[0] - 1\n dof = path.get_dof()\n I_dof = np.eye(dof)\n F = np.zeros((dof * 2, dof))\n g = np.zeros(dof * 2)\n ubound = np.zeros((N + 1, 2))\n g[0:dof] = self.alim[:, 1]\n g[dof:] = - self.alim[:, 0]\n F[0:dof, :] = I_dof\n F[dof:, :] = -I_dof\n if self.discretization_type == DiscretizationType.Collocation:\n return ps, pss, np.zeros_like(ps), F, g, None, None\n elif self.discretization_type == DiscretizationType.Interpolation:\n return canlinear_colloc_to_interpolate(ps, pss, np.zeros_like(ps), F, g, None, None,\n gridpoints, identical=True)\n else:\n raise NotImplementedError(\"Other form of discretization not supported!\")\n\n","sub_path":"toppra/constraint/joint_acceleration.py","file_name":"joint_acceleration.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23844148","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# This network looks much like a two-layer feedforward neural network,\n# with a few twists: first, the same weights and bias terms are shared\n# by both layers, and second, we feed inputs at each layer, and we get\n# outputs from each layer. To run the model, we need to feed it the\n# inputs at both time steps, like so:\n\nn_inputs = 3\nn_neurons = 5\n\nX0 = tf.placeholder(tf.float32, [None, n_inputs])\nX1 = tf.placeholder(tf.float32, [None, n_inputs])\n\nWx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))\nWy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))\nb = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))\n\nY0 = tf.tanh(tf.matmul(X0, Wx) + b)\nY1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b)\n\ninit = tf.global_variables_initializer()\n\n\n# This mini-batch contains four instances, each with an input sequence\n# composed of exactly two inputs. 
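The same two-step recurrence, restated in plain numpy to make the weight sharing explicit; the shapes (3 inputs, 5 neurons, batch of 4) mirror the TensorFlow code above, and the random values are placeholders.

import numpy as np

rng = np.random.default_rng(0)
Wx = rng.normal(size=(3, 5))          # input-to-hidden weights, shared across steps
Wy = rng.normal(size=(5, 5))          # hidden-to-hidden weights, shared across steps
b = np.zeros((1, 5))
X0 = rng.normal(size=(4, 3))          # batch at t = 0
X1 = rng.normal(size=(4, 3))          # batch at t = 1
Y0 = np.tanh(X0 @ Wx + b)             # output at t = 0
Y1 = np.tanh(Y0 @ Wy + X1 @ Wx + b)   # output at t = 1 reuses Wx, Wy and b
print(Y0.shape, Y1.shape)             # (4, 5) (4, 5)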
At the end, Y0_val and Y1_val contain\n# the outputs of the network at both time steps for all neurons and all\n# instances in the mini-batch:\n# Mini-batch: instance 0,instance 1,instance 2,instance 3\nX0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0\nX1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1\n\nwith tf.Session() as sess:\n init.run()\n Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})\n\n\nprint(Y0_val)\n\n# That wasn’t too hard, but of course if you want to be able to run an RNN\n# over 100 time steps, the graph is going to be pretty big. Now let’s look\n# at how to create the same model using TensorFlow’s RNN operations.\n\n","sub_path":"bin/run_13.py","file_name":"run_13.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273814773","text":"# ========================================================================\n# Copyright (c) 2015 The University of Washington\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ========================================================================\n#\n\n#\n# IAM messaging tools - encryption and signature methods\n#\n# \n\n# crypto class covers for openssl\nimport M2Crypto\nfrom M2Crypto import BIO, RSA, EVP, X509\n\nimport json\nimport uuid\nimport datetime\nimport dateutil.parser\nimport base64\nimport string\nimport time\nimport re\nimport os.path\nfrom sys import exit\nimport signal\nimport importlib\n\nimport urllib3\n\nimport threading\n\nfrom .exceptions import SignatureVerifyException\nfrom .exceptions import CryptKeyException\nfrom .exceptions import SigningCertException\n\n# ----- global vars (to this module) ------------------\n\n# decryption keys\n_crypt_keys = {}\n\n# public keys used for sig verify\n_public_keys = {}\n\n# private keys used for sig sign\n_private_keys = {}\n\n# ca certificate file\n_ca_file = None\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n#\n# -------------------------------------\n#\n\n#\n# accumulate header fields for signature\n#\ndef _build_sig_msg(header, txt):\n sigmsg = header['contentType'] + '\\n'\n if 'keyId' in header:\n sigmsg = sigmsg + header['iv'] + '\\n' + header['keyId'] + '\\n'\n sigmsg = sigmsg + header['messageContext'] + '\\n' + header['messageId'] + '\\n' + \\\n header['messageType'] + '\\n' + header['sender'] + '\\n' + \\\n header['signingCertUrl'] + '\\n' + header['timestamp'] + '\\n' + header['version'] + '\\n' + \\\n txt + '\\n'\n return sigmsg.encode('ascii')\n\n#\n# create a signed (and encrypted) iam message\n#\n# msg is anything\n# context is string\n\ndef encode_message(msg, context, cryptid, signid):\n \n iamHeader = {}\n iamHeader['contentType'] = 'json'\n iamHeader['version'] = 'UWIT-1'\n iamHeader['messageType'] = 'iam-test'\n u = uuid.uuid4()\n iamHeader['messageId'] = str(u)\n iamHeader['messageContext'] = base64.b64encode(context)\n iamHeader['sender'] = 'iam-msg'\n\n 
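A stdlib-only sketch of the signature-string convention that _build_sig_msg implements above: the header fields are newline-joined in a fixed order, followed by the base64 body, and the resulting bytes are what gets signed. All field values below are dummies.

import base64, datetime, uuid

header = {
    'contentType': 'json',
    'messageContext': base64.b64encode(b'example-context').decode(),
    'messageId': str(uuid.uuid4()),
    'messageType': 'iam-test',
    'sender': 'iam-msg',
    'signingCertUrl': 'file:/tmp/example.crt',
    'timestamp': datetime.datetime.utcnow().isoformat(),
    'version': 'UWIT-1',
}
body64 = base64.b64encode(b'example payload').decode()
fields = ['contentType', 'messageContext', 'messageId', 'messageType',
          'sender', 'signingCertUrl', 'timestamp', 'version']
# same ordering as _build_sig_msg for an unencrypted message (no iv/keyId)
sig_base = ('\n'.join(header[f] for f in fields) + '\n' + body64 + '\n').encode('ascii')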
iamHeader['timestamp'] = datetime.datetime.utcnow().isoformat()\n if signid not in _private_keys:\n raise SigningCertException(keyid=signid, msg='not found')\n iamHeader['signingCertUrl'] = _private_keys[signid]['url']\n\n if cryptid!=None:\n if cryptid not in _crypt_keys:\n raise CryptKeyException(keyid=cryptid, msg='not found')\n iamHeader['keyId'] = cryptid\n iv = os.urandom(16)\n iamHeader['iv'] = base64.b64encode(iv)\n cipher = M2Crypto.EVP.Cipher(alg='aes_128_cbc', key=_crypt_keys[cryptid], iv=iv, op=1)\n txt = cipher.update(msg) + cipher.final()\n enctxt64 = base64.b64encode(txt)\n else:\n enctxt64 = base64.b64encode(msg)\n \n # gen the signature\n sigmsg = _build_sig_msg(iamHeader, enctxt64)\n\n key = _private_keys[signid]['key']\n key.reset_context(md='sha1')\n key.sign_init()\n key.sign_update(sigmsg)\n sig = key.sign_final()\n sig64 = base64.b64encode(sig)\n iamHeader['signature'] = sig64\n\n body = {}\n body['Message'] = enctxt64\n \n iamMessage = {}\n iamMessage['header'] = iamHeader\n iamMessage['body'] = enctxt64\n\n m64 = base64.b64encode(json.dumps(iamMessage))\n return m64\n \n#\n# receive a signed (and encrypted) iam message\n#\n\ndef decode_message(b64msg):\n global _crypt_keys \n global _public_keys \n global _ca_file \n\n # get the iam message\n msgstr = base64.b64decode(b64msg).encode('utf8','ignore')\n iam_message = json.loads(msgstr)\n\n\n if 'header' not in iam_message: \n logging.info('not an iam message')\n return None\n iamHeader = iam_message['header']\n\n try:\n # check the version\n if iamHeader['version'] != 'UWIT-1':\n logging.error('unknown version: ' + iamHeader['version'])\n return None\n\n # the signing cert should be cached most of the time\n certurl = iamHeader['signingCertUrl']\n if not certurl in _public_keys:\n logging.info('Fetching signing cert: ' + certurl)\n pem = ''\n\n if certurl.startswith('file:'):\n with open(certurl[5:], 'r') as f:\n pem = f.read()\n\n elif certurl.startswith('http'):\n if _ca_file != None:\n http = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED', # Force certificate check.\n ca_certs=_ca_file, \n )\n else:\n http = urllib3.PoolManager()\n certdoc = http.request('GET', certurl)\n if certdoc.status != 200:\n logger.error('sws cert get failed: ' + certdoc.status)\n raise SigningCertException(url=certurl, status=certdoc.status)\n logger.debug('got it')\n pem = certdoc.data\n else:\n raise SigningCertException(url=certurl, status=-1)\n\n x509 = X509.load_cert_string(pem)\n key = x509.get_pubkey()\n _public_keys[certurl] = key\n\n enctxt64 = iam_message['body']\n\n # check the signature\n\n sigmsg = _build_sig_msg(iamHeader, enctxt64)\n sig = base64.b64decode(iamHeader['signature'])\n pubkey = _public_keys[certurl]\n pubkey.reset_context(md='sha1')\n pubkey.verify_init()\n pubkey.verify_update(sigmsg)\n if pubkey.verify_final(sig)!=1:\n raise SignatureVerifyException()\n\n # decrypt the message\n if 'keyId' in iamHeader:\n iv64 = iamHeader['iv']\n iv = base64.b64decode(iv64)\n keyid = iamHeader['keyId']\n if not keyid in _crypt_keys:\n logger.error('key ' + keyid + ' not found')\n raise CryptKeyException(keyid=keyid, msg='not found')\n key = _crypt_keys[keyid]\n \n enctxt = base64.b64decode(enctxt64)\n cipher = M2Crypto.EVP.Cipher(alg='aes_128_cbc', key=key, iv=iv, op=0)\n txt = cipher.update(enctxt) + cipher.final()\n else:\n txt = base64.b64decode(enctxt64)\n\n txt = [x for x in txt if x in string.printable]\n iam_message['body'] = txt\n # un-base64 the context\n try:\n iamHeader['messageContext'] = 
base64.b64decode(iamHeader['messageContext'])\n except TypeError:\n logger.info( 'context not base64')\n return None\n except KeyError:\n if 'AlarmName' in iam_message:\n logger.debug('alarm: ' + iam_message['AlarmName'])\n return iam_message\n\n logger.error('Unknown message key: ' )\n return None\n\n return iam_message\n\n\ndef crypt_init(cfg):\n global _crypt_keys\n global _public_keys\n global _ca_file\n\n # load the signing keys\n certs = cfg['CERTS']\n for c in certs:\n id = c['ID']\n crt = {}\n crt['url'] = c['URL']\n crt['key'] = EVP.load_key(c['KEYFILE'])\n _private_keys[id] = crt\n\n\n # load the cryption key\n keys = cfg['CRYPTS']\n for k in keys:\n id = k['ID']\n k64 = k['KEY']\n logger.debug('adding crypt key ' + id)\n kbin = base64.b64decode(k64)\n _crypt_keys[id] = kbin\n\n # are we verifying certs ( just for the signing cert )\n if 'ca_file' in cfg:\n _ca_file = cfg['CA_FILE']\n \n","sub_path":"messagetools.old/iam_message.py","file_name":"iam_message.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612702877","text":"from google.cloud import vision\nfrom google.cloud.vision import types\nfrom PIL import Image, ImageDraw\n\nimport dialogue\n\nformer_status = \"\"\n\ndef get_face(input_filename):\n with open(input_filename, 'rb') as image:\n faces = detect_face(image)\n if not faces:\n dialogue.play_audio(\"nobody\")\n return {'status': 'nobody', 'dialogue': dialogue.get_dialogue(\"nobody\")}\n image.seek(0)\n face_status = highlight_faces(image, faces)\n return face_status\n\ndef detect_face(face_file, max_results=4):\n client = vision.ImageAnnotatorClient()\n content = face_file.read()\n image = types.Image(content=content)\n return client.face_detection(image=image).face_annotations\n\ndef highlight_faces(image, faces):\n if len(faces) == 1:\n box = None\n left_eye = None\n right_eye = None\n nose_tip = None\n joyLikelihood = None\n for face in faces:\n left_eye = face.landmarks[0].position\n right_eye = face.landmarks[1].position\n nose_tip = face.landmarks[7].position\n joyLikelihood = face.joy_likelihood\n box = [(vertex.x, vertex.y) for vertex in face.bounding_poly.vertices]\n return check_face_loc_lonely(box,left_eye,right_eye,nose_tip,joyLikelihood)\n else:\n box = []\n left_eye = []\n right_eye = []\n nose_tip = []\n joyLikelihood = []\n for value in faces:\n left_eye.append(value.landmarks[0].position)\n right_eye.append(value.landmarks[1].position)\n nose_tip.append(value.landmarks[7].position)\n joyLikelihood.append(value.joy_likelihood)\n box.append([(vertex.x, vertex.y) for vertex in value.bounding_poly.vertices])\n return check_face_loc(box,left_eye,right_eye,nose_tip,joyLikelihood)\n\ndef check_face_loc_lonely(face_box,left_eye,right_eye,nose_tip,joyLikelihood):\n global former_status\n print(former_status)\n if (face_box[0][0]-face_box[1][0])*(face_box[1][1]-face_box[2][1]) < 150 * 150 :\n status = \"forward\"\n result = play_audio(status)\n return result #the face should be a bit higher\n if (face_box[0][0]-face_box[1][0])*(face_box[1][1]-face_box[2][1]) > 600 * 600 :\n status = \"back\"\n result = play_audio(status)\n return result\n if(face_box[0][0] > 1024*1/2) :\n status = \"right\"\n result = play_audio(status)\n return result #the subject should move right\n if(face_box[1][0] < 1024*1/2) :\n status = \"left\"\n result = play_audio(status)\n return result #the subject should move left\n if face_box[0][1] > 768*1/2 :\n status = \"forward\"\n result = play_audio(status)\n return result #the face should be a bit higher\n if face_box[3][1] < 
768*1/2 :\n status = \"back\"\n result = play_audio(status)\n return result #the face should be a bit lower\n if(joyLikelihood == 1) :\n status = \"smile\"\n result = play_audio(status)\n return result #the face should be a bit lower\n result = play_audio(\"ok\")\n former_status = \"\"\n return result\n\ndef check_face_loc(face_boxes,left_eyes,right_eyes,nose_tips,joyLikelihoods):\n global former_status\n print(\"2\")\n for face_box in face_boxes:\n print(face_box)\n if(face_box[0][0] > 1024):\n status = \"center\"\n result = play_audio(status)\n return result\n if(face_box[1][0] < 0):\n status = \"center\"\n result = play_audio(status)\n return result\n if face_box[0][1] > 768:\n status = \"forwards\"\n result = play_audio(status)\n return result\n if face_box[3][1] < 0:\n status = \"backs\"\n result = play_audio(status)\n return result\n for joyLikelihood in joyLikelihoods:\n if(joyLikelihood == 1):\n status = \"smiles\"\n result = play_audio(status)\n return result\n result = play_audio(\"ok\")\n former_status = \"\"\n return result\n\ndef play_audio(status):\n global former_status\n if former_status == status:\n status += \" again\"\n else:\n former_status = status\n text = dialogue.get_dialogue(status)\n dialogue.play_audio(status)\n \n return {\"status\": status, \"dialogue\": text}\n","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514068954","text":"import Tool\nimport SMO\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport KernelTransform\n'''\ncalculate w and draw the figure;\nthe training samples whose α is not equal to zero\nare called support vectors\n'''\ndef calculateW(alphas , data , labels):\n x = np.mat(data)\n label = np.mat(labels).transpose()\n m , n = np.shape(x)\n w = np.zeros((n , 1))\n for i in range(m):\n w += np.multiply(alphas[i] * label[i] , x[i , :].T)\n return w\n pass\n\nif __name__ == '__main__':\n data, label = Tool.loadDataSet('../Data/testSet.txt')\n b,alphas = SMO.smo(data , label , kernel=False)\n w = calculateW(alphas , data , label)\n x = np.arange(0 , 11)\n print(w)\n y = (-b - w[0]*x)/w[1]\n Tool.drawDataset(data , label , x , y.tolist()[0] , line=True , alphas=alphas)\n\n data, label = Tool.loadDataSet('../Data/testSetRBF.txt')\n b, alphas = SMO.smo(data, label,kernel=True ,maxIter=100)\n svInd = np.nonzero(alphas.A > 0)[0]\n Tool.drawDataset(data, label, line=False, alphas=alphas)\n\n\n\n\n\n\n","sub_path":"MachineLearning/Support Vector Machine/SVM_Model.py","file_name":"SVM_Model.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273814773","text":"import webbrowser\nimport pandas as pd\nimport importlib as lib\nimport copy\nimport re\n\nofflib = lib.import_module('offlinelib')\nonlib = lib.import_module('onlinelib')\n\nratings = pd.read_csv('new_ratings.csv', index_col=0)\nbooks = pd.read_csv('BX-Books.csv', error_bad_lines=False, encoding=\"ISO-8859-1\", sep=';', warn_bad_lines=False, low_memory=False)\n\nsimil =pd.read_csv('itemsimilarity.csv', sep = '\\t', header=None)\nsimil.columns = ['ISBN1', 'ISBN2', 'Similarity']\n\nsim = offlib.ratings_matrix_user(simil)\ndic_item = offlib.ratings_matrix_item(ratings)\ndic_user = offlib.ratings_matrix_user(ratings)\n\nq = input(\"Type of Recommender System:\\n A) Item-Based \\n B) User-Based\\n\")\n\n\ndef online(sim_mat, dic, dic_user, t, df_books, q):\n 
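The similarity-weighted average that this function applies when predicting a rating, isolated as a tiny helper; the name and sample numbers are made up for illustration.

def predict_rating(neighbour_sims, neighbour_ratings):
    # prev = sum(sim_u * rating_u) / sum(sim_u), over neighbours who rated the item
    numer = sum(s * r for s, r in zip(neighbour_sims, neighbour_ratings))
    denom = sum(neighbour_sims)
    return numer / denom if denom else 0.0

# e.g. two similar users rated the item 8 and 6:
# predict_rating([0.9, 0.3], [8, 6]) == (7.2 + 1.8) / 1.2 == 7.5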
\"\"\"Online procedure for recommendation. Takes in input the similarity dictionary, the Item-User dictionary, the\n User-Item dictionary, a threshold and the books' data frame and the procedure.\"\"\"\n n = int(input('How many books do you want? '))\n print('\\n')\n print(\"Loading your Recommendation...\\n\")\n rat_user = pd.read_csv('new_userXX2.csv', sep=';',dtype={'ITEM': object})\n dic_rat_user = offlib.ratings_matrix_user(rat_user)\n if q == 'A':\n dic_tot = {}\n items = list(dic_rat_user[0].keys())\n for item in items:\n ds = onlib.n_sim(sim_mat, item, t, q)\n for k, v in ds.items():\n if k not in dic_tot.keys():\n dic_tot[k] = v\n elif k in dic_tot.keys() and v < dic_tot[k]:\n pass\n else:\n dic_tot[k] = v\n nest_dic = onlib.nested_dict(dic_tot)\n new_dic_user = copy.deepcopy(dic_user)\n new_dic_user.update(dic_rat_user)\n prev = onlib.prevision(sim_mat, dic, nest_dic, new_dic_user, t, q)\n item_rec = sorted(prev.keys(), key=lambda x: prev[x][0], reverse=True)\n else:\n new_dic_user = copy.deepcopy(dic_user)\n new_dic_user.update(dic_rat_user)\n sim_nu = onlib.similarity_user_online(new_dic_user)\n if sim_nu == {}:\n print('Sorry no match found!')\n else:\n newu = 0\n ds = onlib.n_sim(sim_nu, newu, t, q)\n us_sim = ds.keys()\n l_item = []\n for u in us_sim:\n if l_item == []:\n l_item = list(new_dic_user[u].keys())\n else:\n l_item += list(new_dic_user[u].keys())\n user_zero = {}\n k = 0\n for item in l_item:\n if user_zero == {}:\n user_zero[0] = {}\n user_zero[0][item] = 0\n else:\n user_zero[0][item] = 0\n for item in list(user_zero[0].keys()):\n prev = 0\n numer = 0\n denom = 0\n for user in us_sim:\n if user in list(dic_item[item].keys()):\n numer += ds[user] * dic_item[item][user]\n denom += ds[user]\n else:\n pass\n prev = numer / denom\n user_zero[0][item] = prev\n item_rec = sorted(user_zero[0].keys(), key=lambda x: user_zero[0][x], reverse=True)\n k = 0\n print('\\n')\n for item in item_rec:\n if item in list(df_books['ISBN'].values):\n print(k + 1, ') ', df_books[df_books['ISBN'] == item]['Book-Title'].values[0], ' - ',\n df_books[df_books['ISBN'] == item]['Book-Author'].values[0], ' - ',\n item)\n else:\n print(k + 1, ') Sorry! No information for this book: ', item)\n k += 1\n if k == n:\n break\n if k < n:\n print(\"\\n\")\n print(\"Sorry! 
I don't have enough information to suggest you %d items.\" % n)\n print('\\n')\n print(\n \"Would you like to:\\n A) Buy a book.\\n B) Get informations on the book's author.\\n C) Get the plot.\\n D) Exit\")\n q1 = input()\n if q1 == 'A':\n q2 = input('Please insert the ISBN: ')\n webbrowser.open(\"https://www.bookgoodies.com/a/%s\" % q2)\n elif q1 == 'B':\n q2 = input('Please insert the ISBN: ')\n author = df_books[df_books['ISBN'] == q2]['Book-Author'].values[0]\n author = author.lower()\n author = author.title()\n author = author.replace(' ', '_')\n webbrowser.open(\"https://en.wikipedia.org/wiki/%s\" % author)\n elif q1 == 'C':\n q2 = input('Please insert the ISBN: ')\n name = df_books[df_books['ISBN'] == q2]['Book-Title'].values[0]\n sep1 = '('\n sep2 = ':'\n name = name.split(sep1, 1)[0]\n name = name.split(sep2, 1)[0]\n name = name.lower()\n name = name.replace('&', \"&\")\n name = re.sub(r\"\\b(?<!')[a-z]\", lambda m: m.group().upper(), name)\n name = name.replace('\\\\', '')\n name = name.replace('\"', \"\")\n name = name.replace(\" \", \"_\")\n webbrowser.open(\"https://en.wikipedia.org/wiki/%s#Plot\" % name)\n return\n\n\n\nonline(sim, dic_item, dic_user, 5, books, q)\n","sub_path":"pers.py","file_name":"pers.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428895281","text":"# test driver for the wiki scrape system\n\ndef test_basic():\n\timport Wiki.wiki_graph as glib\n\n\tg = glib.HashGraph()\n\tg.add(\"alpha\", [\"beta\", \"delta\", \"gamma\"])\n\tg.add(\"beta\", [\"beta\", \"delta\", \"alpha\"])\n\tg.add(\"delta\", [\"alpha\"])\n\tg.add(\"gamma\", [\"alpha\", \"beta\", \"delta\"])\n\tg.save(\"Wiki/\")\n\n\tg.print_ht()\n\tprint(\"\")\n\tg.print_g()\n\tprint(\"\")\n\n\tg0 = glib.HashGraph()\n\tg0.load(\"Wiki/\")\n\n\tg0.print_ht()\n\tprint(\"\")\n\tg0.print_g()\n\ndef test_hash():\n\timport Wiki.wiki_graph as glib\n\tg = glib.HashGraph()\n\n\tstrs = [\"math\",\n\t\t\t\"maths\",\n\t\t\t\"MATH\",\n\t\t\t\"mlth\"\n\t\t\t]\n\n\tfor s in strs:\n\t\tprint(g.hash(s))\n\ndef test_mine():\n\timport Wiki.wiki as w\n\n\tminer = w.WikiMiner()\n\n\tminer.parse(\"Mathematics\")\n\tminer.parse(\"Graph_theory\")\n\tminer.graph.print_ht()\n\tminer.graph.print_g()\n\n\tminer.save()\n\n# test the convention that nodes added to the graph with\n# also test new seed mechanic \ndef test_processed_mechanic():\n\timport Wiki.wiki as w\n\n\tminer = w.WikiMiner()\n\n\tminer.seed()\n\tminer.parse(\"Mathematics\")\n\tminer.graph.add(\"Graph_theory\", None) # over write the entry to indicate non local\n\tminer.graph.print_processed()\n\n\tminer.save()\n\n# Returns if passed or not\ndef test_rand():\n\timport Wiki.wiki as w\n\tminer = w.WikiMiner()\n\n\tfor i in range(0,100):\n\t\tprint(i)\n\t\tminer.seed()\n\n\tprint(\"Saving and Loading\")\n\tminer.save()\n\tminer0 = w.WikiMiner()\n\n\t# now check that miner and miner0 have the same dicts\n\tprint(\"Checking the Hash Table\")\n\tfor k in miner.graph.hash_table.keys():\n\t\tif k not in miner0.graph.hash_table.keys():\n\t\t\tprint(\"key not found: \" + k)\n\t\t\treturn False\n\n\t\tv1 = miner.graph.hash_table[k]\n\t\tv2 = miner0.graph.hash_table[k]\n\n\t\tif not v1 == v2:\n\t\t\tprint(\"Values do not match\")\n\t\t\treturn -1 \n\n\tprint(\"Checking the Graph\")\n\tfor k in miner.graph.graph.keys():\n\t\tif k not in miner0.graph.graph.keys():\n\t\t\tprint(\"key not found: \" + k)\n\t\t\treturn False\n\n\t\tv1 = miner.graph.graph[k]\n\t\tv2 = 
miner0.graph.graph[k]\n\n\t\tif not v1 == v2:\n\t\t\tprint(\"Values do not match\")\n\t\t\treturn -1 \n\n\treturn True\n\n# test the function get boundry\ndef test_bd():\n\timport Wiki.wiki_graph as glib\n\tg = glib.HashGraph()\n\n\tg.add(\"A\", [\"B\", \"C\"])\n\tg.add(\"B\", [\"A\", \"C\", \"D\"])\n\tg.add(\"C\", [\"A\", \"B\"])\n\tg.add(\"D\", None)\n\n\tprint(g.get_boundry()) # should be B\n\t","sub_path":"Wiki/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"35072025","text":"def binary_search(value_list: list, value_search: int):\n # Get list size\n list_size = len(value_list)\n # Lowest index in the list [Initial]\n index_low = 0\n # Highest index in the list [Initial]\n index_high = list_size - 1\n\n # Will be used as validator if value exists\n value_found = False\n\n # While lowest index does not exceed highest index\n while index_low <= index_high:\n # Find the middle index of the list\n # Add the lowest index and highest index, then floor divide into 2\n index_middle = (index_high + index_low) // 2\n # If value searching is greater than the middle index\n if value_search > value_list[index_middle]:\n # New lowest index is in the upper half of the list\n index_low = index_middle + 1\n # If value searching is less than the middle index\n elif value_search < value_list[index_middle]:\n # New highest index is in the lower half of the list\n index_high = index_middle - 1\n # If value found\n elif value_search == value_list[index_middle]:\n # Change value of value_found\n value_found = f\"Value found at index {index_middle}\"\n # Stops the loop\n break\n\n # Check if value exists\n # If value exists, display the text and index\n # If value does not exist, display value not found\n print(\"Value not found\" if value_found == False else value_found)\n\n\nbinary_search([1,2,3,4,6,7,8], 8)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"31439229","text":"#!/usr/bin/env python3\n\"\"\"\nThis application is designed to decode timestamps into human-readable date/times and vice-versa\nAdditional information regarding the source of the timestamp formats and associated equations\nwill be provided in the docstrings below.\nTO DO:\n Re-evaluate error handling.\n MSDOS and FAT timestamps both need method for accepting time offset\n\nGPS Ref: http://www.leapsecond.com/java/gpsclock.htm\nLeap Seconds: https://www.nist.gov/pml/time-and-frequency-division/leap-seconds-faqs\n http://hpiers.obspm.fr/eop-pc/index.php?index=TAI-UTC_tab&lang=en\nMicrosoft DateTime: https://docs.microsoft.com/en-us/dotnet/api/system.datetime?view=netframework-4.8\nMicrosoft Time: https://docs.microsoft.com/en-ca/windows/win32/sysinfo/time\nMicrosoft 1904 Timestamp: https://docs.microsoft.com/en-us/office/troubleshoot/excel/1900-and-1904-date-system\nMicrosoft OLE Automation Date (OADate): https://docs.microsoft.com/en-us/dotnet/api/system.datetime.tooadate?view=netframework-4.8\nMSDOS wFatDate wFatTime DosDate: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime\nMicrosoft FILETIME: https://support.microsoft.com/en-ca/help/188768/info-working-with-the-filetime-structure\nMicrosoft Active Directory/LDAP Timestamp: https://docs.microsoft.com/en-us/windows/win32/adschema/a-lastlogontimestamp\nbplist timestamp: 
https://developer.apple.com/documentation/corefoundation/cfabsolutetime\n https://developer.apple.com/documentation/foundation/nsdate\nGSM Timestamps: https://en.wikipedia.org/wiki/GSM_03.40\n http://seven-bit-forensics.blogspot.com/2014/02/decoding-gsmsms-timestamps.html\nVMWare Snapshot timestamp: https://stuffphilwrites.com/2013/03/vmware-snapshot-forensics/\n\"\"\"\nfrom datetime import datetime as dt, timedelta\nimport struct\nfrom binascii import hexlify, unhexlify\nfrom string import hexdigits\nimport argparse\nimport sys\nfrom dateutil import parser as duparser\nimport base64\nfrom colorama import init\ninit(autoreset=True)\n\n__author__ = 'Corey Forman'\n__date__ = '7 Dec 2020'\n__version__ = '2.5'\n__description__ = 'Python 3 CLI Date Time Conversion Tool'\n\nclass TimeDecoder(object):\n \"\"\"Run the decoding class\"\"\"\n def __init__(self):\n self.epoch_1601 = dt(1601, 1, 1)\n self.epoch_1899 = dt(1899, 12, 30)\n self.epoch_1904 = dt(1904, 1, 1)\n self.epoch_1970 = dt(1970, 1, 1)\n self.epoch_1980 = dt(1980, 1, 6)\n self.epoch_2000 = dt(2000, 1, 1)\n self.epoch_2001 = dt(2001, 1, 1)\n self.hundreds_nano = 10000000\n self.nano_2001 = 1000000000\n self.epoch_active = 116444736000000000\n self.hfs_dec_subtract = 2082844800\n self.ts_funcs = [self.from_unix_sec, self.from_unix_milli, self.from_win_64_hex, self.from_win_64_hexle, self.from_chrome, self.from_ad,\n self.from_unix_hex_32be, self.from_unix_hex_32le, self.from_cookie, self.from_ole_be, self.from_ole_le, self.from_mac,\n self.from_hfs_dec, self.from_hfs_be, self.from_hfs_le, self.from_msdos, self.from_fat, self.from_systime, self.from_filetime,\n self.from_prtime, self.from_ole_auto, self.from_ms1904, self.from_ios_time, self.from_sym_time, self.from_gps_time,\n self.from_eitime, self.from_bplist, self.from_gsm, self.from_vm]\n self.date_funcs = [self.to_unix_sec, self.to_unix_milli, self.to_win_64_hex, self.to_win_64_hexle, self.to_chrome, self.to_ad, self.to_unix_hex_32be,\n self.to_unix_hex_32le, self.to_cookie, self.to_ole_be, self.to_ole_le, self.to_mac, self.to_hfs_dec, self.to_hfs_be, self.to_hfs_le,\n self.to_msdos, self.to_fat, self.to_systime, self.to_filetime, self.to_prtime, self.to_ole_auto, self.to_ms1904, self.to_ios_time,\n self.to_sym_time, self.to_gps_time, self.to_eitime, self.to_bplist, self.to_gsm, self.to_vm]\n self.in_unix_sec = self.in_unix_milli = self.in_windows_hex_64 = self.in_windows_hex_le = self.in_chrome = self.in_ad = self.in_unix_hex_32 = self.in_unix_hex_32le = self.in_cookie = self.in_ole_be = self.in_ole_le = self.in_mac = self.in_hfs_dec = self.in_hfs_be = self.in_hfs_le = self.in_fat = self.in_msdos = self.in_systemtime = self.in_filetime = self.in_prtime = self.in_ole_auto = self.in_ms1904 = self.in_iostime = self.in_symtime = self.in_gpstime = self.in_eitime = self.in_bplist = self.in_gsm = self.in_vm = None\n self.out_unix_sec = self.out_unix_milli = self.out_windows_hex_64 = self.out_windows_hex_le = self.out_chrome = self.out_adtime = self.out_unix_hex_32 = self.out_unix_hex_32le = self.out_cookie = self.out_ole_be = self.out_ole_le = self.out_mac = self.out_hfs_dec = self.out_hfs_be = self.out_hfs_le = self.out_fat = self.out_msdos = self.out_systemtime = self.out_filetime = self.out_prtime = self.out_ole_auto = self.out_ms1904 = self.out_iostime = self.out_symtime = self.out_gpstime = self.out_eitime = self.out_bplist = self.out_gsm = self.out_vm = None\n self.leapseconds = {\n 10:[dt(1972,1,1), dt(1972,7,1)],\n 11:[dt(1972,7,1), dt(1973,1,1)],\n 12:[dt(1973,1,1), 
dt(1974,1,1)],\n 13:[dt(1974,1,1), dt(1975,1,1)],\n 14:[dt(1975,1,1), dt(1976,1,1)],\n 15:[dt(1976,1,1), dt(1977,1,1)],\n 16:[dt(1977,1,1), dt(1978,1,1)],\n 17:[dt(1978,1,1), dt(1979,1,1)],\n 18:[dt(1979,1,1), dt(1980,1,1)],\n 19:[dt(1980,1,1), dt(1981,7,1)],\n 20:[dt(1981,7,1), dt(1982,7,1)],\n 21:[dt(1982,7,1), dt(1983,7,1)],\n 22:[dt(1983,7,1), dt(1985,7,1)],\n 23:[dt(1985,7,1), dt(1988,1,1)],\n 24:[dt(1988,1,1), dt(1990,1,1)],\n 25:[dt(1990,1,1), dt(1991,1,1)],\n 26:[dt(1991,1,1), dt(1992,7,1)],\n 27:[dt(1992,7,1), dt(1993,7,1)],\n 28:[dt(1993,7,1), dt(1994,7,1)],\n 29:[dt(1994,7,1), dt(1996,1,1)],\n 30:[dt(1996,1,1), dt(1997,7,1)],\n 31:[dt(1997,7,1), dt(1999,1,1)],\n 32:[dt(1999,1,1), dt(2006,1,1)],\n 33:[dt(2006,1,1), dt(2009,1,1)],\n 34:[dt(2009,1,1), dt(2012,7,1)],\n 35:[dt(2012,7,1), dt(2015,7,1)],\n 36:[dt(2015,7,1), dt(2017,1,1)],\n 37:[dt(2017,1,1), dt.now() - timedelta(seconds=37)]\n }\n # There have been no further leapseconds since 2017,1,1 at the __date__ of this script\n # which is why the leapseconds end with a dt.now object to valid/relevant timestamp output.\n self.left_color = \"\\033[1;31m\"\n self.right_color = \"\\033[1;m\"\n\n def run(self):\n \"\"\"Process arguments and errors\"\"\"\n if len(sys.argv[1:]) == 0:\n arg_parse.print_usage()\n arg_parse.exit()\n try:\n if args.unix:\n result, indiv_output, combined_output, reason = self.from_unix_sec()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.umil:\n result, indiv_output, combined_output, reason = self.from_unix_milli()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.wh:\n result, indiv_output, combined_output, reason = self.from_win_64_hex()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.whle:\n result, indiv_output, combined_output, reason = self.from_win_64_hexle()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.chrome:\n result, indiv_output, combined_output, reason = self.from_chrome()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.active:\n result, indiv_output, combined_output, reason = self.from_ad()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.uhbe:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32be()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.uhle:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32le()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.cookie:\n result, indiv_output, combined_output, reason = self.from_cookie()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.oleb:\n result, indiv_output, combined_output, reason = self.from_ole_be()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.olel:\n result, indiv_output, combined_output, reason = self.from_ole_le()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.mac:\n result, indiv_output, combined_output, reason = self.from_mac()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.hfsdec:\n result, indiv_output, combined_output, reason = self.from_hfs_dec()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.hfsbe:\n result, indiv_output, combined_output, reason = self.from_hfs_be()\n if indiv_output == False:\n 
print(reason)\n else:\n print(indiv_output)\n elif args.hfsle:\n result, indiv_output, combined_output, reason = self.from_hfs_le()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.fat:\n result, indiv_output, combined_output, reason = self.from_fat()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.msdos:\n result, indiv_output, combined_output, reason = self.from_msdos()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.sys:\n result, indiv_output, combined_output, reason = self.from_systime()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.ft:\n result, indiv_output, combined_output, reason = self.from_filetime()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.pr:\n result, indiv_output, combined_output, reason = self.from_prtime()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.auto:\n result, indiv_output, combined_output, reason = self.from_ole_auto()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.ms1904:\n result, indiv_output, combined_output, reason = self.from_ms1904()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.ios:\n result, indiv_output, combined_output, reason = self.from_ios_time()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.sym:\n result, indiv_output, combined_output, reason = self.from_sym_time()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.gps:\n result, indiv_output, combined_output, reason = self.from_gps_time()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.eitime:\n result, indiv_output, combined_output, reason = self.from_eitime()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.bplist:\n result, indiv_output, combined_output, reason = self.from_bplist()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.gsm:\n result, indiv_output, combined_output, reason = self.from_gsm()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.vm:\n result, indiv_output, combined_output, reason = self.from_vm()\n if indiv_output == False:\n print(reason)\n else:\n print(indiv_output)\n elif args.timestamp:\n self.to_timestamps()\n elif args.guess:\n self.from_all()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n\n def to_timestamps(self):\n \"\"\"Convert provided date to all timestamps\"\"\"\n print ('\\nConverting Date: ' + timestamp + '\\n')\n for func in self.date_funcs:\n func()\n self.timestamp_output()\n\n def from_unix_sec(self):\n \"\"\"Convert Unix Seconds value to a date\"\"\"\n reason = \"[!] 
Unix seconds timestamp is 10 digits in length\"\n try:\n if not (len(unix) == 10) or not (unix.isdigit()):\n self.in_unix_sec = indiv_output = combined_output = False\n pass\n else:\n self.in_unix_sec = dt.utcfromtimestamp(float(unix)).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Unix Seconds: \" + self.in_unix_sec + \" UTC\")\n combined_output = str(\"\\033[1;31mUnix Seconds:\\t\\t\\t\" + self.in_unix_sec + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_unix_sec = indiv_output = combined_output = False\n return self.in_unix_sec, indiv_output, combined_output, reason\n\n def to_unix_sec(self):\n \"\"\"Convert date to a Unix Seconds value\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_unix_sec = str(int((dt_obj - self.epoch_1970).total_seconds()) - int(dt_tz))\n ts_output = str(\"Unix Seconds:\\t\\t\\t\" + self.out_unix_sec)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_sec = ts_output = False\n return self.out_unix_sec, ts_output\n\n def from_unix_milli(self):\n \"\"\"Convert Unix Millisecond value to a date\"\"\"\n reason = \"[!] Unix milliseconds timestamp is 13 digits in length\"\n try:\n if not (len(umil) == 13) or not (umil.isdigit()):\n self.in_unix_milli = indiv_output = combined_output = False\n pass\n else:\n self.in_unix_milli = dt.utcfromtimestamp(float(umil) / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Unix Milliseconds: \" + self.in_unix_milli + \" UTC\")\n combined_output = str(\"\\033[1;31mUnix Milliseconds:\\t\\t\" + self.in_unix_milli + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_unix_milli = indiv_output = combined_output = False\n return self.in_unix_milli, indiv_output, combined_output, reason\n\n def to_unix_milli(self):\n \"\"\"Convert date to a Unix Millisecond value\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_unix_milli = str((int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000))\n ts_output = str(\"Unix Milliseconds:\\t\\t\" + self.out_unix_milli)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_milli = ts_output = False\n return self.out_unix_milli, ts_output\n\n def from_win_64_hex(self):\n \"\"\"Convert a Windows 64 Hex Big-Endian value to a date\"\"\"\n reason = \"[!] 
Windows 64-bit Hex Big-Endian timestamp is 16 hex characters (8 bytes)\"\n try:\n if not (len(wh) == 16) or not (all(char in hexdigits for char in wh)):\n self.in_windows_hex_64 = indiv_output = combined_output = False\n pass\n else:\n base10_microseconds = int(wh, 16) / 10\n dt_obj = self.epoch_1601 + timedelta(microseconds=base10_microseconds)\n self.in_windows_hex_64 = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Windows 64-bit Hex BE: \" + self.in_windows_hex_64 + \" UTC\")\n combined_output = str(\"\\033[1;31mWindows 64-bit Hex BE:\\t\\t\" + self.in_windows_hex_64 + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_windows_hex_64 = indiv_output = combined_output = False\n return self.in_windows_hex_64, indiv_output, combined_output, reason\n\n def to_win_64_hex(self):\n \"\"\"Convert a date to a Windows 64 Hex Big-Endian value\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n minus_epoch = dt_obj - self.epoch_1601\n calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)\n self.out_windows_hex_64 = str(hex(int(calculated_time)*10))[2:].zfill(16)\n ts_output = str(\"Windows 64-bit Hex BE:\\t\\t\" + self.out_windows_hex_64)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_windows_hex_64 = ts_output = False\n return self.out_windows_hex_64, ts_output\n\n def from_win_64_hexle(self):\n \"\"\"Convert a Windows 64 Hex Little-Endian value to a date\"\"\"\n reason = \"[!] 
Windows 64-bit Hex Little-Endian timestamp is 16 hex characters (8 bytes)\"\n        try:\n            if not (len(whle) == 16) or not (all(char in hexdigits for char in whle)):\n                self.in_windows_hex_le = indiv_output = combined_output = False\n                pass\n            else:\n                indiv_output = combined_output = False\n                endianness_change, = struct.unpack(\"<Q\", unhexlify(whle))\n                converted_time = endianness_change / 10\n                try:\n                    dt_obj = self.epoch_1601 + timedelta(microseconds=converted_time)\n                    self.in_windows_hex_le = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n                    indiv_output = str(\"Windows 64-bit Hex LE: \" + self.in_windows_hex_le + \" UTC\")\n                    combined_output = str(\"\\033[1;31mWindows 64-bit Hex LE:\\t\\t\" + self.in_windows_hex_le + \" UTC\\033[1;m\".format())\n                except OverflowError:\n                    pass\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.in_windows_hex_le = indiv_output = combined_output = False\n        return self.in_windows_hex_le, indiv_output, combined_output, reason\n\n    def to_win_64_hexle(self):\n        \"\"\"Convert a date to a Windows 64 Hex Little-Endian value\"\"\"\n        try:\n            dt_obj = duparser.parse(timestamp)\n            if hasattr(dt_obj.tzinfo, '_offset'):\n                dt_tz = dt_obj.tzinfo._offset.total_seconds()\n                dt_obj = duparser.parse(timestamp, ignoretz=True)\n            else:\n                dt_tz = 0\n            minus_epoch = dt_obj - self.epoch_1601\n            calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)\n            # bytes.hex() carries no '0x' prefix, so slicing off the first two characters (as the BE variant does with hex()) would silently drop the first byte here\n            self.out_windows_hex_le = str(struct.pack(\"<Q\", int(calculated_time*10)).hex()).zfill(16)\n            ts_output = str(\"Windows 64-bit Hex LE:\\t\\t\" + self.out_windows_hex_le)\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.out_windows_hex_le = ts_output = False\n        return self.out_windows_hex_le, ts_output\n\n    def from_chrome(self):\n        \"\"\"Convert a Chrome Timestamp/Webkit Value to a date\"\"\"\n        reason = \"[!] 
Chrome/Webkit timestamp is 17 digits\"\n try:\n if not (len(chrome) == 17) or not (chrome.isdigit()):\n self.in_chrome = indiv_output = combined_output = False\n pass\n else:\n delta = timedelta(microseconds=int(chrome))\n converted_time = self.epoch_1601 + delta\n self.in_chrome = converted_time.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Google Chrome Time: \" + self.in_chrome + \" UTC\")\n combined_output = str(\"\\033[1;31mGoogle Chrome:\\t\\t\\t\" + self.in_chrome + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_chrome = indiv_output = combined_output = False\n return self.in_chrome, indiv_output, combined_output, reason\n\n def to_chrome(self):\n \"\"\"Convert a date to a Chrome Timestamp/Webkit value\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n chrome_time = ((dt_obj - self.epoch_1601).total_seconds() - int(dt_tz))* 1000000\n self.out_chrome = str(int(chrome_time))\n ts_output = str(\"Google Chrome:\\t\\t\\t\" + self.out_chrome)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_chrome = ts_output = False\n return self.out_chrome, ts_output\n\n def from_ad(self):\n \"\"\"Convert an Active Directory/LDAP timestamp to a date\"\"\"\n reason = \"[!] Active Directory/LDAP timestamps are 18 digits\"\n try:\n if not (len(active) == 18) or not (active.isdigit()):\n self.in_ad = indiv_output = combined_output = False\n pass\n else:\n dt_obj = dt.utcfromtimestamp((float(active) - self.epoch_active) / self.hundreds_nano)\n self.in_ad = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Active Directory Timestamp: \" + self.in_ad + \" UTC\")\n combined_output = str(\"\\033[1;31mActive Directory/LDAP dt:\\t\" + self.in_ad + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_ad = indiv_output = combined_output = False\n return self.in_ad, indiv_output, combined_output, reason\n\n def to_ad(self):\n \"\"\"Convert a date to an Active Directory/LDAP timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n tz_shift = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))\n self.out_adtime = str(int(tz_shift * self.hundreds_nano + self.epoch_active))\n ts_output = str(\"Active Directory/LDAP dt:\\t\" + self.out_adtime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_adtime = ts_output = False\n return self.out_adtime, ts_output\n\n def from_unix_hex_32be(self):\n \"\"\"Convert a Unix Hex 32-bit Big-Endian timestamp to a date\"\"\"\n reason = \"[!] 
Unix Hex 32-bit Big-Endian timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(uhbe) == 8) or not (all(char in hexdigits for char in uhbe)):\n self.in_unix_hex_32 = indiv_output = combined_output = False\n pass\n else:\n to_dec = int(uhbe, 16)\n self.in_unix_hex_32 = dt.utcfromtimestamp(float(to_dec)).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Unix Hex 32-bit BE: \" + self.in_unix_hex_32 + \" UTC\")\n combined_output = str(\"\\033[1;31mUnix Hex 32-bit BE:\\t\\t\" + self.in_unix_hex_32 + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_unix_hex_32 = indiv_output = combined_output = False\n return self.in_unix_hex_32, indiv_output, combined_output, reason\n\n def to_unix_hex_32be(self):\n \"\"\"Convert a date to a Unix Hex 32-bit Big-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n unix_time = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))\n self.out_unix_hex_32 = str(struct.pack(\">L\", unix_time).hex())\n ts_output = str(\"Unix Hex 32-bit BE:\\t\\t\" + self.out_unix_hex_32)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_hex_32 = ts_output = False\n return self.out_unix_hex_32, ts_output\n\n def from_unix_hex_32le(self):\n \"\"\"Convert a Unix Hex 32-bit Little-Endian timestamp to a date\"\"\"\n reason = \"[!] Unix Hex 32-bit Little-Endian timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(uhle) == 8) or not (all(char in hexdigits for char in uhle)):\n self.in_unix_hex_32le = indiv_output = combined_output = False\n pass\n else:\n to_dec = struct.unpack(\"<L\", unhexlify(uhle))[0]\n self.in_unix_hex_32le = dt.utcfromtimestamp(float(to_dec)).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Unix Hex 32-bit LE: \" + self.in_unix_hex_32le + \" UTC\")\n combined_output = str(\"\\033[1;31mUnix Hex 32-bit LE:\\t\\t\" + self.in_unix_hex_32le + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_unix_hex_32le = indiv_output = combined_output = False\n return self.in_unix_hex_32le, indiv_output, combined_output, reason\n\n def to_unix_hex_32le(self):\n \"\"\"Convert a date to a Unix Hex 32-bit Little-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n unix_time = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))\n self.out_unix_hex_32le = str(struct.pack(\"<L\", unix_time).hex())\n ts_output = str(\"Unix Hex 32-bit LE:\\t\\t\" + self.out_unix_hex_32le)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_hex_32le = ts_output = False\n return self.out_unix_hex_32le, ts_output\n\n def from_cookie(self):\n \"\"\"Convert an Internet Explorer timestamp to a date\"\"\"\n reason = \"[!] 
Internet Explorer Cookie timestamps (txt cookies) consist of 2 integers values. Must be input with a comma between them.\"\n try:\n if not (\",\" in cookie) or not (cookie.split(\",\")[0].isdigit() and cookie.split(\",\")[1].isdigit()):\n self.in_cookie = indiv_output = combined_output = False\n pass\n else:\n low, high = [int(h, base=10) for h in cookie.split(',')]\n calc = 10**-7 * (high * 2**32 + low) - 11644473600\n dt_obj = dt.utcfromtimestamp(calc)\n self.in_cookie = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Windows Cookie Date: \" + self.in_cookie + \" UTC\")\n combined_output = str(\"\\033[1;31mWindows Cookie Date:\\t\\t\" + self.in_cookie + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_cookie = indiv_output = combined_output = False\n return self.in_cookie, indiv_output, combined_output, reason\n\n def to_cookie(self):\n \"\"\"Convert a date to Internet Explorer timestamp values\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n unix = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))\n high = int(((unix + 11644473600) * 10**7) / 2**32)\n low = int((unix + 11644473600) * 10**7) - (high * 2**32)\n self.out_cookie = str(low) + ',' + str(high)\n ts_output = str(\"Windows Cookie Date:\\t\\t\" + self.out_cookie)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_cookie = ts_output = False\n return self.out_cookie, ts_output\n\n def from_ole_be(self):\n \"\"\"Convert an OLE Big-Endian timestamp to a date\"\"\"\n reason = \"[!] 
OLE Big-Endian timestamps are 16 hex characters (8 bytes)\"\n try:\n if not (len(oleb) == 16) or not (all(char in hexdigits for char in oleb)):\n self.in_ole_be = indiv_output = combined_output = False\n pass\n else:\n delta = struct.unpack('>d', struct.pack('>Q', int(oleb, 16)))[0]\n dt_obj = self.epoch_1899 + timedelta(days=delta)\n self.in_ole_be = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Windows OLE 64-bit double BE: \" + self.in_ole_be + \" UTC\")\n combined_output = str(\"\\033[1;31mWindows OLE 64-bit double BE:\\t\" + self.in_ole_be + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_ole_be = indiv_output = combined_output = False\n return self.in_ole_be, indiv_output, combined_output, reason\n\n def to_ole_be(self):\n \"\"\"Convert a date to an OLE Big-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n delta = ((dt_obj - self.epoch_1899).total_seconds() - int(dt_tz)) / 86400\n conv = struct.unpack('<Q', struct.pack('<d', delta))[0]\n self.out_ole_be = str(struct.pack('>Q', conv).hex())\n ts_output = str(\"Windows OLE 64-bit double BE:\\t\" + self.out_ole_be)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_ole_be = ts_output = False\n return self.out_ole_be, ts_output\n\n def from_ole_le(self):\n \"\"\"Convert an OLE Little-Endian timestamp to a date\"\"\"\n reason = \"[!] OLE Little-Endian timestamps are 16 hex characters (8 bytes)\"\n try:\n if not (len(olel) == 16) or not (all(char in hexdigits for char in olel)):\n self.in_ole_le = indiv_output = combined_output = False\n pass\n else:\n to_le = hexlify(struct.pack('<Q', int(olel, 16)))\n delta = struct.unpack('>d', struct.pack('>Q', int(to_le, 16)))[0]\n dt_obj = self.epoch_1899 + timedelta(days=delta)\n self.in_ole_le = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Windows OLE 64-bit double LE: \" + self.in_ole_le + \" UTC\")\n combined_output = str(\"\\033[1;31mWindows OLE 64-bit double LE:\\t\" + self.in_ole_le + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_ole_le = indiv_output = combined_output = False\n return self.in_ole_le, indiv_output, combined_output, reason\n\n def to_ole_le(self):\n \"\"\"Convert a date to an OLE Little-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n delta = ((dt_obj - self.epoch_1899).total_seconds() - int(dt_tz)) / 86400\n conv = struct.unpack('<Q', struct.pack('<d', delta))[0]\n self.out_ole_le = str(struct.pack('<Q', conv).hex())\n ts_output = str(\"Windows OLE 64-bit double LE:\\t\" + self.out_ole_le)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_ole_le = ts_output = False\n return self.out_ole_le, ts_output\n\n def from_mac(self):\n \"\"\"Convert a Mac Absolute timestamp to a 
date - Also used for Safari plist timestamps\"\"\"\n reason = \"[!] Mac Absolute timestamps are 9 digits, commonly followed by a decimal and up to 6 digits for milliseconds\"\n try:\n if not (\".\" in mac) or not ((len(mac.split(\".\")[0]) == 9) and (len(mac.split(\".\")[1]) in range(0,7))) or not (''.join(mac.split(\".\")).isdigit()):\n self.in_mac = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_2001 + timedelta(seconds=float(mac))\n self.in_mac = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Mac Absolute Time: \" + self.in_mac + \" UTC\")\n combined_output = str(\"\\033[1;31mMac Absolute Time:\\t\\t\" + self.in_mac + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_mac = indiv_output = combined_output = False\n return self.in_mac, indiv_output, combined_output, reason\n\n def to_mac(self):\n \"\"\"Convert a date to a Mac Absolute timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_mac = str(int((dt_obj - self.epoch_2001).total_seconds() - int(dt_tz)))\n ts_output = str(\"Mac Absolute Time:\\t\\t\" + self.out_mac)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_mac = ts_output = False\n return self.out_mac, ts_output\n\n def from_hfs_dec(self):\n \"\"\"Convert a Mac OS/HFS+ Decimal Timestamp to a date\"\"\"\n reason = \"[!] Mac OS/HFS+ Decimal timestamps are 10 digits\"\n try:\n if not (len(hfsdec) == 10) or not (hfsdec.isdigit()) or not (int(hfsdec) >= 2082844800):\n self.in_hfs_dec = indiv_output = combined_output = False\n pass\n else:\n self.in_hfs_dec = dt.utcfromtimestamp(float(int(hfsdec) - self.hfs_dec_subtract)).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Mac OS/HFS+ Decimal Date: \" + self.in_hfs_dec + \" UTC\")\n combined_output = str(\"\\033[1;31mMac OS/HFS+ Decimal Time:\\t\" + self.in_hfs_dec + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_hfs_dec = indiv_output = combined_output = False\n return self.in_hfs_dec, indiv_output, combined_output, reason\n\n def to_hfs_dec(self):\n \"\"\"Convert a date to a Mac OS/HFS+ Decimal Timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_hfs_dec = str(int((dt_obj - self.epoch_1904).total_seconds() - int(dt_tz)))\n ts_output = str(\"Mac OS/HFS+ Decimal Time:\\t\" + self.out_hfs_dec)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_hfs_dec = ts_output = False\n return self.out_hfs_dec, ts_output\n\n def from_hfs_be(self):\n \"\"\"Convert an HFS/HFS+ Big-Endian timestamp to a date (HFS+ is in UTC)\"\"\"\n reason = \"[!] 
HFS/HFS+ Big-Endian timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(hfsbe) == 8) or not (all(char in hexdigits for char in hfsbe)):\n self.in_hfs_be = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_1904 + timedelta(seconds=int(hfsbe, 16))\n self.in_hfs_be = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"HFS/HFS+ 32-bit Hex BE: \" + self.in_hfs_be + \" HFS Local / HFS+ UTC\")\n combined_output = str(\"\\033[1;31mHFS/HFS+ 32-bit Hex BE:\\t\\t\" + self.in_hfs_be + \" HFS Local / HFS+ UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_hfs_be = indiv_output = combined_output = False\n return self.in_hfs_be, indiv_output, combined_output, reason\n\n def to_hfs_be(self):\n \"\"\"Convert a date to an HFS/HFS+ Big-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n conv = int((dt_obj - self.epoch_1904).total_seconds() - int(dt_tz))\n self.out_hfs_be = '{0:08x}'.format(conv)\n ts_output = str(\"HFS/HFS+ 32-bit Hex BE:\\t\\t\" + self.out_hfs_be)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_hfs_be = ts_output = False\n return self.out_hfs_be, ts_output\n\n def from_hfs_le(self):\n \"\"\"Convert an HFS/HFS+ Little-Endian timestamp to a date (HFS+ is in UTC)\"\"\"\n reason = \"[!] HFS/HFS+ Little-Endian timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(hfsle) == 8) or not (all(char in hexdigits for char in hfsle)):\n self.in_hfs_le = indiv_output = combined_output = False\n pass\n else:\n to_le = struct.unpack('>I', struct.pack('<I', int(hfsle, 16)))[0]\n dt_obj = self.epoch_1904 + timedelta(seconds=to_le)\n self.in_hfs_le = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"HFS/HFS+ 32 big Hex LE: \" + self.in_hfs_le + \" HFS Local / HFS+ UTC\")\n combined_output = str(\"\\033[1;31mHFS/HFS+ 32-bit Hex LE:\\t\\t\" + self.in_hfs_le + \" HFS Local / HFS+ UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_hfs_le = indiv_output = combined_output = False\n return self.in_hfs_le, indiv_output, combined_output, reason\n\n def to_hfs_le(self):\n \"\"\"Convert a date to an HFS/HFS+ Little-Endian timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n conv = int((dt_obj - self.epoch_1904).total_seconds() - int(dt_tz))\n self.out_hfs_le = str(struct.pack('<I', conv).hex())\n ts_output = str(\"HFS/HFS+ 32-bit Hex LE:\\t\\t\" + self.out_hfs_le)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_hfs_le = ts_output = False\n return self.out_hfs_le, ts_output\n\n def from_fat(self):\n \"\"\"Convert an MS-DOS wFatDate wFatTime timestamp to a date\"\"\"\n reason = \"[!] 
MS-DOS wFatDate wFatTime timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(fat) == 8) or not (all(char in hexdigits for char in fat)):\n self.in_fat = indiv_output = combined_output = False\n pass\n else:\n byte_swap = [fat[i:i+2] for i in range(0, len(fat), 2)]\n to_le = byte_swap[1]+byte_swap[0]+byte_swap[3]+byte_swap[2]\n binary = '{0:032b}'.format(int(to_le, 16))\n stamp = [binary[:7], binary[7:11], binary[11:16], binary[16:21], binary[21:27], binary[27:32]]\n for binary in stamp[:]:\n dec = int(binary, 2)\n stamp.remove(binary)\n stamp.append(dec)\n fat_year = stamp[0] + 1980\n fat_month = stamp[1]\n fat_day = stamp[2]\n fat_hour = stamp[3]\n fat_min = stamp[4]\n fat_sec = stamp[5] * 2\n if not (fat_year in range(1970,2100)) or not (fat_month in range(1,13)) or not (fat_day in range(1,32)) or not (fat_hour in range(0,24)) or not (fat_min in range(0,60)) or not (fat_sec in range(0,60)):\n self.in_fat = indiv_output = combined_output = False\n else:\n dt_obj = dt(fat_year, fat_month, fat_day, fat_hour, fat_min, fat_sec)\n self.in_fat = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"FAT Date + Time: \" + self.in_fat + \" Local\")\n combined_output = str(\"\\033[1;31mFAT Date + Time:\\t\\t\" + self.in_fat + \" Local\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_fat = indiv_output = combined_output = False\n return self.in_fat, indiv_output, combined_output, reason\n\n def to_fat(self):\n \"\"\"Convert a date to an MS-DOS wFatDate wFatTime timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n year = '{0:07b}'.format(dt_obj.year - 1980)\n month = '{0:04b}'.format(dt_obj.month)\n day = '{0:05b}'.format(dt_obj.day)\n hour = '{0:05b}'.format(dt_obj.hour)\n minute = '{0:06b}'.format(dt_obj.minute)\n seconds = '{0:05b}'.format(int(dt_obj.second / 2))\n to_hex = str(struct.pack('>I', int(year + month + day + hour + minute + seconds, 2)).hex())\n byte_swap = ''.join([to_hex[i:i+2] for i in range(0, len(to_hex), 2)][::-1])\n self.out_fat = ''.join([byte_swap[i:i+4] for i in range(0, len(byte_swap), 4)][::-1])\n ts_output = str(\"FAT Date + Time:\\t\\t\" + self.out_fat)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_fat = ts_output = False\n return self.out_fat, ts_output\n\n def from_msdos(self):\n \"\"\"Convert an MS-DOS timestamp to a date\"\"\"\n reason = \"[!] 
MS-DOS 32-bit timestamps are 8 hex characters (4 bytes)\"\n try:\n if not (len(msdos) == 8) or not (all(char in hexdigits for char in msdos)):\n self.in_msdos = indiv_output = combined_output = False\n pass\n else:\n swap = ''.join([msdos[i:i+2] for i in range(0, len(msdos), 2)][::-1])\n binary = '{0:032b}'.format(int(swap, 16))\n stamp = [binary[:7], binary[7:11], binary[11:16], binary[16:21], binary[21:27], binary[27:32]]\n for val in stamp[:]:\n dec = int(val, 2)\n stamp.remove(val)\n stamp.append(dec)\n dos_year = stamp[0] + 1980\n dos_month = stamp[1]\n dos_day = stamp[2]\n dos_hour = stamp[3]\n dos_min = stamp[4]\n dos_sec = stamp[5] * 2\n if not (dos_year in range(1970,2100)) or not (dos_month in range(1,13)) or not (dos_day in range(1,32)) or not (dos_hour in range(0,24)) or not (dos_min in range(0,60)) or not (dos_sec in range(0,60)):\n self.in_msdos = indiv_output = combined_output = False\n else:\n dt_obj = dt(dos_year, dos_month, dos_day, dos_hour, dos_min, dos_sec)\n self.in_msdos = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"MS-DOS 32-bit Hex Value: \" + self.in_msdos + \" Local\")\n combined_output = str(\"\\033[1;31mMS-DOS 32-bit Hex Value:\\t\" + self.in_msdos + \" Local\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_msdos = indiv_output = combined_output = False\n return self.in_msdos, indiv_output, combined_output, reason\n\n def to_msdos(self):\n \"\"\"Convert a date to an MS-DOS timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n year = '{0:07b}'.format(dt_obj.year - 1980)\n month = '{0:04b}'.format(dt_obj.month)\n day = '{0:05b}'.format(dt_obj.day)\n hour = '{0:05b}'.format(dt_obj.hour)\n minute = '{0:06b}'.format(dt_obj.minute)\n seconds = '{0:05b}'.format(int(dt_obj.second / 2))\n hexval = str(struct.pack('>I', int(year + month + day + hour + minute + seconds, 2)).hex())\n self.out_msdos = ''.join([hexval[i:i+2] for i in range(0, len(hexval), 2)][::-1])\n ts_output = str(\"MS-DOS 32-bit Hex Value:\\t\" + self.out_msdos)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_msdos = ts_output = False\n return self.out_msdos, ts_output\n\n def from_systime(self):\n \"\"\"Convert a Microsoft 128-bit SYSTEMTIME timestamp to a date\"\"\"\n reason = \"[!] 
Microsoft 128-bit SYSTEMTIME timestamps are 32 hex characters (16 bytes)\"\n try:\n if not (len(systime) == 32) or not (all(char in hexdigits for char in systime)):\n self.in_systemtime = indiv_output = combined_output = False\n pass\n else:\n to_le = ''.join([systime[i:i+2] for i in range(0, len(systime), 2)][::-1])\n converted = [to_le[i:i + 4] for i in range(0, len(to_le), 4)][::-1]\n stamp = []\n for i in converted:\n dec = int(i, 16)\n stamp.append(dec)\n dt_obj = dt(stamp[0], stamp[1], stamp[3], stamp[4], stamp[5], stamp[6], stamp[7]*1000)\n self.in_systemtime = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Microsoft 128-bit SYSTEMTIME: \" + self.in_systemtime + \" UTC\")\n combined_output = str(\"\\033[1;31mMicrosoft 128-bit SYSTEMTIME:\\t\" + self.in_systemtime + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_systemtime = indiv_output = combined_output = False\n return self.in_systemtime, indiv_output, combined_output, reason\n\n def to_systime(self):\n \"\"\"Convert a date to a Microsoft 128-bit SYSTEMTIME timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n micro = int(dt_obj.microsecond / 1000)\n tz_shift = dt_obj.timestamp() - int(dt_tz)\n add_micro = (tz_shift * 1000) + micro\n convert_to_seconds = add_micro / 1000\n new_dt_obj = dt.fromtimestamp(convert_to_seconds)\n full_date = new_dt_obj.strftime('%Y, %m, %w, %d, %H, %M, %S, ' + str(micro))\n stamp = []\n \"\"\" Will leave the following here for temporary Python 2 compatibility \"\"\"\n if sys.version_info >= (3, 0):\n for value in full_date.split(','):\n stamp.append(hexlify(struct.pack('<H', int(value))).decode('utf8'))\n elif sys.version_info < (3, 0):\n for value in full_date.split(','):\n stamp.append(hexlify(struct.pack('<H', int(value))))\n self.out_systemtime = ''.join(stamp)\n ts_output = str(\"Microsoft 128-bit SYSTEMTIME:\\t\" + self.out_systemtime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_systemtime = ts_output = False\n return self.out_systemtime, ts_output\n\n def from_filetime(self):\n \"\"\"Convert a Microsoft FILETIME timestamp to a date\"\"\"\n reason = \"[!] 
Microsoft FILETIME timestamps are 2 sets of 8 hex characters (4 bytes), separated by a colon\"\n try:\n if not (\":\" in ft) or not (all(char in hexdigits for char in ft[0:8]) and all(char in hexdigits for char in ft[9:])):\n self.in_filetime = indiv_output = combined_output = False\n pass\n else:\n part2, part1 = [int(h, base=16) for h in ft.split(':')]\n converted_time = struct.unpack('>Q', struct.pack('>LL', part1, part2))[0]\n dt_obj = dt.utcfromtimestamp(float(converted_time - self.epoch_active) / self.hundreds_nano)\n self.in_filetime = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Microsoft FILETIME time: \" + self.in_filetime + \" UTC\")\n combined_output = str(\"\\033[1;31mMicrosoft FILETIME time:\\t\" + self.in_filetime + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_filetime = indiv_output = combined_output = False\n return self.in_filetime, indiv_output, combined_output, reason\n\n def to_filetime(self):\n \"\"\"Convert a date to a Microsoft FILETIME timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n minus_epoch = dt_obj - self.epoch_1601\n calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)\n indiv_output = str(struct.pack(\">Q\", int(calculated_time*10)).hex())\n self.out_filetime = str(indiv_output[8:]) + \":\" + str(indiv_output[:8])\n ts_output = str(\"Microsoft FILETIME time:\\t\" + self.out_filetime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_filetime = ts_output = False\n return self.out_filetime, ts_output\n\n def from_prtime(self):\n \"\"\"Convert a Mozilla PRTime timestamp to a date\"\"\"\n reason = \"[!] 
Mozilla PRTime timestamps are 16 digits\"\n try:\n if not (len(pr) == 16) or not (pr.isdigit()):\n self.in_prtime = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_1970 + timedelta(microseconds=int(pr))\n self.in_prtime = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Mozilla PRTime: \" + self.in_prtime + \" UTC\")\n combined_output = str(\"\\033[1;31mMozilla PRTime:\\t\\t\\t\" + self.in_prtime + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_prtime = indiv_output = combined_output = False\n return self.in_prtime, indiv_output, combined_output, reason\n\n def to_prtime(self):\n \"\"\"Convert a date to Mozilla's PRTime timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_prtime = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz)) * 1000000))\n ts_output = str(\"Mozilla PRTime:\\t\\t\\t\" + self.out_prtime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_prtime = ts_output = False\n return self.out_prtime, ts_output\n\n def from_ole_auto(self):\n \"\"\"Convert an OLE Automation timestamp to a date\"\"\"\n reason = \"[!] OLE Automation timestamps are 2 integers, separated by a dot. The left is 5 digits, the right is between 9-12 digits\"\n try:\n if not (\".\" in auto) or not ((len(auto.split(\".\")[0]) == 5) and (len(auto.split(\".\")[1]) in range(9,13))) or not (''.join(auto.split(\".\")).isdigit()):\n self.in_ole_auto = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_1899 + timedelta(days=float(auto))\n self.in_ole_auto = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"OLE Automation Date: \" + self.in_ole_auto + \" UTC\")\n combined_output = str(\"\\033[1;31mOLE Automation Date:\\t\\t\" + self.in_ole_auto + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_ole_auto = indiv_output = combined_output = False\n return self.in_ole_auto, indiv_output, combined_output, reason\n\n def to_ole_auto(self):\n \"\"\"Convert a date to an OLE Automation timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_ole_auto = \"{0:.12f}\".format(((dt_obj - self.epoch_1899).total_seconds() - int(dt_tz)) / 86400)\n ts_output = str(\"OLE Automation Date:\\t\\t\" + self.out_ole_auto)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_ole_auto = ts_output = False\n return self.out_ole_auto, ts_output\n\n def from_ms1904(self):\n \"\"\"Convert a Microsoft Excel 1904 timestamp to a date\"\"\"\n reason = \"[!] Microsoft Excel 1904 timestamps are 2 integers, separated by a dot. 
The left is 5 digits, the right is between 9-12 digits\"\n try:\n if not (\".\" in ms1904) or not ((len(ms1904.split(\".\")[0]) == 5) and (len(ms1904.split(\".\")[1]) in range(9,13))) or not (''.join(ms1904.split(\".\")).isdigit()):\n self.in_ms1904 = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_1904 + timedelta(days=float(ms1904))\n self.in_ms1904 = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"MS Excel 1904 Date: \" + self.in_ms1904 + \" UTC\")\n combined_output = str(\"\\033[1;31mMS Excel 1904 Date:\\t\\t\" + self.in_ms1904 + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_ms1904 = indiv_output = combined_output = False\n return self.in_ms1904, indiv_output, combined_output, reason\n\n def to_ms1904(self):\n \"\"\"Convert a date to a Microsoft Excel 1904 timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_ms1904 = \"{0:.12f}\".format(((dt_obj - self.epoch_1904).total_seconds() - int(dt_tz)) / 86400)\n ts_output = str(\"MS Excel 1904 Date:\\t\\t\" + self.out_ms1904)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_ms1904 = ts_output = False\n return self.out_ms1904, ts_output\n\n def from_ios_time(self):\n \"\"\"Convert an iOS 11 timestamp to a date\"\"\"\n reason = \"[!] iOS 11 timestamps are typically 15-18 digits\"\n try:\n if not (len(ios) in range(15,19)) or not (ios.isdigit()):\n self.in_iostime = indiv_output = combined_output = False\n pass\n else:\n dt_obj = (int(ios) / int(self.nano_2001)) + 978307200\n self.in_iostime = dt.utcfromtimestamp(dt_obj).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"iOS 11 Date: \" + self.in_iostime + \" UTC\")\n combined_output = str(\"\\033[1;31miOS 11 Date:\\t\\t\\t\" + self.in_iostime + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_iostime = indiv_output = combined_output = False\n return self.in_iostime, indiv_output, combined_output, reason\n\n def to_ios_time(self):\n \"\"\"Convert a date to an iOS 11 timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds() - int(dt_tz)) * self.nano_2001))\n ts_output = str(\"iOS 11 Date:\\t\\t\\t\" + self.out_iostime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_iostime = ts_output = False\n return self.out_iostime, ts_output\n\n def from_sym_time(self):\n \"\"\"Convert a Symantec 6-byte hex timestamp to a date\"\"\"\n reason = \"[!] 
Symantec 6-byte hex timestamps are 12 hex characters\"\n try:\n if not (len(sym) == 12) or not (all(char in hexdigits for char in sym)):\n self.in_symtime = indiv_output = combined_output = False\n pass\n else:\n hex_to_dec = [int(sym[i:i+2], 16) for i in range(0, len(sym), 2)]\n hex_to_dec[0] = hex_to_dec[0] + 1970\n hex_to_dec[1] = hex_to_dec[1] + 1\n dt_obj = dt(hex_to_dec[0], hex_to_dec[1], hex_to_dec[2], hex_to_dec[3], hex_to_dec[4], hex_to_dec[5])\n self.in_symtime = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Symantec AV Timestamp: \" + self.in_symtime)\n combined_output = str(\"\\033[1;31mSymantec AV timestamp:\\t\\t\" + self.in_symtime + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_symtime = indiv_output = combined_output = False\n return self.in_symtime, indiv_output, combined_output, reason\n\n def to_sym_time(self):\n \"\"\"Convert a date to Symantec's 6-byte hex timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n sym_year = '{0:x}'.format(dt_obj.year - 1970).zfill(2)\n sym_month = '{0:x}'.format(dt_obj.month - 1).zfill(2)\n sym_day = '{0:x}'.format(dt_obj.day).zfill(2)\n sym_hour = '{0:x}'.format(dt_obj.hour).zfill(2)\n sym_minute = '{0:x}'.format(dt_obj.minute).zfill(2)\n sym_second = '{0:x}'.format(dt_obj.second).zfill(2)\n self.out_symtime = sym_year + sym_month + sym_day + sym_hour + sym_minute + sym_second\n ts_output = str(\"Symantec AV time:\\t\\t\" + self.out_symtime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_symtime = ts_output = False\n return self.out_symtime, ts_output\n\n def date_range(self, start, end, check_date):\n \"\"\"Check if date is in range of start and end, return True if it is\"\"\"\n if start <= end:\n return start <= check_date <= end\n else:\n return start <= check_date or check_date <= end\n\n def from_gps_time(self):\n \"\"\"Convert a GPS timestamp to a date (involves leap seconds)\"\"\"\n reason = \"[!] 
GPS timestamps are 10 digits\"\n        try:\n            if not (len(gps) == 10) or not (gps.isdigit()):\n                self.in_gpstime = indiv_output = combined_output = False\n                pass\n            else:\n                leapseconds = self.leapseconds\n                gps_stamp = self.epoch_1980 + timedelta(seconds=(float(gps)))\n                tai_convert = gps_stamp + timedelta(seconds=19)\n                epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n                check_date = dt.utcfromtimestamp(epoch_convert)\n                variance = 0\n                for entry in leapseconds:\n                    # stop at the first matching leap-second window; resetting variance to 0 in an else branch discarded the match whenever a later, non-matching entry followed it\n                    if self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date):\n                        variance = entry\n                        break\n                gps_out = check_date - timedelta(seconds=variance)\n                self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n                indiv_output = str(\"GPS Timestamp: \" + self.in_gpstime)\n                combined_output = str(\"\\033[1;31mGPS timestamp:\\t\\t\\t\" + self.in_gpstime + \" UTC\\033[1;m\".format())\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.in_gpstime = indiv_output = combined_output = False\n        return self.in_gpstime, indiv_output, combined_output, reason\n\n    def to_gps_time(self):\n        \"\"\"Convert a date to a GPS timestamp (involves leap seconds)\"\"\"\n        try:\n            leapseconds = self.leapseconds\n            check_date = duparser.parse(timestamp)\n            if hasattr(check_date.tzinfo, '_offset'):\n                dt_tz = check_date.tzinfo._offset.total_seconds()\n                check_date = duparser.parse(timestamp, ignoretz=True)\n            else:\n                dt_tz = 0\n            variance = 0\n            for entry in leapseconds:\n                if self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date):\n                    variance = entry\n                    break\n            leap_correction = check_date + timedelta(seconds=variance)\n            epoch_shift = leap_correction - self.epoch_1970\n            gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n            gps_stamp = int(gps_stamp) - int(dt_tz)\n            self.out_gpstime = str(gps_stamp)\n            ts_output = str(\"GPS time:\\t\\t\\t\" + self.out_gpstime)\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.out_gpstime = ts_output = False\n        return self.out_gpstime, ts_output\n\n    def from_eitime(self):\n        \"\"\"Convert a Google ei URL timestamp\"\"\"\n        reason = \"[!] 
Google ei URL timestamps contain only URL-safe base64 characters: [A-Z][a-z][0-9][=-_]\"\n try:\n urlsafe_chars='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890=-_'\n if not (all(char in urlsafe_chars for char in eitime)):\n self.in_eitime = indiv_output = combined_output = False\n pass\n else:\n padding_check = (len(eitime)%4)\n if padding_check != 0:\n padding_reqd = (4 - padding_check)\n result_eitime = eitime + (padding_reqd * '=')\n else:\n result_eitime = eitime\n try:\n decoded_eitime = base64.urlsafe_b64decode(result_eitime).hex()[:8]\n unix_timestamp, = struct.unpack(\"<L\", unhexlify(decoded_eitime))\n self.in_eitime = dt.utcfromtimestamp(unix_timestamp).strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"Google URL EI Timestamp: \" + self.in_eitime)\n combined_output = str(\"\\033[1;31mGoogle EI URL timestamp:\\t\" + self.in_eitime + \" UTC\\033[1;m\".format())\n except base64.binascii.Error as e:\n self.in_eitime = indiv_output = combined_output = False\n pass\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_eitime = indiv_output = combined_output = False\n return self.in_eitime, indiv_output, combined_output, reason\n\n def to_eitime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n unix_time = int((dt_obj - self.epoch_1970).total_seconds() + int(dt_tz))\n unix_hex = struct.pack(\"<L\", unix_time)\n urlsafe_encode = base64.urlsafe_b64encode(unix_hex)\n self.out_eitime = urlsafe_encode.decode(encoding=\"UTF-8\").strip(\"=\")\n ts_output = str(\"Google EI time:\\t\\t\\t\" + self.out_eitime)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_eitime = ts_output = False\n return self.out_eitime, ts_output\n\n def from_bplist(self):\n \"\"\"Convert a Binary Plist timestamp to a date\"\"\"\n reason = \"[!] 
Binary Plist timestamps are 9 digits\"\n try:\n if not (len(bplist) == 9) or not (bplist.isdigit()):\n self.in_bplist = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_2001 + timedelta(seconds=float(bplist))\n self.in_bplist = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"iOS Binary Plist Timestamp: \" + self.in_bplist)\n combined_output = str(\"\\033[1;31miOS Binary Plist timestamp:\\t\" + self.in_bplist + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_bplist = indiv_output = combined_output = False\n return self.in_bplist, indiv_output, combined_output, reason\n\n def to_bplist(self):\n \"\"\"Convert a date to a Binary Plist timestamp\"\"\"\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_bplist = str(int((dt_obj - self.epoch_2001).total_seconds()) - int(dt_tz))\n ts_output = str(\"iOS Binary Plist time:\\t\\t\" + self.out_bplist)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_bplist = ts_output = False\n return self.out_bplist, ts_output\n\n def from_gsm(self):\n \"\"\"Convert a GSM timestamp to a date\"\"\"\n reason = \"[!] GSM timestamps are 14 hex characters (7 bytes)\"\n try:\n # The last byte of the GSM timestamp is a hex representation of the timezone.\n # If the timezone bitwise operation on this byte results in a timezone offset\n # of less than -12 or greater than 12, then the value is incorrect.\n # The values in tz_in_range are hex bytes which return proper timezones.\n tz_in_range = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '0a', '0b', '0c', '0d', '0e', '0f', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '8a', '8b', '8c', '8d', '8e', '8f', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8']\n tz_check = gsm[12:14][::-1].lower()\n if not (len(gsm) == 14) or not (all(char in hexdigits for char in gsm)) or not (tz_check in tz_in_range):\n self.in_gsm = indiv_output = combined_output = False\n pass\n else:\n swap = [gsm[i:i+2] for i in range(0, len(gsm), 2)]\n for value in swap[:]:\n le = value[::-1]\n swap.remove(value)\n swap.append(le)\n ts_tz = '{0:08b}'.format(int(swap[6], 16))\n if int(ts_tz[0]) == 1:\n utc_offset = -int(str(int(ts_tz[1:4], 2)) + str(int(ts_tz[4:8], 2))) * 0.25\n elif int(ts_tz[0]) == 0:\n utc_offset = int(str(int(ts_tz[0:4], 2)) + str(int(ts_tz[4:8], 2))) * 0.25\n swap[6] = utc_offset\n for string in swap[:]:\n swap.remove(string)\n swap.append(int(string))\n dt_year, dt_month, dt_day, dt_hour, dt_min, dt_sec, dt_tz = swap\n if dt_year in range(0, 20):\n dt_year = dt_year + 2000\n if dt_tz == 0:\n dt_tz = \" UTC\"\n elif dt_tz > 0:\n dt_tz = \" UTC+\" + str(dt_tz)\n else:\n dt_tz = \" UTC\" + str(dt_tz)\n self.in_gsm = str((dt(dt_year, 
dt_month, dt_day, dt_hour, dt_min, dt_sec).strftime('%Y-%m-%d %H:%M:%S.%f')) + dt_tz)\n indiv_output = str(\"GSM Timestamp: \" + self.in_gsm)\n combined_output = str(\"\\033[1;31mGSM Timestamp:\\t\\t\\t\" + self.in_gsm + \" UTC\\033[1;m\".format())\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gsm = indiv_output = combined_output = False\n return self.in_gsm, indiv_output, combined_output, reason\n\n def to_gsm(self):\n try:\n dt_obj = duparser.parse(timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n else:\n dt_tz = 0\n if dt_tz == 0:\n hex_tz = '{:02d}'.format(0)\n elif dt_tz < 0:\n dt_tz = dt_tz / 3600\n conversion = str('{:02d}'.format(int(abs(dt_tz)) * 4))\n conversion_list = []\n for char in range(len(conversion)):\n conversion_list.append(conversion[char])\n high_order = '{0:04b}'.format(int(conversion_list[0]))\n low_order = '{0:04b}'.format(int(conversion_list[1]))\n high_order = '{0:04b}'.format(int(high_order, 2) + 8)\n hex_tz = hex(int((high_order + low_order),2)).lstrip('0x').upper()\n else:\n dt_tz = dt_tz / 3600\n conversion = str(int(dt_tz) *4)\n conversion_list = []\n for char in range(len(conversion)):\n conversion_list.append(conversion[char])\n high_order = '{0:04b}'.format(int(conversion_list[0]))\n low_order = '{0:04b}'.format(int(conversion_list[1]))\n hex_tz = hex(int((high_order + low_order),2)).lstrip('0x').upper()\n date_list = [str(dt_obj.year - 2000), '{:02d}'.format(dt_obj.month), '{:02d}'.format(dt_obj.day), '{:02d}'.format(dt_obj.hour), '{:02d}'.format(dt_obj.minute), '{:02d}'.format(dt_obj.second), hex_tz]\n date_value_swap = []\n for value in date_list[:]:\n be = value[::-1]\n date_value_swap.append(be)\n self.out_gsm = ''.join(date_value_swap)\n ts_output = str(\"GSM time:\\t\\t\\t\" + self.out_gsm)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gsm = ts_output = False\n return self.out_gsm, ts_output\n\n def from_vm(self):\n \"\"\"Convert from a .vmsd createTimeHigh/createTimeLow timestamp\"\"\"\n reason = \"[!] 
VMSD timestamps are a 6-digit \'High\' value followed by a signed/unsigned integer at least 9 digits\"\n        try:\n            # str.split() always yields strings, so the former comparison of their type against int rejected every input; validate two comma-separated (optionally signed) integers instead\n            if (\",\" not in vm) or not (vm.split(\",\")[0].lstrip(\"-\").isdigit() and vm.split(\",\")[1].lstrip(\"-\").isdigit()):\n                self.in_vm = indiv_output = combined_output = False\n                pass\n            else:\n                cTimeHigh = int(vm.split(',')[0])\n                cTimeLow = int(vm.split(',')[1])\n                dt_obj = float((cTimeHigh * 2**32) + struct.unpack('I', struct.pack('i', cTimeLow))[0]) / 1000000\n                self.in_vm = dt.utcfromtimestamp(dt_obj).strftime('%Y-%m-%d %H:%M:%S.%f')\n                indiv_output = str(\"VMSD Timestamp: \" + self.in_vm)\n                combined_output = str(\"\\033[1;31mVMSD Timestamp:\\t\\t\\t\" + self.in_vm + \" UTC\\033[1;m\".format())\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.in_vm = indiv_output = combined_output = False\n        return self.in_vm, indiv_output, combined_output, reason\n\n    def to_vm(self):\n        \"\"\"Convert date to a .vmsd createTime* value\"\"\"\n        try:\n            dt_obj = duparser.parse(timestamp)\n            if hasattr(dt_obj.tzinfo, '_offset'):\n                dt_tz = dt_obj.tzinfo._offset.total_seconds()\n                dt_obj = duparser.parse(timestamp, ignoretz=True)\n            else:\n                dt_tz = 0\n            unix_seconds = (int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000000)\n            createTimeHigh = int(float(unix_seconds) / 2**32)\n            unpacked_int = unix_seconds - (createTimeHigh * 2**32)\n            createTimeLow = struct.unpack('i', struct.pack('I', unpacked_int))[0]\n            self.out_vm = str(createTimeHigh) + ',' + str(createTimeLow)\n            ts_output = str(\"VMSD time:\\t\\t\\t\" + self.out_vm)\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n            self.out_vm = ts_output = False\n        return self.out_vm, ts_output\n\n    def from_all(self):\n        \"\"\"Output all processed timestamp values; guess the date from the provided timestamp\"\"\"\n        this_year = int(dt.now().strftime('%Y'))\n        states = []\n        inputs = (self.in_unix_sec, self.in_unix_milli, self.in_windows_hex_64, self.in_windows_hex_le, self.in_chrome, self.in_ad, self.in_unix_hex_32, self.in_unix_hex_32le, self.in_cookie, self.in_ole_be, self.in_ole_le, self.in_mac, self.in_hfs_dec, self.in_hfs_be, self.in_hfs_le, self.in_msdos, self.in_fat, self.in_systemtime, self.in_filetime, self.in_prtime, self.in_ole_auto, self.in_ms1904, self.in_iostime, self.in_symtime, self.in_gpstime, self.in_eitime, self.in_bplist, self.in_gsm)\n        print ('\\nGuessing Date from Timestamp: ' + sys.argv[2] + '\\r')\n        print ('Outputs which do not result in a date/time value are not displayed.\\r')\n        print ('\\033[1;31mMost likely results (results within +/- 5 years) are highlighted.\\n\\033[1;m'.format())\n        for func in self.ts_funcs:\n            result, indiv_output, combined_output, reason = func()\n            states.append(result)\n            if isinstance(result, str):\n                if int(duparser.parse(result).strftime('%Y')) in range(this_year -5, this_year +5):\n                    print(combined_output)\n                else:\n                    print(combined_output.strip(self.left_color).strip(self.right_color))\n        if all(state is False for state in states):\n            print ('No valid dates found. 
Check your input and try again.')\n print ('\\r')\n\n def timestamp_output(self):\n \"\"\"Output all processed dates from timestamp values\"\"\"\n for func in self.date_funcs:\n result, ts_output = func()\n if isinstance(result, str):\n print(ts_output)\n print('\\r')\n\nif __name__ == '__main__':\n now = dt.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n arg_parse = argparse.ArgumentParser(description='Time Decoder and Converter v' +str(__version__))\n arg_parse.add_argument('--unix', metavar='<value>', help='convert from Unix Seconds')\n arg_parse.add_argument('--umil', metavar='<value>', help='convert from Unix Milliseconds')\n arg_parse.add_argument('--wh', metavar='<value>', help='convert from Windows 64-bit Hex BE')\n arg_parse.add_argument('--whle', metavar='<value>', help='convert from Windows 64-bit Hex LE')\n arg_parse.add_argument('--chrome', metavar='<value>', help='convert from Google Chrome time')\n arg_parse.add_argument('--active', metavar='<value>', help='convert from Active Directory value')\n arg_parse.add_argument('--uhbe', metavar='<value>', help='convert from Unix Hex 32-bit BE')\n arg_parse.add_argument('--uhle', metavar='<value>', help='convert from Unix Hex 32-bit LE')\n arg_parse.add_argument('--cookie', metavar='<value>', help='convert from Windows Cookie Date (Low Value,High Value)')\n arg_parse.add_argument('--oleb', metavar='<value>', help='convert from Windows OLE 64-bit BE - remove 0x and spaces! Example from SRUM: 0x40e33f5d 0x97dfe8fb should be 40e33f5d97dfe8fb')\n arg_parse.add_argument('--olel', metavar='<value>', help='convert from Windows OLE 64-bit LE')\n arg_parse.add_argument('--mac', metavar='<value>', help='convert from Mac Absolute Time')\n arg_parse.add_argument('--hfsdec', metavar='<value>', help='convert from Mac OS/HFS+ Decimal Time')\n arg_parse.add_argument('--hfsbe', metavar='<value>', help='convert from HFS(+) BE times (HFS = Local, HFS+ = UTC)')\n arg_parse.add_argument('--hfsle', metavar='<value>', help='convert from HFS(+) LE times (HFS = Local, HFS+ = UTC)')\n arg_parse.add_argument('--fat', metavar='<value>', help='convert from FAT Date + Time (wFat)')\n arg_parse.add_argument('--msdos', metavar='<value>', help='convert from 32-bit MS-DOS time - result is Local Time')\n arg_parse.add_argument('--sys', metavar='<value>', help='convert from 128-bit SYSTEMTIME')\n arg_parse.add_argument('--ft', metavar='<value>', help='convert from FILETIME timestamp')\n arg_parse.add_argument('--pr', metavar='<value>', help='convert from Mozilla\\'s PRTime')\n arg_parse.add_argument('--auto', metavar='<value>', help='convert from OLE Automation Date format')\n arg_parse.add_argument('--ms1904', metavar='<value>', help='convert from MS Excel 1904 Date format')\n arg_parse.add_argument('--ios', metavar='<value>', help='convert from iOS 11 Timestamp')\n arg_parse.add_argument('--sym', metavar='<value>', help='convert Symantec\\'s 12-byte AV Timestamp')\n arg_parse.add_argument('--gps', metavar='<value>', help='convert from a GPS Timestamp')\n arg_parse.add_argument('--eitime', metavar='<value>', help='convert from a Google EI URL Timestamp')\n arg_parse.add_argument('--bplist', metavar='<value>', help='convert from an iOS Binary Plist Timestamp')\n arg_parse.add_argument('--gsm', metavar='<value>', help='convert from a GSM Timestamp')\n arg_parse.add_argument('--vm', metavar='<value>', help='convert from a VMWare Snapshot (.vmsd) timestamp - enter as \"high value,low value\"')\n arg_parse.add_argument('--guess', metavar='<value>', help='guess timestamp and 
output all reasonable possibilities')\n arg_parse.add_argument('--timestamp', metavar='DATE', help='convert date to every timestamp - enter date as \\\"Y-M-D HH:MM:SS.m\\\" in 24h fmt - without argument gives current date/time', nargs='?', const=now)\n arg_parse.add_argument('--version', '-v', action='version', version='%(prog)s ' +str(__version__))\n args = arg_parse.parse_args()\n guess = args.guess; unix = args.unix; umil = args.umil; wh = args.wh; whle = args.whle; chrome = args.chrome; active = args.active; uhbe = args.uhbe; uhle = args.uhle; cookie = args.cookie; oleb = args.oleb; olel = args.olel; mac = args.mac; hfsdec = args.hfsdec; hfsbe = args.hfsbe; hfsle = args.hfsle; fat = args.fat; msdos = args.msdos; systime = args.sys; ft = args.ft; pr = args.pr; auto = args.auto; ms1904 = args.ms1904; ios = args.ios; sym = args.sym; gps = args.gps; timestamp = args.timestamp; eitime = args.eitime; bplist = args.bplist; gsm = args.gsm; vm = args.vm\n if args.guess:\n unix = umil = wh = whle = chrome = active = uhbe = uhle = cookie = oleb = olel = mac = hfsdec = hfsbe = hfsle = fat = msdos = systime = ft = pr = auto = ms1904 = ios = sym = gps = eitime = bplist = gsm = vm = guess\n\n td = TimeDecoder()\n td.run()\n","sub_path":"time_decode.py","file_name":"time_decode.py","file_ext":"py","file_size_in_byte":88368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216418620","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDumps all data images to the specified directory.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport cPickle\n\nfrom os import makedirs, path\n\n\nimport cv2\nimport numpy as np\n\n\n\n\nFLOAT_DTYPE = \"float32\"\n\n\n\n\ndef save_image(img, out_file):\n \"\"\"Writes the specified image to the output file.\"\"\"\n\n\n num_channels = img.shape[0]\n img_height = img.shape[1]\n img_width = img.shape[2]\n\n\n if num_channels == 1:\n out_img = img[0] * 255\n\n else:\n out_img = np.zeros((img_height, img_width, 3), dtype=FLOAT_DTYPE)\n\n out_img[:, :, 0] = img[2, :, :] * 255\n out_img[:, :, 1] = img[1, :, :] * 255\n out_img[:, :, 2] = img[0, :, :] * 255\n\n out_img[out_img > 255] = 255\n out_img[out_img < 0] = 0\n\n\n cv2.imwrite(out_file, out_img)\n\n\n\n\n\n\n\ndef main(in_data_file, out_dir):\n \"\"\"Main method called with command line arguments.\"\"\"\n\n with open(in_data_file, \"rb\") as f_in:\n images = cPickle.load(f_in)\n\n\n try:\n makedirs(out_dir)\n except OSError:\n pass\n\n\n\n for ex_ind in range(images.shape[0]):\n img = images[ex_ind]\n out_filename = path.join(out_dir, \"%08d.jpg\" % ex_ind)\n save_image(img, out_filename)\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"in_data_file\", help=\"File storing pickled image data\")\n parser.add_argument(\"out_dir\", help=\"Directory to dump images into\")\n\n\n args = parser.parse_args()\n main(path.abspath(args.in_data_file), path.abspath(args.out_dir))\n\n\n","sub_path":"dump_data_images.py","file_name":"dump_data_images.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"576808577","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef pre_processing(x_data):\n x_max = x_data.max()\n x_min = x_data.min()\n for _ in range(x_data.size):\n _temp = (x_data[_] - x_min) / (x_max - x_min)\n x_data[_] = 
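# --- editorial sketch, not part of the original record ---
# save_image() above copies the three channels one by one; the same
# CHW-float -> HWC-BGR-uint8 conversion for cv2.imwrite can be done in
# one vectorised step (assumes a (3, H, W) RGB array scaled to [0, 1]):
import numpy as np
import cv2

def to_bgr_uint8(img_chw):
    hwc = np.transpose(img_chw, (1, 2, 0))      # CHW -> HWC
    bgr = hwc[:, :, ::-1]                       # RGB -> BGR (OpenCV order)
    return np.clip(bgr * 255.0, 0, 255).astype(np.uint8)

# cv2.imwrite('out.jpg', to_bgr_uint8(img))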
_temp\n return x_data\n\n\ndef gradient_update(_X_k_i, w_t, training_values, prices_values, _size):\n _sum_gd = 0\n for _i in range(_size):\n wX = np.dot(w_t, training_values[_i])\n _sum_gd += ((_X_k_i * (wX - prices_values[_i])) / (2 * np.sqrt((wX - prices_values[_i]) ** 2 + 1)))\n return _sum_gd / _size\n\n\ndef loss_achieved(w_t, training_values, prices_values, _size, hyper):\n _sum_loss = 0\n for _i in range(_size):\n wX = np.dot(w_t.T, training_values[_i])\n _sum_loss += (np.sqrt(1 / (hyper ** 2) * (prices_values[_i] - wX) ** 2 + 1) - 1)\n return _sum_loss / _size\n\n\nif __name__ == '__main__':\n np.set_printoptions(suppress=True)\n df = pd.read_csv(\"./real_estate.csv\")\n\n # 1(a)\n null_index = df[df.isnull().any(axis=1) == True].index\n df.dropna(axis=0, inplace=True)\n prices = df['price']\n df.drop(columns=['transactiondate', 'latitude', 'longitude', 'price'], inplace=True)\n print(\"null indices: \", null_index.values)\n\n # 1(b)\n x_new_age = pre_processing(np.array(df['age']))\n x_new_mrt = pre_processing(np.array(df['nearestMRT']))\n x_new_nCon = pre_processing(np.array(df['nConvenience']))\n\n x_new_age_mean = x_new_age.mean()\n x_new_mrt_mean = x_new_mrt.mean()\n x_new_nCon_mean = x_new_nCon.mean()\n print(\"x_new_age_mean: \", x_new_age_mean)\n print(\"x_new_nearestMRT: \", x_new_mrt_mean)\n print(\"x_new_nConvenience: \", x_new_nCon_mean)\n\n # 2\n x_new = pd.DataFrame(columns=['age', 'nearestMRT', 'nConvenience'])\n x_new['age'] = x_new_age\n x_new['nearestMRT'] = x_new_mrt\n x_new['nConvenience'] = x_new_nCon\n size = x_new.index.size\n training_price = prices.values[:int(size / 2)]\n test_price = prices.values[int(size / 2):]\n training_set = x_new.values[:int(size / 2)]\n test_set = x_new.values[int(size / 2):]\n\n x_print = pd.DataFrame(columns=['age', 'nearestMRT', 'nConvenience', 'price'])\n x_print['age'] = x_new_age\n x_print['nearestMRT'] = x_new_mrt\n x_print['nConvenience'] = x_new_nCon\n x_print['price'] = prices.values\n\n training_print = x_print.values[:int(size / 2)]\n test_print = x_print.values[int(size / 2):]\n\n first_training_row = training_print[0]\n last_training_row = training_print[-1]\n first_test_row = test_print[0]\n last_test_row = test_print[-1]\n\n print(\"first training row: \", first_training_row)\n print(\"last training row: \", last_training_row)\n print(\"first test row: \", first_test_row)\n print(\"last test row: \", last_test_row)\n\n # 5\n fig, ax = plt.subplots(3, 3, figsize=(10, 10))\n nIter = 400\n alphas = [10, 5, 2, 1, 0.5, 0.25, 0.1, 0.05, 0.01]\n losses = []\n training_size = training_set.shape[0]\n training_gd_set = np.insert(training_set, 0, 1, axis=1)\n c = 2\n loss = []\n w_plot = []\n for i, ax in enumerate(ax.flat):\n w = np.ones(4)\n for index in range(nIter):\n temp = index\n if temp >= training_size:\n temp -= training_size\n w = w - gradient_update(training_gd_set[temp], w, training_gd_set, training_price, training_size) * alphas[\n i]\n loss_mean = loss_achieved(w, training_gd_set, training_price, training_size, c)\n loss.append(loss_mean)\n # print(w)\n losses.append(loss)\n ax.plot(losses[i])\n loss.clear()\n ax.set_title(f\"step size: {alphas[i]}\")\n\n plt.tight_layout()\n plt.show()\n\n # 5 c\n w_plot.clear()\n fig, ax = plt.subplots(figsize=(10, 10))\n w = np.ones(4)\n w_plot.append(w)\n for index in range(nIter):\n temp = index\n if temp >= training_size:\n temp -= training_size\n Xki = np.array([1, training_gd_set[temp][0], training_gd_set[temp][1], training_gd_set[temp][2]])\n w = w - 
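# --- editorial sketch, not part of the original record ---
# loss_achieved() above is the pseudo-Huber loss L(r) = sqrt((r/c)^2 + 1) - 1
# on the residual r = w.x - y. Differentiating,
#   dL/dr = r / (c^2 * sqrt((r/c)^2 + 1)),
# which for c = 2 simplifies to r / (2 * sqrt(r^2 + 4)) -- the expression
# that appears in the inline SGD update later in this script.
import numpy as np

def pseudo_huber(r, c=2.0):
    return np.sqrt((r / c) ** 2 + 1.0) - 1.0

def pseudo_huber_grad(r, c=2.0):
    return r / (c ** 2 * np.sqrt((r / c) ** 2 + 1.0))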
gradient_update(Xki, w, training_gd_set, training_price, training_size) * 0.3\n w_plot.append(w)\n ax.plot(w_plot)\n plt.show()\n\n print(\"w0: \", w[0])\n print(\"w1: \", w[1])\n print(\"w2: \", w[2])\n print(\"w3: \", w[3])\n\n # y-model\n test_size = test_set.shape[0]\n test_set_gd_set = np.insert(test_set, 0, 1, axis=1)\n test_loss = loss_achieved(w, test_set_gd_set, test_price, test_size, c)\n train_loss = loss_achieved(w, training_gd_set, training_price, training_size, c)\n print(\"batch training loss: \", train_loss)\n print(\"batch test_loss: \", test_loss)\n\n # 6\n fig, ax = plt.subplots(3, 3, figsize=(10, 10))\n losses.clear()\n loss.clear()\n epoch_times = 6\n for i, ax in enumerate(ax.flat):\n w = np.ones(4)\n for _ in range(epoch_times):\n # for index in range(training_size):\n for index in range(training_size):\n # Xki = np.array([1, training_gd_set[index][0], training_gd_set[index][1], training_gd_set[index][2]])\n Xki = np.array(training_gd_set[index])\n wX = np.dot(w, training_gd_set[index].T)\n derivative_loss = (wX - training_price[index]) / (\n 2 * np.sqrt((wX - training_price[index]) ** 2 + 4))\n w = w - alphas[i] * derivative_loss * Xki\n loss_mean = loss_achieved(w, training_gd_set, training_price, training_size, c)\n loss.append(loss_mean)\n\n # print(w)\n losses.append(loss)\n ax.plot(losses[i])\n loss.clear()\n ax.set_title(f\"step size: {alphas[i]}\")\n\n plt.tight_layout()\n plt.show()\n\n helper = []\n w = np.ones(4)\n helper.append(w)\n for _ in range(epoch_times):\n for index in range(training_size):\n # Xki = np.array([1, training_gd_set[index][0], training_gd_set[index][1], training_gd_set[index][2]])\n Xki = np.array(training_gd_set[index])\n wX = np.dot(w, training_gd_set[index].T)\n derivative_loss = (wX - training_price[index]) / (\n 2 * np.sqrt((wX - training_price[index]) ** 2 + 4))\n w = w - 0.4 * derivative_loss * Xki\n helper.append(w)\n # loss_mean = loss_achieved(w, training_gd_set, training_price, training_size, c)\n # loss.append(loss_mean)\n plt.plot(helper)\n plt.show()\n\n print(\"w0: \", w[0])\n print(\"w1: \", w[1])\n print(\"w2: \", w[2])\n print(\"w3: \", w[3])\n\n # y-model\n test_loss = loss_achieved(w, test_set_gd_set, test_price, test_size, c)\n train_loss = loss_achieved(w, training_gd_set, training_price, training_size, c)\n print(\"SGD train_loss: \", train_loss)\n print(\"SGD test_loss: \", test_loss)\n","sub_path":"Homeworks/Homework1/Homework1 (1)/Homework1Code-1.py","file_name":"Homework1Code-1.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641746366","text":"from keras import Model\nfrom keras.layers import Flatten, Dense, Input, Convolution2D, MaxPooling2D, BatchNormalization, Activation, \\\n GlobalAveragePooling2D, AveragePooling2D, Reshape\nfrom keras import regularizers\nimport numpy as np\n\n# size of pooling area for max pooling\n_pool_size = (2, 2)\n_stride = (2, 2)\n# convolution kernel size\n_kernel_size = (3, 3)\nBATCH_NUM_PER_CLASS = 15\nFEATURE_DIM = 128\n\n\ndef _conv_act_bn(inputs, filters, name, kernel_size=(3, 3), norm_rate=0.0, padding='same'):\n x = Convolution2D(filters, kernel_size=kernel_size, padding=padding, name=name,\n kernel_regularizer=regularizers.l2(norm_rate))(inputs)\n\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n return x\n\n\ndef _block(layer_num, inputs, filters, name, kernel_size=(3, 3), norm_rate=0.0, padding='same'):\n x = inputs\n\n for i in range(layer_num):\n x = 
_conv_act_bn(x, filters, name=name + '_{}'.format(i + 1), kernel_size=kernel_size, norm_rate=norm_rate,\n padding=padding)\n x = MaxPooling2D(pool_size=_pool_size, strides=(2, 2))(x)\n return x\n\n\n# def LTC_BN(num_class, filters, layer_num, input_shape=(256, 256, 1,), norm_rate=0.0, name=None):\n# \"\"\"\n# VGG model with batch-normalize on each block.\n# :param num_class: the number of your DataSet classes\n# :param filters: a list of filters\n# :param layer_num: a list of the number of each block\n# :param name: default None. it can be given list to name the block\n# :param input_shape: the shape of input\n# :param norm_rate: l2 norm rate\n# :return: vgg batch-normalize model\n# \"\"\"\n# if len(filters) < len(layer_num):\n# print('ERROR:filters length must be equal or longer than layer_num.')\n# if name is None:\n# name = ['Conv{}'.format(i + 1) for i in range(len(layer_num))]\n# inputs = Input(shape=input_shape, name='input')\n# x = inputs\n# for i in range(len(layer_num)):\n# x = _block(layer_num=layer_num[i], inputs=x, filters=filters[i], name=name[i])\n# x = AveragePooling2D()(x)\n# x = Flatten()(x)\n# x = Dense(1024, kernel_regularizer=regularizers.l2(norm_rate))(x)\n# x = Activation('relu')(x)\n# x = BatchNormalization()(x)\n# x = Dense(512, kernel_regularizer=regularizers.l2(norm_rate))(x)\n# x = Activation('relu')(x)\n# x = BatchNormalization()(x)\n# predictions = Dense(num_class, activation='softmax', name='prediction',\n# kernel_regularizer=regularizers.l2(norm_rate))(x)\n# model = Model(inputs=inputs, outputs=predictions)\n# return model\n#\n#\n# def CNNEncoder(input_shape, norm_rate=0.0):\n# \"\"\"docstring for ClassName\"\"\"\n# inputs = Input(shape=input_shape, name='input')\n# x = Dense(1024, kernel_regularizer=regularizers.l2(norm_rate))(inputs)\n# x = Activation('relu')(x)\n# x = BatchNormalization()(x)\n# x = Dense(512, kernel_regularizer=regularizers.l2(norm_rate))(x)\n# x = Activation('relu')(x)\n# x = BatchNormalization()(x)\n# return x\ndef get_concentrate_rp(base_model, train_data, sample_data, train_label):\n \"\"\"\n 提取sample集和train集特征并结合\n :param base_model:\n :param sample_data: [-1,n,n,f] 要求每类的包含的图片数相同,且同类紧挨着,从第一类到第五类\n :param train_data: 训练集数据\n :param train_label:训练集标签\n :return: 联合特征及训练标签\n \"\"\"\n sample_features = base_model.predict(sample_data, verbose=1)\n sample_data = None\n train_features = base_model.predict(train_data, verbose=1)\n train_data = None\n # print(sample_features.shape) # (100, 8, 8, 512)\n # print(train_features.shape) # (220, 8, 8, 512)\n sample_features = np.reshape(sample_features, newshape=(\n 5, 20, base_model.output.shape[1], base_model.output.shape[2], base_model.output.shape[3]))\n # print(sample_features.shape) # (5, 20, 8, 8, 512)\n sample_features = np.sum(sample_features, 1)\n # print(sample_features.shape) # (5, 8, 8, 512)\n sample_features = np.expand_dims(sample_features, axis=0).repeat(train_features.shape[0], axis=0)\n # print(sample_features.shape) # (220, 5, 8, 8, 512)\n train_features = np.expand_dims(train_features, axis=0).repeat(5, axis=0)\n # print(train_features.shape) # (5, 220, 8, 8, 512)\n train_features = np.transpose(train_features, (1, 0, 2, 3, 4))\n # print(train_features.shape) # (220, 5, 8, 8, 512)\n relation_pairs = np.concatenate((sample_features, train_features), axis=4).reshape(train_features.shape[0], -1,\n train_features.shape[2],\n train_features.shape[2])\n # print(relation_pairs.shape) # (220*5, 512*2, 5, 5)==(1100, 1024, 8, 8)\n ralation_label = train_label\n # print(relation_label.shape) 
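# --- editorial sketch, not part of the original record ---
# get_concentrate_rp() above pairs every query feature map with every
# class prototype via expand_dims/repeat/transpose/concatenate. The same
# shape choreography with toy sizes (N queries, K classes, HxW maps,
# C channels):
import numpy as np

N, K, H, W, C = 4, 5, 8, 8, 16
protos = np.ones((K, H, W, C))
queries = np.ones((N, H, W, C))

p = np.repeat(protos[None, ...], N, axis=0)       # (N, K, H, W, C)
q = np.repeat(queries[None, ...], K, axis=0)      # (K, N, H, W, C)
q = np.transpose(q, (1, 0, 2, 3, 4))              # (N, K, H, W, C)
pairs = np.concatenate((p, q), axis=4)            # (N, K, H, W, 2C)
print(pairs.shape)                                # (4, 5, 8, 8, 32)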
# (220, 5)\n return relation_pairs, ralation_label\n\n\ndef LTC_BN(num_class, input_shape, norm_rate=0.0):\n \"\"\"docstring for RelationNetwork\"\"\"\n\n inputs = Input(shape=input_shape, name='input')\n\n x = _conv_act_bn(inputs, filters=16, name='RN_Conv1')\n x = _conv_act_bn(x, filters=32, name='RN_Conv2')\n x = Flatten()(x)\n x = Dense(1024, kernel_regularizer=regularizers.l2(norm_rate))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n # x = Reshape(target_shape=(-1,5))(x)\n x = Dense(num_class, kernel_regularizer=regularizers.l2(norm_rate))(x)\n predictions = Activation('sigmoid')(x)\n # = Reshape(target_shape=(220, -1))(x)\n # predictions =\n\n # predictions = Dense(num_class, activation='sigmoid', name='prediction',\n # kernel_regularizer=regularizers.l2(norm_rate))(x)\n model = Model(inputs=inputs, outputs=predictions)\n return model\n\n\nif __name__ == '__main__':\n from keras.optimizers import Adam\n\n model = LTC_BN(5, input_shape=(5, 5, 128), norm_rate=0.0)\n optimizer = Adam(1e-4)\n model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy']) #\n model.summary()\n","sub_path":"pano/LTC.py","file_name":"LTC.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"194847015","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\n\nfrom .models import Image,Detail\n\n\ndef index(request):\n from random import randint\n \n if request.user.is_superuser:\n import_directory()\n \n i = []\n detail = []\n bgimage = Image.objects.all()\n \n if len(bgimage) > 1:\n i = bgimage[randint(0,(len(bgimage)-1))]\n if len(i.detail_set.all()) > 0: \n detail = i.detail_set.all()[0]\n \n return render(request, 'bgrunner/index.html', {'i': i, 'd': detail})\n \n\ndef detail(request,image_id):\n \n i = Image.objects.get(pk=image_id)\n detail = []\n if len(i.detail_set.all()) > 0: \n detail = i.detail_set.all()[0]\n \n return render(request, 'bgrunner/detail.html', { 'i': i ,'d' : detail })\n \n\n# SHOULD HAVE DIFFERENT VIEWS FOR EDIT AND FOR ADD - MAYBE ALLOW COMMENT-LIKE STORIES INSTEAD OF JUST OVERVIEW? START WITH JUST OVERVIEW?\n\ndef add_detail(request,image_id):\n from django.core.urlresolvers import reverse\n \n if request.user.is_superuser:\n i = Image.objects.get(pk=image_id)\n \n if len(i.detail_set.all()) > 0:\n d = i.detail_set.all()[0]\n d.title = request.POST['title']\n d.description = request.POST['description']\n d.pub_date=i.pub_date\n d.save()\n \n else:\n i.detail_set.create(title=request.POST['title'],description=request.POST['description'],pub_date=i.pub_date)\n \n \n # return HttpResponseRedirect(reverse('bgrunner:detail', args=(image_id)))\n return render(request, 'bgrunner/detail.html', { 'i': i ,'d' : i.detail_set.all()[0] })\n \n else:\n \n return HttpResponse(\"User not authorized to edit entry: %s. 
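# --- editorial sketch, not part of the original record ---
# index() above selects a random background with
# bgimage[randint(0, len(bgimage) - 1)]; the stdlib expresses the same
# pick directly and sidesteps the off-by-one bookkeeping:
import random

items = ['a', 'b', 'c']
picked = random.choice(items)   # same as items[random.randint(0, len(items) - 1)]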
Visit <a href=\\\"/admin\\\">/admin</a> to login\" % image_id)\n\ndef rotate(request,image_id):\n if request.user.is_superuser:\n from PIL import Image as pilimg\n \n amt = 270\n if request.POST['rotate_amount']:\n amt = request.POST['rotate_amount']\n\n i = Image.objects.get(pk=image_id)\n \n im = pilimg.open(i.location)\n imclone = im.rotate(float(amt))\n width, height = imclone.size\n \n newname = i.name + \"_rotate_\" + str(amt) + \"deg.jpg\"\n i.location = i.location.rsplit('/',1)[0] + newname\n i.shortcut = i.shortcut.rsplit('/',1)[0] + newname\n i.width = width\n i.height = height\n i.save()\n \n imclone.save(i.location)\n \n detail = []\n if len(i.detail_set.all()) > 0: \n detail = i.detail_set.all()[0]\n \n return render(request, 'bgrunner/detail.html', { 'i': i ,'d' : detail })\n \n \n else:\n \n return HttpResponse(\"User not authorized to edit entry: %s. Visit <a href=\\\"/admin\\\">/admin</a> to login\" % image_id)\n \n\n# THIS COULD TAKE A WHILE\ndef image_fun(request,image_id):\n if request.user.is_superuser:\n \n from PIL import Image as pilimg\n\n img = Image.objects.get(pk=image_id)\n \n i = pilimg.open(img.location)\n i = i.convert(\"RGBA\")\n \n i.convert(\"RGBA\")\n datas = i.getdata()\n\n newData = []\n for item in datas:\n if item[0] < 25 and item[1] < 20 and item[2] < 25:\n newData.append((0, 0, 0, 0))\n else:\n newData.append(item)\n\n i.putdata(newData)\n \n i.save(img.location + \"__trans.png\", \"PNG\")\n \n img.shortcut += \"__trans.png\"\n detail = []\n if len(img.detail_set.all()) > 0: \n detail = img.detail_set.all()[0]\n \n return render(request, 'bgrunner/detail.html', { 'i': img ,'d' : detail })\n\n else:\n \n return HttpResponse(\"User not authorized to edit entry: %s. Visit <a href=\\\"/admin\\\">/admin</a> to login\" % image_id)\n \n\n\n\n\n \n \n \ndef save_image(name,location,height,width,shortcut):\n i = Image(name=name, location=location, pub_date=timezone.now(), height=height, width=width, shortcut=shortcut)\n i.save()\n\ndef import_directory():\n from os import listdir\n from os.path import isfile, join\n from PIL import Image as pilimg\n path = \"/Users/geoffbooth/dev/python/django/dad_slider/bgrunner/static/bgrunner/images/\"\n images = [ f for f in listdir(path) if isfile(join(path,f)) ]\n for i in images:\n file = path + i\n im = pilimg.open(file)\n width, height = im.size\n shortcut = \"bgrunner/images/\" + i\n\n save_image(name=i,location=file,height=height,width=width,shortcut=shortcut )\n ","sub_path":"bgrunner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"351007183","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Board, Topic, User, Post\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom .form import NewTopicForm, NewBoardForm, PostForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Count\nfrom django.views.generic import UpdateView, ListView\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.urls import reverse\n\n# Create your views here.\n\n#######################\n### Contoh ListView ###\n#######################\n\n### Tanpa argument / parameter ###\nclass BoardListView(ListView) :\n model = Board\n context_object_name = 
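# --- editorial sketch, not part of the original record ---
# rotate() above uses PIL's Image.rotate(angle) with the default
# expand=False, so non-right-angle rotations clip the corners to the
# original canvas. Passing expand=True grows the canvas instead
# (file names here are illustrative):
from PIL import Image

im = Image.open('photo.jpg')
rotated = im.rotate(37.5, expand=True)   # canvas enlarged, nothing clipped
rotated.save('photo_rotated.jpg')
print(im.size, '->', rotated.size)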
'boards'\n template_name = 'home.html'\n paginate_by = 5\n\n### dengan 1 argument / parameter ###\nclass TopicListView(ListView) :\n model = Topic\n context_object_name = 'topics'\n template_name = 'topics.html'\n paginate_by = 5\n\n def get_context_data(self, **kwargs) :\n kwargs['board'] = self.board\n return super().get_context_data(**kwargs)\n\n def get_queryset(self) :\n self.board = get_object_or_404(Board, pk=self.kwargs.get('pk'))\n queryset = self.board.topics.all()\n return queryset\n\nclass PostListView(ListView) :\n model = Post\n context_object_name = 'posts'\n template_name = 'topic_posts.html'\n paginate_by = 5\n\n def get_context_data(self, **kwargs) :\n session_key = 'viewed_topic_{}'.format(self.topic.pk)\n if not self.request.session.get(session_key, False) :\n self.topic.views += 1\n self.topic.save()\n self.request.session[session_key] = True\n\n kwargs['topic'] = self.topic\n return super().get_context_data(**kwargs)\n\n def get_queryset(self) :\n self.topic = get_object_or_404(Topic, board__pk=self.kwargs.get('pk'), pk=self.kwargs.get('t_pk'))\n queryset = self.topic.posts.order_by('created_at')\n return queryset\n\n# Contoh UpdateView #\n@method_decorator(login_required, name='dispatch')\nclass PostUpdateView(UpdateView) :\n model = Post\n fields = ['message']\n template_name = 'edit_post.html'\n pk_url_kwarg = 'p_pk'\n context_object_name = 'post'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(created_by=self.request.user)\n\n def form_valid(self, form) :\n post = form.save(commit = False)\n post.updated_by = self.request.user\n post.updated_at = timezone.now()\n post.save()\n\n return redirect('topic_posts', pk=post.topic.board.pk, t_pk=post.topic.pk)\n\n@login_required\ndef topic_reply(request, pk, t_pk):\n topic = get_object_or_404(Topic, board__pk = pk, pk = t_pk)\n\n if request.method == 'POST' :\n form = PostForm(request.POST)\n if form.is_valid() :\n post = form.save(commit=False)\n post.topic = topic\n post.created_by = request.user\n post.save()\n\n topic.last_updated = timezone.now()\n topic.save()\n\n topic_url = reverse('topic_posts', kwargs={'pk': pk, 't_pk': t_pk})\n topic_post_url = '{url}?page={page}#{id}'.format(\n url=topic_url,\n id=post.pk,\n page=topic.get_page_count()\n )\n\n return redirect(topic_post_url)\n #return redirect('topic_posts', pk=pk, t_pk = t_pk)\n else :\n form = PostForm()\n\n return render(request, 'topic_reply.html', {'form': form, 'topic': topic})\n\n@login_required\ndef new_topic(request, pk):\n board = get_object_or_404(Board,pk=pk)\n\n if request.method == 'POST' :\n form = NewTopicForm(request.POST)\n if form.is_valid() :\n topic = form.save(commit=False)\n topic.board = board\n topic.starter = request.user\n topic.save()\n post = Post.objects.create(\n message=form.cleaned_data.get('message'),\n topic=topic,\n created_by=request.user\n )\n return redirect('board_topics', pk=board.pk)\n else :\n form = NewTopicForm()\n\n return render(request, 'new_topic.html', {'form': form, 'board': board})\n\n@login_required\ndef new_board(request):\n\n if request.method == 'POST' :\n form = NewBoardForm(request.POST)\n if form.is_valid() :\n board = form.save(commit=False)\n board.save()\n return redirect('home')\n else:\n form = NewBoardForm()\n\n return render(request, 'new_board.html', {'form':form})\n\ndef about(request):\n return render(request, 'about.html')\n\ndef about_company(request):\n return render(request, 'about_company.html', {'company_name': 'Simple Complex'})\n\n# obsolete 
views\n\"\"\"\n###############################\n## Sudah diganti ke ListView ##\n###############################\n\ndef home(request):\n boards = Board.objects.all()\n return render(request, 'home.html', {'boards': boards} )\n\ndef board_topics(request, pk):\n board = get_object_or_404(Board,pk=pk)\n queryset = board.topics.all()\n page = request.GET.get('page',1)\n paginator = Paginator(queryset,5)\n topics = paginator.page(1)\n\n try :\n topics = paginator.page(page)\n except PageNotAnInteger :\n topics = paginator.page(1)\n except EmptyPage :\n topics = paginator.page(paginator.num_pages)\n\n return render(request, 'topics.html', {'board': board, 'topics': topics})\n\ndef topic_posts(request, pk, t_pk):\n topic = get_object_or_404(Topic, board__pk=pk , pk=t_pk)\n topic.views += 1\n topic.save()\n queryset = topic.posts.all()\n\n page = request.GET.get('page',1)\n paginator = Paginator(queryset,3)\n posts = paginator.page(1)\n\n try :\n posts = paginator.page(page)\n except PageNotAnInteger :\n posts = paginator.page(1)\n except EmptyPage :\n posts = paginator.page(paginator.num_pages)\n\n return render(request, 'topic_posts.html', {'topic': topic, 'posts': posts})\n\n\"\"\"\n","sub_path":"boards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"278339598","text":"# -*- coding: utf-8 -*-\n# author: jinwilliam\n\nfrom taskqueue import tqueue as wq\nimport time\n\nclass worker(object):\n \"\"\"worker\n \n \"\"\"\n def __init__(self):\n print('worker loaded.')\n\n def init(self):\n print('worker init.')\n wq.init()\n wq.subscribe_channel()\n return True\n \n def work(self):\n while True:\n time.sleep(0.1)\n msg = wq.pull()\n msg_channel = str(msg[1], encoding='utf-8')\n msg_content = str(msg[2], encoding='utf-8')\n if msg_channel == 'pypipe_task':\n print(msg_content)\n else:\n print(msg_channel)\n \n\n\nworker = worker()\n\nif __name__ == '__main__':\n worker.init()\n worker.work()\n","sub_path":"core/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195071383","text":"# -*- coding: utf-8 -*-\n\nfrom benedict import benedict\n\nimport pickle\nimport unittest\n\n\nclass pickle_test_case(unittest.TestCase):\n\n def test_pickle(self):\n\n d = {\n 'a': {},\n 'b': { 'x': 1 },\n 'c': [],\n 'd': [0, 1],\n 'e': 0.0,\n 'f': '',\n 'g': None,\n 'h': '0'\n }\n\n b = benedict(d, keypath_separator='/')\n b_encoded = pickle.dumps(b)\n # print(b_encoded)\n\n b_decoded = pickle.loads(b_encoded)\n # print(b_decoded)\n # print(b_decoded.keypath_separator)\n self.assertTrue(isinstance(b_decoded, benedict))\n self.assertEqual(b_decoded.keypath_separator, b.keypath_separator)\n self.assertEqual(b_decoded, b)\n","sub_path":"tests/dicts/io/test_pickle.py","file_name":"test_pickle.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507586148","text":"import torch\nimport argparse\nfrom apex.fp16_utils import FP16_Optimizer\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--local_rank\", default=0, type=int)\nargs = parser.parse_args()\n\ntorch.cuda.set_device(args.local_rank)\ntorch.distributed.init_process_group(backend='nccl',\n init_method='env://')\n\ntorch.backends.cudnn.benchmark = True\n\nN, D_in, D_out = 64, 1024, 16\n\nx = torch.randn(N, D_in, 
device='cuda', dtype=torch.half)\ny = torch.randn(N, D_out, device='cuda', dtype=torch.half)\n\nmodel = torch.nn.Linear(D_in, D_out).cuda().half()\nmodel = torch.nn.parallel.DistributedDataParallel(model,\n device_ids=[args.local_rank],\n output_device=args.local_rank)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n### Construct FP16_Optimizer ###\noptimizer = FP16_Optimizer(optimizer)\n###\n\nloss_fn = torch.nn.MSELoss()\n\nfor t in range(500):\n optimizer.zero_grad()\n y_pred = model(x)\n loss = loss_fn(y_pred.float(), y.float())\n ### Change loss.backward() to: ###\n optimizer.backward(loss)\n ###\n optimizer.step()\n\nprint(\"final loss = \", loss)\n\n","sub_path":"examples/deprecated_api/FP16_Optimizer_simple/distributed_pytorch/distributed_data_parallel.py","file_name":"distributed_data_parallel.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"386181366","text":"# -*- coding UTF-8 -*-\n\nimport urllib.request\n\nmmurl = \"http://mm.taobao.com/json/request_top_list.htm?type=0&page=\"\n\nSUCC = 0\nFAIL = 0\n\ndef getPicUrl(htmlurl,num):\n global SUCC , FAIL\n i = 0\n flg = 1\n tmpa = 0\n tmpb = 0\n while flg == 1:\n try:\n percontHTML = urllib.request.urlopen(htmlurl).read().decode('GBK')\n pica = '''src=\"http://img0'''\n picb = \".jpg\"\n tmpa = percontHTML.find(pica,tmpb)\n tmpb = percontHTML.find(picb,tmpa)\n imgurl = percontHTML[tmpa + 5:tmpb + 4]\n #print(imgurl)\n if imgurl == \"\":\n flg = 0\n print(\"Photo\" , num , \"Down_Done\")\n else:\n i += 1\n print(\"Downloading\" , num , \"Girl\" , i , \"Photo\")\n urllib.request.urlretrieve(imgurl,\"beautiful\\\\\" + str(num) + \"-\" + str(i) + \".jpg\")\n SUCC += 1\n except:\n print(\"Fail,Server Stop!\")\n FAIL += 1\n pass\n\n\ndef getPageUrl(mmurl):\n i = 1\n j = 1\n pa = 0\n pb = 0\n hreflist = []\n while i < 3:\n url = mmurl + str(i)\n #print(url)\n cont = urllib.request.urlopen(url).read().decode('GBK')\n diva = '''<div class=\"pic s60\">'''\n divb = '</div>'\n while j<11:\n pa = cont.find(diva, pb)#div截取\n pb = cont.find(divb, pa)\n divcont = cont[pa:pb]\n #print(divcont)\n hrefa = \"<a href=\"\n hrefb = \".htm\"\n aa = divcont.find(hrefa)#url截取\n ab = divcont.find(hrefb)\n acont = divcont[aa + 9:ab + 4]\n hreflist.append(acont)\n #print(acont)\n j += 1\n j = 1\n pa = 0\n pb = 0\n print(\"Downloading\" + str(i) + \"Photo_url\")\n i += 1\n return hreflist\n\nif __name__ == \"__main__\":\n global SUCC , FAIL\n hreflist = getPageUrl(mmurl)\n print(\"Total\", len(hreflist))\n for i in range(0,len(hreflist)):\n num = i + 1\n print(\"Start Download\" , num , \"Girl\")\n test = hreflist[i]\n getPicUrl(test,num)\n print(\"Success:\" , SUCC , \"****Fail:\" , FAIL)\n","sub_path":"0830/taobao.url.py","file_name":"taobao.url.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534307760","text":"from rethinkdb import RethinkDB\nfrom time import time as epoch\n\n\nclass DataBaseManager:\n\tdef __init__(self, client):\n\t\tself.r = RethinkDB()\n\t\tself.connection = self.r.connect(\n\t\t\tport=28015,\n\t\t\thost=\"localhost\"\n\t\t)\n\t\t# Initiate database connection\n\t\t\n\t\tself.client = client\n\t\t# Connection to discord\n\t\n\tdef get_table(self, table):\n\t\t# Returns the request table as a Cursor object, creating if not\n\t\t# exists\n\t\t\n\t\tt_list = self.r.db(\"tfr\").table_list().run(self.connection)\n\t\t# A list of 
all the table names\n\t\t\n\t\tif table in t_list:\n\t\t\t# Table exists, return that\n\t\t\treturn self.r.db(\"tfr\").table(table)\n\t\t\n\t\t# Create the table, it doesn't exist\n\t\tself.r.db(\"tfr\").table_create(table).run(self.connection)\n\t\t\n\t\t# Return the newly created table\n\t\treturn self.r.db(\"tfr\").table(table)\n\t\n\tdef get_prefix(self, guild):\n\t\t# Returns the prefix for the guild with given id\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure the guild (id) is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tguild_conf = list(\n\t\t\tself.get_table(\"guilds\").filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t)\n\t\t# Return all objects in \"guilds\" table with matching guild id\n\t\t# Utilising __iter__ function to convert the result into a list\n\t\t\n\t\tif guild_conf:\n\t\t\t# An object matching the guild id was found\n\t\t\tguild_conf = guild_conf[0]\n\t\t# Select the first match\n\t\t\n\t\tif guild_conf and \"prefix\" in guild_conf.keys():\n\t\t\t# Object found and it contains a prefix, return that\n\t\t\treturn guild_conf[\"prefix\"]\n\t\t\n\t\tif not guild_conf:\n\t\t\t# Object not found, create an entry for this guild\n\t\t\tself.get_table(\"guilds\").insert(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"prefix\": self.client.config.default_prefix\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t\n\t\telse:\n\t\t\t# Object found but doesn't contain a prefix, update it to\n\t\t\t# contain the default prefix\n\t\t\tself.get_table(\"guilds\").get(guild).update(\n\t\t\t\t{\n\t\t\t\t\t\"prefix\": self.client.config.default_prefix\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t\n\t\t# Return default prefix\n\t\treturn self.client.config.default_prefix\n\t\n\tdef update_prefix(self, guild, new_prefix):\n\t\t# Updates prefix for guild with given id to new_prefix\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"guilds\") # Connect to guilds table\n\t\t\t.filter(\n\t\t\t\t# Filter to return objects only matching guild (id)\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.update( # Update all matches to the new prefix\n\t\t\t\t{\n\t\t\t\t\t\"prefix\": new_prefix\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef add_xp(self, guild, member, amount):\n\t\t# Adds {amount} xp to {member} in {guild}\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tif not isinstance(member, str):\n\t\t\t# Ensure member is a string\n\t\t\tmember = str(member)\n\t\t\n\t\ttable = self.get_table(\"xp\")\n\t\t\n\t\tuser_xp = list(\n\t\t\ttable.filter(\n\t\t\t\t# Filter all entries in the \"xp\" table to return only\n\t\t\t\t# those with matching guild (id) and member (id) as\n\t\t\t\t# below\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"user\": member\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t)\n\t\t# Utilise __iter__ function to convert the result into a list\n\t\t\n\t\tif user_xp:\n\t\t\t# Some match was found\n\t\t\t\n\t\t\tuser_xp = user_xp[0]\n\t\t\t# Select first match only\n\t\t\t\n\t\t\tprevious = user_xp[\"xp\"]\n\t\t\t# The XP of the user before adding the new xp\n\t\t\t\n\t\t\tself.get_table(\"xp\").get(user_xp[\"id\"]).update(\n\t\t\t\t# Update user in the database, using the id of the\n\t\t\t\t# object returned to update only the correct user\n\t\t\t\t{\n\t\t\t\t\t\"xp\": previous + amount, # Add XP\n\t\t\t\t\t\"cooldown\": (\n\t\t\t\t\t\tepoch() + 
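# --- editorial sketch, not part of the original record ---
# Every query in this manager follows the same driver idiom:
# table(...).filter(...).update(...).run(conn). In isolation (assumes a
# local RethinkDB server and an existing tfr.guilds table):
from rethinkdb import RethinkDB

r = RethinkDB()
conn = r.connect(host='localhost', port=28015)

# read: all documents for one guild
rows = list(r.db('tfr').table('guilds').filter({'guild': '123'}).run(conn))

# write: update every match in a single round trip
r.db('tfr').table('guilds').filter({'guild': '123'}).update(
    {'prefix': '!'}
).run(conn)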
self.client.config.xp_cooldown\n\t\t\t\t\t) * 1000 # Update cooldown\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t\t\n\t\t\treturn previous + amount # Return new XP total\n\t\t\n\t\telse:\n\t\t\t# No entry found in XP table matching user and guild,\n\t\t\t# create one\n\t\t\ttable.insert(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild, # Add the guild id\n\t\t\t\t\t\"user\": member, # Add the user id\n\t\t\t\t\t\"xp\": amount, # Add the total xp\n\t\t\t\t\t\"cooldown\": (\n\t\t\t\t\t\tepoch() + self.client.config.xp_cooldown\n\t\t\t\t\t) * 1000 # Add the cooldown\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t\t\n\t\t\treturn amount\n\t\n\tdef xp_level(self, guild, member):\n\t\t# Returns the xp level (as an int) of the given member\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tif not isinstance(member, str):\n\t\t\t# Ensure member is a string\n\t\t\tmember = str(member)\n\t\t\n\t\tlevel = 1\n\t\t# Start at level 1\n\t\t\n\t\tresult = list(\n\t\t\tself.get_table(\"xp\").filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"user\": member\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t) # Attempt to find the users xp in the database\n\t\t\n\t\tif result:\n\t\t\t# If some user(s) were found, select the first only\n\t\t\tresult = result[0]\n\t\telse:\n\t\t\t# If no database entry with their xp was found, return\n\t\t\t# the starting level (1)\n\t\t\treturn level\n\t\t\n\t\txp = result[\"xp\"]\n\t\t\n\t\t# Logic adapted from Nadeko source levelling code for\n\t\t# consistency\n\t\t\n\t\tbase_xp = 36\n\t\ttotal_xp = 0\n\t\tlvl = 1\n\t\t\n\t\twhile True:\n\t\t\trequired = (base_xp + base_xp / 4.0 * (lvl - 1))\n\t\t\t\n\t\t\tif required + total_xp > xp:\n\t\t\t\tbreak\n\t\t\t\n\t\t\ttotal_xp += required\n\t\t\tlvl += 1\n\t\t\n\t\treturn lvl - 1\n\t\n\tdef in_xp_cooldown(self, member, guild):\n\t\t# Returns whether or not the member is currently on cooldown\n\t\t# to earn more xp\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tif not isinstance(member, str):\n\t\t\t# Ensure member is a string\n\t\t\tmember = str(member)\n\t\t\n\t\t# Attempt to find user in the database and then turn the\n\t\t# result into a python list\n\t\tresult = list(\n\t\t\tself.get_table(\"xp\").filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"user\": member\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\t\n\t\tif not result:\n\t\t\t# User not found in db\n\t\t\treturn False\n\t\t\n\t\tif \"cooldown\" not in result[0]:\n\t\t\t# Somehow not in their db entry, returning False will\n\t\t\t# cause them to gain xp, which will add a cooldown entry\n\t\t\t# if not one exists\n\t\t\treturn False\n\t\t\n\t\treturn result[0][\"cooldown\"] >= epoch() * 1000\n\t\n\tdef get_xp(self, member, guild):\n\t\t# Returns the total xp for {member} in {guild} as an int\n\t\t\n\t\txp_db = list(\n\t\t\tself.get_table(\"xp\").filter(\n\t\t\t\t{\n\t\t\t\t\t# Filter all entries in the \"xp\" table to only\n\t\t\t\t\t# those which match the guild and member id\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"member\": member\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t)\n\t\t# Convert to a python list so its easy to select the first\n\t\t# result\n\t\t\n\t\treturn {\n\t\t\t\"xp\": xp_db[0][\"xp\"] if xp_db else 0,\n\t\t\t# If a db entry exists, return the xp that contains,\n\t\t\t# if not, return 0\n\t\t\t\"level\": self.xp_level(\n\t\t\t\txp_db[0][\"xp\"] if xp_db else 0, member\n\t\t\t)\n\t\t\t# 
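# --- editorial sketch, not part of the original record ---
# The levelling loop above charges base_xp + base_xp/4 * (lvl - 1) XP per
# level, i.e. 36 + 9*(lvl - 1), so the cumulative XP needed to reach
# level L is the arithmetic series 36*L + 9*L*(L - 1)/2:
# 36, 81, 135, 198, ... for L = 1, 2, 3, 4.
def cumulative_xp(level):
    return 36 * level + 9 * level * (level - 1) / 2

assert [cumulative_xp(l) for l in (1, 2, 3, 4)] == [36, 81, 135, 198]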
Automatically generate the level using the same method\n\t\t\t# and return that also\n\t\t}\n\t\n\tdef add_qotd(self, question, thought, fact, guild, author):\n\t\t# Adds the information for a QOTD to the database\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tif not isinstance(author, str):\n\t\t\t# Ensure author is a string\n\t\t\tauthor = str(author)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"qotd\")\n\t\t\t.insert(\n\t\t\t\t# Insert QOTD dictionary into \"qotd\" table\n\t\t\t\t{\n\t\t\t\t\t\"question\": question,\n\t\t\t\t\t\"thought\": thought,\n\t\t\t\t\t\"fact\": fact,\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"author\": author\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t)\n\t\n\tdef get_qotd(self, guild):\n\t\t# Returns all qotd stored for the given guild\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"qotd\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t# Return all entries in \"qotd\" table matching the guild id\n\t\t\t.run(self.connection)\n\t\t) # Convert to a list for ease of use\n\t\n\tdef remove_qotd(self, qotd_id):\n\t\t# Removes the QOTD of given id from the database\n\t\t\n\t\t(\n\t\t\tself.get_table(\"qotd\")\n\t\t\t.get(qotd_id)\n\t\t\t.delete()\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef get_qotd_channel(self, guild):\n\t\t# Returns the id of the QOTD output channel for the given\n\t\t# guild, or None if not found\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\n\t\tconfig = list(\n\t\t\tself.get_table(\"guilds\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\t\n\t\tif len(config) == 0:\n\t\t\treturn None\n\t\t# Not really sure how this could happen,\n\t\t# A config entry should be created on the message event\n\t\t# before a command is ran if no prefix entry exists\n\t\t\n\t\tconfig = config[0]\n\t\t\n\t\tif \"qotd_channel\" in config.keys():\n\t\t\treturn config[\"qotd_channel\"]\n\t\t\n\t\treturn None\n\t\n\tdef set_qotd_channel(self, guild, channel):\n\t\t# Sets the QOTD output channel for the given guild\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\t\n\t\tif not isinstance(channel, str):\n\t\t\t# Ensure channel is a string\n\t\t\tchannel = str(channel)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"guilds\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": str(guild)\n\t\t\t\t}\n\t\t\t)\n\t\t\t.update(\n\t\t\t\t{\n\t\t\t\t\t\"qotd_channel\": str(channel)\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef set_qotd_role(self, guild, role):\n\t\t# Sets the role which will be mentioned when a QOTD is sent\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\t\n\t\tif not isinstance(role, str):\n\t\t\t# Ensure role is a string:\n\t\t\trole = str(role)\n\t\t\t\n\t\t(\n\t\t\tself.get_table(\"guilds\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": str(guild)\n\t\t\t\t}\n\t\t\t)\n\t\t\t.update(\n\t\t\t\t{\n\t\t\t\t\t\"qotd_role\": str(role)\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef get_qotd_role(self, guild):\n\t\t# Returns the role to be mentioned when a QOTD is sent,\n\t\t# should it exist else None\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = 
str(guild)\n\t\t\t\n\t\tconfig = list(\n\t\t\tself.get_table(\"guilds\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\t\n\t\tif len(config) == 0:\n\t\t\treturn None\n\t\t# Not really sure how this could happen,\n\t\t# A config entry should be created on the message event\n\t\t# before a command is ran if no prefix entry exists\n\t\t\n\t\tconfig = config[0]\n\t\t\n\t\tif \"qotd_role\" in config.keys():\n\t\t\treturn config[\"qotd_role\"]\n\t\t\n\t\treturn None\n\t\n\tdef get_all_qotd(self, guild):\n\t\t# Returns a list of all the QOTD for the guild in dictionary\n\t\t# form\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"qotd\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef qotd_exists(self, qotd, guild):\n\t\t# Checks a QOTD entry exists with the id {qotd} and it is\n\t\t# \"owned\" by the correct guild\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tres = (\n\t\t\tself.get_table(\"qotd\")\n\t\t\t.get(qotd)\n\t\t\t.run(self.connection)\n\t\t)\n\t\t\n\t\treturn res and res[\"guild\"] == guild\n\t\n\tdef get_filters(self, guild, channel=\"\"):\n\t\t# Returns all filters for the given guild, and channel if\n\t\t# specified\n\t\t\n\t\tmatch = {\n\t\t\t\"guild\": str(guild),\n\t\t\t\"type\": \"text\"\n\t\t}\n\t\t\n\t\tif channel:\n\t\t\tmatch[\"channel\"] = str(channel)\n\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"filters\")\n\t\t\t.filter(\n\t\t\t\tmatch\n\t\t\t)\n\t\t\t.run(\n\t\t\t\tself.connection\n\t\t\t)\n\t\t)\n\t\n\tdef insert_punishment(self, **options):\n\t\t# Creates a new punishment entry\n\t\t\n\t\tself.get_table(\"punishments\").insert(\n\t\t\toptions\n\t\t).run(self.connection)\n\t\n\tdef get_img_filters(self, guild):\n\t\t# Returns all image filters stored for the given guild as a list\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"filters\").filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"type\": \"image\"\n\t\t\t\t}\n\t\t\t).run(\n\t\t\t\tself.connection\n\t\t\t)\n\t\t)\n\t\n\tdef add_text_filter(self, punish, regex, channel, reason, guild):\n\t\t# Adds a text filter (Regular Expression) to the given guild\n\t\t# and channel\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\tif not isinstance(channel, str):\n\t\t\t# Ensure channel is a string\n\t\t\tchannel = str(channel)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"filters\")\n\t\t\t.insert(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"action\": punish,\n\t\t\t\t\t\"filter\": regex,\n\t\t\t\t\t\"channel\": channel,\n\t\t\t\t\t\"reason\": reason,\n\t\t\t\t\t\"type\": \"text\"\n\t\t\t\t}\n\t\t\t).run(self.connection)\n\t\t)\n\t\n\tdef filter_exists(self, guild, f_id, f_type):\n\t\t# Returns True if the filter being searched for exists,\n\t\t# is of the correct type and for the correct guild else False\n\t\t\n\t\treturn len(\n\t\t\tlist(\n\t\t\t\tself.get_table(\"filters\")\n\t\t\t\t.filter(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"guild\": (\n\t\t\t\t\t\t\tguild if isinstance(guild, str) else\n\t\t\t\t\t\t\tstr(guild)\n\t\t\t\t\t\t),\n\t\t\t\t\t\t\"id\": f_id,\n\t\t\t\t\t\t\"type\": 
f_type\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\t.run(self.connection)\n\t\t\t)\n\t\t) > 0\n\t\n\tdef insert_img_filter(\n\t\t\tself, guild, img, ignore_colour, reason,\n\t\t\tname, punishment\n\t\t):\n\t\t# Inserts an image filter to the database (b64 encoded\n\t\t# representation of image)\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"filters\")\n\t\t\t.insert(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"type\": \"image\",\n\t\t\t\t\t\"img\": img,\n\t\t\t\t\t\"ignore_colour\": ignore_colour,\n\t\t\t\t\t\"reason\": reason,\n\t\t\t\t\t\"name\": name,\n\t\t\t\t\t\"action\": punishment\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef img_filter_exists(self, guild, name):\n\t\t# Return True if there is one or more entry matching this\n\t\t# name and guild\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\t\t\n\t\treturn len(\n\t\t\tlist(\n\t\t\t\tself.get_table(\"filters\")\n\t\t\t\t.filter(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"guild\": str(guild),\n\t\t\t\t\t\t\"type\": \"image\",\n\t\t\t\t\t\t\"name\": name\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\t.run(self.connection)\n\t\t\t)\n\t\t) > 0\n\t\n\tdef get_img_filter(self, guild, f_id):\n\t\t# Returns the image filter matching guild and id\n\t\t\n\t\tif not isinstance(guild, str):\n\t\t\t# Ensure guild is a string\n\t\t\tguild = str(guild)\n\n\t\treturn list(\n\t\t\tself.get_table(\"filters\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": str(guild),\n\t\t\t\t\t\"type\": \"image\",\n\t\t\t\t\t\"id\": f_id\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\n\tdef add_custom_command(self, guild, name, description, body):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"customs\")\n\t\t\t.insert(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"name\": name.lower(),\n\t\t\t\t\t\"description\": description,\n\t\t\t\t\t\"body\": body\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\n\tdef get_customs(self, guild):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"customs\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef remove_custom(self, name, guild):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"customs\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"name\": name.lower(),\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.delete()\n\t\t\t.run(self.connection)\n\t\t)\n\n\tdef custom_command_exists(self, name, guild):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\n\t\treturn len(\n\t\t\tlist(\n\t\t\t\tself.get_table(\"customs\")\n\t\t\t\t.filter(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"name\": name.lower(),\n\t\t\t\t\t\t\"guild\": guild\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\t.run(self.connection)\n\t\t\t)\n\t\t) > 0\n\n\t@staticmethod\n\tdef get(iterable, key):\n\t\tfor i in iterable:\n\t\t\tif key(i):\n\t\t\t\treturn i\n\t\treturn None\n\n\tdef get_guild_currency(self, guild):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\n\t\treturn list(\n\t\t\tself.get_table(\"currency\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\n\tdef get_currency(self, guild, member):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\t\n\t\tif not 
isinstance(member, str):\n\t\t\tmember = str(member)\n\t\t\t\n\t\tguild_cur = self.get_guild_currency(guild)\n\t\t\n\t\tmember_cur = self.get(\n\t\t\tguild_cur,\n\t\t\tlambda m: m[\"member\"] == member\n\t\t)\n\t\t\n\t\tif not guild_cur or not member_cur:\n\t\t\tblank = {\n\t\t\t\t\"guild\": guild,\n\t\t\t\t\"member\": member,\n\t\t\t\t\"cur\": 0,\n\t\t\t\t\"gamblingSuspended\": False\n\t\t\t}\n\t\t\t\n\t\t\treturn (\n\t\t\t\tself.get_table(\"currency\")\n\t\t\t\t.insert(\n\t\t\t\t\tblank,\n\t\t\t\t\treturn_changes=True\n\t\t\t\t)\n\t\t\t\t.run(self.connection)\n\t\t\t)[\"changes\"][0][\"new_val\"]\n\t\t\t\n\t\treturn member_cur\n\n\tdef update_member_cur(self, guild, member, new_amount):\n\t\told_value = self.get_currency(guild, member)\n\t\t\n\t\t(\n\t\t\tself.get_table(\"currency\")\n\t\t\t.get(old_value[\"id\"])\n\t\t\t.update(\n\t\t\t\t{\n\t\t\t\t\t\"cur\": new_amount\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n\t\t\n\tdef is_gambling_suspended(self, guild, member):\n\t\treturn (\n\t\t\tself.get_currency(guild, member)\n\t\t\t.get(\"gamblingSuspended\", False)\n\t\t)\n\n\tdef get_shop_items(self, guild, item_type):\n\t\tif not isinstance(guild, str):\n\t\t\tguild = str(guild)\n\t\t\t\n\t\treturn list(\n\t\t\tself.get_table(\"shop\")\n\t\t\t.filter(\n\t\t\t\t{\n\t\t\t\t\t\"guild\": guild,\n\t\t\t\t\t\"type\": item_type.lower()\n\t\t\t\t}\n\t\t\t)\n\t\t\t.run(self.connection)\n\t\t)\n","sub_path":"Utils/DataBaseManager.py","file_name":"DataBaseManager.py","file_ext":"py","file_size_in_byte":16341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312382296","text":"import unittest\nimport logging\nfrom nba_percolator import Percolator\n\nclass CreateTestCase(unittest.TestCase):\n\n source = 'specimen'\n config = {\n 'elastic': {\n 'host': 'elasticsearch'\n },\n 'paths': {\n 'incoming': '/shared-data/incoming',\n 'processed': '/shared-data/processed',\n 'jobs': '/shared-data/jobs',\n 'failed': '/shared-data/failed',\n 'done': '/shared-data/done',\n 'delta': '/shared-data/incremental'\n },\n 'sources':\n {\n 'specimen':\n {\n 'table': 'testspecimen',\n 'id': 'id',\n 'enrich': True,\n 'code': 'XC',\n 'incremental': False,\n 'path': '/shared-data/test'\n }\n },\n 'postgres':\n {\n 'host': 'postgres',\n 'user': 'postgres',\n 'pass': 'postgres',\n 'db': 'ppdb'\n }\n }\n\n def __init__(self, *args, **kwargs):\n super(CreateTestCase, self).__init__(*args, **kwargs)\n logger = logging.getLogger('percolator')\n logger.setLevel(logging.ERROR)\n self.pp = Percolator(config=self.config)\n self.pp.set_source(self.source)\n try:\n self.pp.generate_mapping(create_tables=True)\n except:\n pass\n\n def test_create_delete_record(self):\n recordID = \"test123\"\n status = 'REJECTED'\n deleteRecord = self.pp.create_delete_record(self.source, recordID, status)\n\n self.assertIsInstance(deleteRecord, dict)\n self.assertIsNotNone(deleteRecord.get('sourceSystemCode'))\n self.assertEqual(deleteRecord.get('unitID'), recordID)\n self.assertEqual(deleteRecord.get('status'), status)\n\n def test_create_name_summary(self):\n vernacularName = {\n 'test': False,\n 'name': 'vernacularName',\n 'language': 'NL',\n 'other': 'not important',\n 'forgetit': 'removed'\n }\n\n nameSummary = self.pp.create_name_summary(vernacularName)\n self.assertIsInstance(nameSummary, dict)\n self.assertEqual(nameSummary.get('name'), vernacularName.get('name'))\n self.assertEqual(nameSummary.get('language'), vernacularName.get('language'))\n 
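# --- editorial sketch, not part of the original record ---
# get_currency() above relies on insert(..., return_changes=True) so the
# freshly written document (including its generated id) comes back in
# the same reply. The shape of that reply, in isolation:
from rethinkdb import RethinkDB

r = RethinkDB()
conn = r.connect(host='localhost', port=28015)

reply = r.db('tfr').table('currency').insert(
    {'guild': '123', 'member': '456', 'cur': 0},
    return_changes=True
).run(conn)
new_doc = reply['changes'][0]['new_val']   # carries the generated 'id'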
self.assertIsNone(nameSummary.get('other'))\n\n def test_create_scientific_summary(self):\n scientificName = {\n 'test': False,\n 'other': 'not important',\n 'forgetit': 'removed',\n 'fullScientificName': 'fullScientificName',\n 'taxonomicStatus': 'taxonomicStatus',\n 'genusOrMonomial': 'genusOrMonomial',\n 'subgenus': 'subgenus',\n 'specificEpithet': 'specificEpithet',\n 'infraspecificEpithet': 'infraspecificEpithet',\n 'authorshipVerbatim': 'authorshipVerbatim'\n }\n\n scientificSummary = self.pp.create_scientific_summary(scientificName)\n self.assertIsInstance(scientificSummary, dict)\n self.assertEqual(scientificSummary.get('fullScientificName'), scientificName.get('fullScientificName'))\n self.assertEqual(scientificSummary.get('subgenus'), scientificName.get('subgenus'))\n self.assertIsNone(scientificSummary.get('other'))\n\n def test_create_enrichment(self):\n vernacularName = {\n 'test': False,\n 'name': 'vernacularName',\n 'language': 'NL',\n 'other': 'not important',\n 'forgetit': 'removed'\n }\n scientificName = {\n 'test': False,\n 'other': 'not important',\n 'forgetit': 'removed',\n 'fullScientificName': 'fullScientificName',\n 'taxonomicStatus': 'taxonomicStatus',\n 'genusOrMonomial': 'genusOrMonomial',\n 'subgenus': 'subgenus',\n 'specificEpithet': 'specificEpithet',\n 'infraspecificEpithet': 'infraspecificEpithet',\n 'authorshipVerbatim': 'authorshipVerbatim'\n }\n rec = {\n 'id': 'TEST123',\n 'sourceSystem': {\n 'code': 'TEST'\n },\n 'acceptedName': {\n 'scientificNameGroup': 'scientificNameGroup'\n },\n 'defaultClassification': 'test',\n 'vernacularNames': [vernacularName],\n 'synonyms': [scientificName]\n }\n enrichment = self.pp.create_enrichment(rec, 'test')\n\n self.assertIsInstance(enrichment, dict)\n self.assertIsNotNone(enrichment.get('taxonId'))\n self.assertIsNotNone(enrichment.get('synonyms'))\n self.assertIsNotNone(enrichment.get('sourceSystem'))\n self.assertIsNotNone(enrichment.get('sourceSystem').get('code'))\n self.assertIsNone(enrichment.get('defaultClassification'))\n\n def test_create_col_enrichment(self):\n vernacularName = {\n 'test': False,\n 'name': 'vernacularName',\n 'language': 'NL',\n 'other': 'not important',\n 'forgetit': 'removed'\n }\n scientificName = {\n 'test': False,\n 'other': 'not important',\n 'forgetit': 'removed',\n 'fullScientificName': 'fullScientificName',\n 'taxonomicStatus': 'taxonomicStatus',\n 'genusOrMonomial': 'genusOrMonomial',\n 'subgenus': 'subgenus',\n 'specificEpithet': 'specificEpithet',\n 'infraspecificEpithet': 'infraspecificEpithet',\n 'authorshipVerbatim': 'authorshipVerbatim'\n }\n rec = {\n 'id': 'TEST123',\n 'sourceSystem': {\n 'code': 'COL'\n },\n 'acceptedName': {\n 'scientificNameGroup': 'scientificNameGroup'\n },\n 'defaultClassification': 'test',\n 'vernacularNames': [vernacularName],\n 'synonyms': [scientificName]\n }\n enrichment = self.pp.create_enrichment(rec, 'test')\n self.assertIsNotNone(enrichment.get('defaultClassification'))\n\n def test_cache_taxon(self):\n systemCode = 'XC'\n rec = [{\n 'acceptedName': {\n 'scientificNameGroup': 'TEST'\n },\n 'id': 'test123'\n }]\n\n self.pp.cache_taxon_record(rec, systemCode)\n\n taxon = self.pp.get_taxon('TEST', self.source)\n self.assertIsNotNone(taxon)\n self.assertIsInstance(taxon, dict)\n self.assertEqual(taxon.get('id'), rec.get('id'))\n\n","sub_path":"tests/create_test.py","file_name":"create_test.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
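# --- editorial sketch, not part of the original record ---
# The test case above builds its Percolator inside __init__; unittest's
# intended hook for per-test fixtures is setUp(), which runs before each
# test method and attributes construction failures to the test itself.
# (make_fixture is a hypothetical stand-in for Percolator(config=...).)
import unittest

def make_fixture():
    return object()

class FixtureExample(unittest.TestCase):
    def setUp(self):
        self.pp = make_fixture()

    def test_fixture_exists(self):
        self.assertIsNotNone(self.pp)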
+{"seq_id":"571792920","text":"#!/usr/bin/env python\n\"\"\"Example: 2-4\nContrast with listcomp_tshirts Ex 2-4\"\"\"\n\ncolors = ['black', 'white']\nsizes = ['S', 'M', 'L']\n\n# listcomp:\n# tshirts = [(color, size) for color in colors for size in sizes]\n\nfor tshirt in ('%s %s' % (c, s) for c in colors for s in sizes):\n print(tshirt)\n","sub_path":"02_04_genexps_tshirts.py","file_name":"02_04_genexps_tshirts.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374244418","text":"# Python 3.7.4 64-bit | Qt 5.9.6 | PyQt5 5.9.2 | Windows 10\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Kanak Choudhury\r\n\"\"\"\r\n\r\npath = 'Kanak_lab1\\\\';\r\n\r\nimport numpy as np\r\nimport re\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# function to classify new data\r\n\r\ndef classify(indx, total_class, vocab_len, dt, prob, condi_prob):\r\n a1 = np.zeros([1,vocab_len]);\r\n a2 = np.where(dt[:,0]==indx+1)[0];\r\n a1[0,dt[a2,1]-1] = dt[a2,2];\r\n res = np.argmax(np.transpose(np.log(prob)) + sum(\r\n np.transpose(a1*np.log(np.transpose(condi_prob)))))+1;\r\n return int(res);\r\n\r\n# data reading\r\n\r\nprint(\" Reading Data...\")\r\ntr = open(path+'train_data.csv', 'r').read().splitlines();\r\ndim_tr = np.shape(tr)[0];\r\n\r\nts = open(path+'test_data.csv', 'r').read().splitlines();\r\ndim_ts = np.shape(ts)[0];\r\n\r\ntr_lb = open(path+'train_label.csv', 'r').read().splitlines();\r\ndim_tr_lb = np.shape(tr_lb)[0];\r\n\r\nts_lb = open(path+'test_label.csv', 'r').read().splitlines();\r\ndim_ts_lb = np.shape(ts_lb)[0];\r\n\r\nvocab = open(path+'vocabulary.txt', 'r').read().splitlines();\r\ndim_vocab = np.shape(vocab)[0];\r\n\r\nmaxdim = max(dim_tr, dim_ts, dim_tr_lb, dim_ts_lb)\r\n\r\n\r\ntr_dt=np.zeros([dim_tr,3]);\r\nts_dt=np.zeros([dim_ts,3]);\r\ntr_dt_lb=np.zeros([dim_tr_lb]);\r\nts_dt_lb=np.zeros([dim_ts_lb]);\r\n\r\nfor i in range(0,dim_tr):\r\n if i < dim_tr:\r\n vec_tr = re.split(r'\\W+', tr[i])\r\n tr_dt[i,:]=vec_tr;\r\n\r\n if i < dim_ts:\r\n vec_ts = re.split(r'\\W+', ts[i])\r\n ts_dt[i,:]=vec_ts;\r\n\r\n if i < dim_tr_lb:\r\n vec_tr_lb = re.split(r'\\W+', tr_lb[i])\r\n tr_dt_lb[i]=vec_tr_lb[0];\r\n\r\n if i < dim_ts_lb:\r\n vec_ts_lb = re.split(r'\\W+', ts_lb[i])\r\n ts_dt_lb[i]=vec_ts_lb[0];\r\ntr_dt = tr_dt.astype(int)\r\nts_dt = ts_dt.astype(int)\r\ntr_dt_lb = tr_dt_lb.astype(int)\r\nts_dt_lb = ts_dt_lb.astype(int)\r\n\r\n\r\ndel i, tr, vec_tr\r\ndel ts, vec_ts\r\ndel tr_lb, vec_tr_lb\r\ndel ts_lb, vec_ts_lb\r\n\r\n\r\n# Prior Probability\r\nomega = open(path+'map.csv', 'r').read().splitlines();\r\ndim_omega = np.shape(omega)[0]\r\n\r\nnclass_tr=[tr_dt_lb.tolist().count(i+1) for i in range(0,dim_omega)]\r\n\r\npriorl_prob = [nclass_tr[i]/dim_tr_lb for i in range(0,dim_omega)];\r\n\r\nnclass_ts=[ts_dt_lb.tolist().count(i+1) for i in range(0,dim_omega)]\r\n\r\nfor i in range(0,dim_omega):\r\n print(\" P(omega = %2d) = %.4f\" %(i+1,priorl_prob[i]))\r\n\r\ncond_prob = np.zeros([dim_vocab,dim_omega]);\r\ncont_vec = np.zeros([dim_vocab,dim_omega]);\r\n\r\n\r\n# Conditional Probability\r\nprint(\"Conditional Probablities...\")\r\nfor j in range(0,dim_omega):\r\n b1 = np.where(tr_dt_lb == j+1)[0];\r\n for i in range(0,dim_tr):\r\n if tr_dt[i,0] in b1:\r\n cont_vec[tr_dt[i,1]-1,j] = cont_vec[tr_dt[i,1]-1,j] + tr_dt[i,2];\r\n total_count = sum(cont_vec[:,j]);\r\n cond_prob[:,j] = (cont_vec[:,j] + 1)/(total_count + dim_vocab);\r\n\r\ndel b1, total_count\r\n\r\n# traing Data 
Classification\r\nprint(\"\\n Classifying Train Data...\")\r\ntr_pred = np.zeros(dim_tr_lb);\r\nfor k in range(0,dim_tr_lb):\r\n tr_pred[k] = classify(k, dim_omega, dim_vocab, tr_dt, priorl_prob, cond_prob)\r\n\r\n\r\ntr_conf_mat = confusion_matrix(tr_dt_lb, tr_pred)\r\nprint(\" Confusion matrix for train data-\")\r\nprint(tr_conf_mat)\r\nprint(\"\\n Overall accuracy for train data = %.4f\" %(np.trace(tr_conf_mat)/dim_tr_lb))\r\nfor i in range(0,dim_omega):\r\n print(\" Class accuracy for group %.2d = %.4f\" %(i+1,tr_conf_mat[i,i]/nclass_tr[i]))\r\n\r\n\r\n# Test Data Classification\r\nprint(\"\\n Classifying Test Data...\")\r\nts_pred = np.zeros(dim_ts_lb);\r\nfor k in range(0,dim_ts_lb):\r\n ts_pred[k] = classify(k, dim_omega, dim_vocab, ts_dt, priorl_prob, cond_prob)\r\n\r\nts_conf_mat = confusion_matrix(ts_dt_lb, ts_pred)\r\nprint(\" Confusion matrix for test data-\")\r\nprint(ts_conf_mat)\r\nprint(\"\\n Overall accuracy for test data = %.4f\" %(np.trace(ts_conf_mat)/dim_ts_lb))\r\nfor i in range(0,dim_omega):\r\n print(\" Class accuracy for group %.2d = %.4f\" %(i+1,ts_conf_mat[i,i]/nclass_ts[i]))","sub_path":"Lab1/Kanak_NaiveBayes.py","file_name":"Kanak_NaiveBayes.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303869505","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn.functional as F\nimport math\nfrom utils.model_utils import *\nfrom utils.ri_utils import *\nfrom models.vrcnet import Linear_ResBlock\n\nclass local_decoder(nn.Module):\n def __init__(self, knn=32, dilation=2, mlp=[64, 128, 128], mlp_merge=[512, 1024, 1024], input_channel=3):\n super(local_decoder, self).__init__()\n self.knn = knn\n self.dilation = dilation\n \n self.mlp1 = nn.Sequential(nn.Conv2d(input_channel, mlp[0], kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(mlp[0], mlp[1], kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(mlp[1], mlp[2], kernel_size=1))\n self.mlp2 = nn.Sequential(nn.Conv2d(input_channel, mlp[0], kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(mlp[0], mlp[1], kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(mlp[1], mlp[2], kernel_size=1))\n self.mlp_merge = nn.Sequential(nn.Conv1d(2*mlp[2], mlp_merge[0], kernel_size=1),\n nn.ReLU(),\n nn.Conv1d(mlp_merge[0], mlp_merge[1], kernel_size=1),\n nn.ReLU(),\n nn.Conv1d(mlp_merge[1], mlp_merge[2], kernel_size=1))\n\n def forward(self, points, features=None):\n batch_size, _, num_points = points.size()\n \n if features is None:\n points_knn = get_edge_features(points, knn(points, self.knn))\n points_knn = points_knn - points.unsqueeze(2).repeat(1,1,self.knn,1)\n \n points_knn_d = get_edge_features(points, dilated_knn(points, self.knn, self.dilation))\n points_knn_d = points_knn_d - points.unsqueeze(2).repeat(1,1,self.knn,1)\n \n a1, a2, a3, points_knn_d = acenn_rir_feature(points_knn_d.transpose(1,3).contiguous(),\n points.transpose(1,2).contiguous())\n _, _, _, points_knn = point_projection_feature(points_knn.transpose(1,3).contiguous(), a1, a2, a3)\n \n points_knn = points_knn.transpose(1,3).contiguous()\n points_knn_d = points_knn_d.transpose(1,3).contiguous()\n else:\n points_knn = get_edge_features(features, knn(points, self.knn))\n points_knn_d = get_edge_features(features, dilated_knn(points, self.knn, self.dilation))\n \n points_knn = self.mlp1(points_knn)\n points_knn_d = self.mlp2(points_knn_d)\n \n local_feature_knn, _ = torch.max(points_knn, 2)\n local_feature_knn_d, 
_ = torch.max(points_knn_d, 2)\n\n local_feature = torch.cat((local_feature_knn, local_feature_knn_d), 1)\n local_feature = self.mlp_merge(local_feature)\n return local_feature\n \n \nclass RIEncoder(nn.Module):\n def __init__(self, input_size=3, d_model=1024, num_coarse=1024, n_head=1, dropout=0.0):\n super(RIEncoder, self).__init__()\n self.num_coarse = num_coarse\n self.d_model = d_model\n self.n_head = n_head\n self.dropout = dropout\n self.conv1 = nn.Conv1d(input_size, 128, 1)\n self.conv2 = nn.Conv1d(128, 256, 1)\n self.conv3 = nn.Conv1d(512, 512, 1)\n self.conv4 = nn.Conv1d(512, d_model, 1)\n \n self.fc_qkv = nn.Linear(d_model, d_model * 3)\n self.attn = nn.MultiheadAttention(self.d_model, self.n_head, self.dropout)\n self.ln = nn.LayerNorm(d_model)\n self.fcs1 = nn.Linear(d_model, d_model)\n self.fcs2 = nn.Linear(d_model, 1)\n \n self.fc1 = nn.Linear(d_model, d_model)\n self.fc2 = nn.Linear(d_model, d_model)\n self.fc3 = nn.Linear(d_model, self.num_coarse * 3)\n\n def forward(self, points):\n batch_size, _, num_points = points.size()\n div_batch_size = batch_size // 8\n x = F.relu(self.conv1(points))\n x = self.conv2(x)\n global_feature, _ = torch.max(x, 2)\n \n x = torch.cat((x, global_feature.view(batch_size, -1, 1).repeat(1, 1, num_points).contiguous()), 1)\n x = F.relu(self.conv3(x))\n x = self.conv4(x)\n global_feature, _ = torch.max(x, 2)\n global_feature = global_feature.view(batch_size, -1)\n global_feature_tup = global_feature.chunk(8, dim=0)\n global_feature = torch.stack(global_feature_tup)\n \n qkv = self.fc_qkv(global_feature)\n q, k, v = torch.split(qkv, self.d_model, dim=2)\n multi_attn, _ = self.attn(q, k, v)\n sel = self.ln(multi_attn + global_feature)\n sel = F.softmax(self.fcs2(self.fcs1(sel)))\n global_feature = (sel * global_feature).sum(dim=0, keepdim=False)\n _, inx = sel.squeeze().max(dim=0, keepdim=False)\n \n coarse = F.relu(self.fc1(global_feature))\n coarse = F.relu(self.fc2(coarse))\n coarse = self.fc3(coarse).view(-1, 3, self.num_coarse)\n \n return global_feature, coarse, inx\n\n\nclass RIDecoder(nn.Module):\n def __init__(self, num_coarse, num_fine, scale, global_feature_size, local_feature_size, k=32, d=2):\n super(RIDecoder, self).__init__()\n self.num_coarse = num_coarse\n self.num_fine = num_fine\n self.local_feature_size = local_feature_size\n self.cat_feature_num = 2 + 3 + global_feature_size + local_feature_size\n\n self.scale = scale\n self.grid = gen_grid_up(2 ** (int(math.log2(scale))), 0.05).cuda().contiguous()\n self.conv1 = nn.Conv1d(self.cat_feature_num, 512, 1)\n self.conv2 = nn.Conv1d(512, 512, 1)\n self.conv3 = nn.Conv1d(512, 3, 1)\n \n self.decoder = local_decoder(k, d)\n\n def forward(self, global_feat, sampled_points):\n batch_size = sampled_points.size()[0]\n local_feat = self.decoder(sampled_points)\n\n grid = self.grid.clone().detach()\n grid_feat = grid.unsqueeze(0).repeat(batch_size, 1, self.num_coarse).contiguous().cuda()\n \n local_feat = torch.cat((sampled_points, local_feat), 1)\n\n point_feat = ((local_feat.transpose(1, 2).contiguous()).unsqueeze(2).repeat(1, 1, self.scale, 1).view(-1, self.num_fine, 3+self.local_feature_size)).transpose(1, 2).contiguous()\n\n global_feat = global_feat.unsqueeze(2).repeat(1, 1, self.num_fine)\n\n feat = torch.cat((grid_feat, point_feat, global_feat), 1)\n\n center = ((sampled_points.transpose(1, 2).contiguous()).unsqueeze(2).repeat(1, 1, self.scale, 1).view(-1, self.num_fine, 3)).transpose(1, 2).contiguous()\n\n fine = self.conv3(F.relu(self.conv2(F.relu(self.conv1(feat))))) + center\n 
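# Added editorial note (hedged, inferred shapes - not from the original repo): at this\n # point 'feat' stacks grid, per-point and global features; conv1-conv3 regress per-point\n # 3-D offsets, and adding the tiled coarse centers folds each seed point into 'scale'\n # fine points, so 'fine' is expected to have shape (batch_size, 3, num_fine).\n 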
return fine\n \n\nclass Model(nn.Module):\n def __init__(self, args, global_feature_size=1024, local_feature_size=1024, k=32, d=2):\n super(Model, self).__init__()\n\n self.input_size = args.input_size\n self.num_coarse = args.num_coarse\n self.num_points = args.num_points\n self.train_loss = args.loss\n self.scale = self.num_points // self.num_coarse\n\n self.encoder = RIEncoder(3, global_feature_size, self.num_coarse)\n self.decoder = RIDecoder(self.num_coarse, self.num_points, self.scale, global_feature_size, local_feature_size, k, d)\n\n def forward(self, x, gt, is_training=True, mean_feature=None, alpha=None):\n batch_size = x.size()[0]\n a1, a2, a3, x = point_projection_feature(x.transpose(1, 2).contiguous(), method='pca')\n x = x.transpose(1, 2).contiguous()\n org_point_input = x\n \n x0, x1, x2 = x.chunk(3, dim=1)\n cloud_1 = x\n #cloud_1 = torch.cat([x0, x1, x2], dim=1)\n cloud_2 = torch.cat([x0, x1, -x2], dim=1)\n cloud_3 = torch.cat([x0, -x1, x2], dim=1)\n cloud_4 = torch.cat([x0, -x1, -x2], dim=1)\n cloud_5 = torch.cat([-x0, x1, x2], dim=1)\n cloud_6 = torch.cat([-x0, x1, -x2], dim=1)\n cloud_7 = torch.cat([-x0, -x1, x2], dim=1)\n cloud_8 = torch.cat([-x0, -x1, -x2], dim=1)\n points = torch.cat([cloud_1, cloud_2, cloud_3, cloud_4,\n cloud_5, cloud_6, cloud_7, cloud_8], dim=0)\n \n feat, out1, inx = self.encoder(points)\n \n points = torch.cat((out1, org_point_input), 2)\n _, sampled_points = furthest_point_sampling(points.transpose(1,2).contiguous(), \n self.num_coarse)\n sampled_points = sampled_points.transpose(1,2).contiguous()\n \n out2 = self.decoder(feat, sampled_points)\n \n for i in range(batch_size):\n if inx[i] >= 4:\n a1[i] *= -1\n if inx[i] % 4 >= 2:\n a2[i] *= -1\n if inx[i] % 2 == 1:\n a3[i] *= -1\n \n out1 = inverse_point_projection_feature(a1, a2, a3, out1.transpose(1,2).contiguous())\n out2 = inverse_point_projection_feature(a1, a2, a3, out2.transpose(1,2).contiguous())\n\n if is_training:\n if self.train_loss == 'emd':\n loss1 = calc_emd(out1, gt)\n loss2 = calc_emd(out2, gt)\n elif self.train_loss == 'cd':\n loss1, _ = calc_cd(out1, gt)\n loss2, _ = calc_cd(out2, gt)\n else:\n raise NotImplementedError('Train loss is either CD or EMD!')\n\n total_train_loss = loss1.mean() + loss2.mean() * alpha\n return out2, loss2, total_train_loss\n else:\n #emd = calc_emd(out2, gt, eps=0.004, iterations=3000)\n cd_p, cd_t, f1 = calc_cd(out2, gt, calc_f1=True)\n return {'out1': out1, 'out2': out2, 'cd_p': cd_p, 'cd_t': cd_t, 'f1': f1}\n \n ","sub_path":"models/rinet_v2.py","file_name":"rinet_v2.py","file_ext":"py","file_size_in_byte":9904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224097146","text":"# Copyright 2016 ClusterHQ Inc. 
See LICENSE file for details.\n\"\"\"\nOperation to create a container.\n\"\"\"\n\nfrom functools import partial\nfrom datetime import timedelta\nfrom uuid import UUID, uuid4\n\nfrom pyrsistent import PClass, field\nfrom zope.interface import implementer\n\nfrom flocker.apiclient import MountedDataset\nfrom flocker.common import gather_deferreds, loop_until, timeout as _timeout\nfrom flocker.control import DockerImage\n\nfrom benchmark._interfaces import IProbe, IOperation\nfrom benchmark.operations._common import select_node\n\n\nDEFAULT_TIMEOUT = timedelta(minutes=10)\n\n\ndef loop_until_state_found(reactor, get_states, state_matches, timeout):\n \"\"\"\n Loop until a state has been reached.\n\n :param get_states: Callable returning a Deferred firing with a list\n of states.\n :param state_matches: Callable that accepts a state parameter, and\n returns a boolean indicating whether the state matches.\n :param timedelta timeout: Maximum time to wait for state to be found.\n :return Deferred[Any]: The matching state.\n \"\"\"\n def state_reached():\n d = get_states()\n\n def find_match(states):\n for state in states:\n if state_matches(state):\n return state\n return None\n d.addCallback(find_match)\n\n return d\n\n d = loop_until(reactor, state_reached)\n _timeout(reactor, d, timeout.total_seconds())\n return d\n\n\ndef create_dataset(\n reactor, control_service, node_uuid, dataset_id, volume_size,\n timeout=DEFAULT_TIMEOUT\n):\n \"\"\"\n Create a dataset, then wait for it to be mounted.\n\n :param IReactorTime reactor: Twisted Reactor.\n :param IFlockerAPIV1Client control_service: Benchmark control\n service.\n :param UUID node_uuid: Node on which to create dataset.\n :param UUID dataset_id: ID for created dataset.\n :param int volume_size: Size of volume in bytes.\n :param timedelta timeout: Maximum time to wait for dataset to be\n mounted.\n :return Deferred[DatasetState]: The state of the created dataset.\n \"\"\"\n\n d = control_service.create_dataset(\n primary=node_uuid,\n maximum_size=volume_size,\n dataset_id=dataset_id,\n )\n\n def dataset_matches(dataset, state):\n return (\n state.dataset_id == dataset.dataset_id and\n state.primary == dataset.primary and\n state.path is not None\n )\n\n d.addCallback(\n lambda dataset: loop_until_state_found(\n reactor, control_service.list_datasets_state,\n partial(dataset_matches, dataset), timeout\n )\n )\n\n return d\n\n\ndef create_container(\n reactor, control_service, node_uuid, name, image, volumes=None,\n timeout=DEFAULT_TIMEOUT\n):\n \"\"\"\n Create a container, then wait for it to be running.\n\n :param IReactorTime reactor: Twisted Reactor.\n :param IFlockerAPIV1Client control_service: Benchmark control\n service.\n :param UUID node_uuid: Node on which to start the container.\n :param unicode name: Name of the container.\n :param DockerImage image: Docker image for the container.\n :param Optional[Sequence[MountedDataset]] volumes: Volumes to attach\n to the container.\n :param timedelta timeout: Maximum time to wait for container to be\n created.\n :return Deferred[ContainerState]: The state of the created container.\n \"\"\"\n\n d = control_service.create_container(node_uuid, name, image, volumes)\n\n def container_matches(container, state):\n return (\n container.name == state.name and\n container.node_uuid == state.node_uuid and\n state.running\n )\n\n d.addCallback(\n lambda container: loop_until_state_found(\n reactor, control_service.list_containers_state,\n partial(container_matches, container), timeout\n )\n )\n\n return 
d\n\n\ndef delete_container(reactor, control_service, container):\n \"\"\"\n Delete a container, then wait for it to be removed.\n\n :param IReactorTime reactor: Twisted Reactor.\n :param IFlockerAPIV1Client control_service: Benchmark control\n service.\n :param ContainerState container: Container to be removed.\n :return Deferred[ContainerState]: The state before removal.\n \"\"\"\n\n def container_removed(expected):\n \"\"\"\n Check whether a container has been removed (deleted and stopped).\n\n :param ContainerState expected: A container state to match against the\n results of ``list_containers_state``.\n :return Deferred[Optional[ContainerState]]: ``None`` if the\n ``expected`` container is found, or ``expected`` if it is not\n found.\n \"\"\"\n d = control_service.list_containers_state()\n\n def container_matches(inspecting, expected):\n return (\n expected.name == inspecting.name and\n expected.node_uuid == inspecting.node_uuid and\n inspecting.running\n )\n\n def no_running_match(existing_state):\n for state in existing_state:\n if container_matches(state, expected):\n return None\n return expected\n d.addCallback(no_running_match)\n return d\n\n d = control_service.delete_container(container.name)\n\n def loop_until_container_removed(_ignore):\n return loop_until(reactor, partial(container_removed, container))\n d.addCallback(loop_until_container_removed)\n\n return d\n\n\n@implementer(IProbe)\nclass CreateContainerProbe(PClass):\n \"\"\"\n Probe to create a container and wait for cluster to converge.\n \"\"\"\n\n reactor = field(mandatory=True)\n control_service = field(mandatory=True)\n node_uuid = field(type=UUID, mandatory=True)\n name = field(type=unicode, mandatory=True)\n image = field(mandatory=True)\n dataset_id = field(type=UUID, mandatory=True)\n mountpoint = field(type=unicode, mandatory=True)\n\n @classmethod\n def setup(\n cls, reactor, control_service, name, image, volume_size, mountpoint\n ):\n \"\"\"\n Create a probe.\n\n :param IReactorTime reactor: Twisted Reactor.\n :param IFlockerAPIV1Client control_service: Benchmark control service.\n :param unicode name: Name for created container.\n :param DockerImage image: Docker image for the container.\n :param int volume_size: Size of created volume, in bytes.\n :param unicode mountpoint: Mountpoint for created volume.\n :return: Deferred firing with a new probe.\n \"\"\"\n # Select an arbitrary node on which to create the container.\n d = control_service.list_nodes().addCallback(select_node)\n\n def parallel_setup(node):\n # Ensure the Docker image is cached by starting and stopping a\n # container.\n name = unicode(uuid4())\n container_setup = create_container(\n reactor, control_service, node.uuid, name, image\n )\n container_setup.addCallback(\n partial(delete_container, reactor, control_service)\n )\n\n # Create the dataset\n dataset_id = uuid4()\n dataset_setup = create_dataset(\n reactor, control_service, node.uuid, dataset_id, volume_size\n )\n\n d = gather_deferreds((container_setup, dataset_setup))\n\n # Return only the dataset state\n d.addCallback(lambda results: results[1])\n\n return d\n d.addCallback(parallel_setup)\n\n # Create the CreateContainerProbe instance.\n def create_probe(dataset_state):\n return cls(\n reactor=reactor,\n control_service=control_service,\n node_uuid=dataset_state.primary,\n name=name,\n image=image,\n dataset_id=dataset_state.dataset_id,\n mountpoint=mountpoint,\n )\n d.addCallback(create_probe)\n\n return d\n\n def run(self):\n \"\"\"\n Create a stateful container, and wait 
for it to be running.\n \"\"\"\n volumes = [\n MountedDataset(\n dataset_id=self.dataset_id, mountpoint=self.mountpoint\n )\n ]\n\n d = create_container(\n self.reactor, self.control_service, self.node_uuid, self.name,\n self.image, volumes\n )\n\n return d\n\n def cleanup(self):\n \"\"\"\n Delete the container and dataset created by the probe.\n \"\"\"\n d = self.control_service.delete_container(self.name)\n\n d.addCallback(\n lambda _ignore: self.control_service.delete_dataset(\n self.dataset_id\n )\n )\n\n return d\n\n\n@implementer(IOperation)\nclass CreateContainer(object):\n\n def __init__(\n self, reactor, cluster, image=u'clusterhq/mongodb', volume_size=None,\n mountpoint=u'/data'\n ):\n self.reactor = reactor\n self.control_service = cluster.get_control_service(reactor)\n self.image = DockerImage(repository=image)\n if volume_size is None:\n self.volume_size = cluster.default_volume_size()\n else:\n self.volume_size = volume_size\n self.mountpoint = mountpoint\n\n def get_probe(self):\n return CreateContainerProbe.setup(\n self.reactor,\n self.control_service,\n unicode(uuid4()),\n self.image,\n self.volume_size,\n self.mountpoint,\n )\n","sub_path":"benchmark/operations/create_container.py","file_name":"create_container.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"226197807","text":"#================================Params.py=====================================#\n# Created by Ciaran O'Hare 2019\n\n# Description:\n# This file just sets up some of the parameters that are used throughout the\n# project. and some classes that link things together.\n\n#==============================================================================#\n\nfrom __future__ import print_function\nfrom numpy import array, sqrt, pi, exp, interp, loadtxt, zeros, shape, ones\nfrom numpy import logspace, linspace, log10\nfrom scipy.special import erf, erfi\n\n\n# Constants\nm_p = 0.9315*1e6\nm_p_keV = 0.9315*1e6\nm_e = 511.0 # keV\nc_m = 2.99792458e8 # speed of light in m/s\nc_cm = c_m*100.0 # speed of light in cm/s\nc_km = c_m/1000.0 # speed of light in km/s\nGeV_2_kg = 1.0e6*1.783e-33 # convert GeV to kg\nalph = 1.0/137.0 # fine structure constant\nm_p_kg = 1.660538782e-27 # amu in kg\na0 = 0.268173 # Bohr radius keV^-1\nN_A = 6.02214e23 # Avocado's constant\nsinTheta_Wsq = 0.2387e0 # sin^2(Theta_W) weinberg angle\nG_F_GeV = 1.16637e-5 # GeV**-2 ! 
Fermi constant\nJan1 = 2458849.5 # January 1st 2020\nseconds2year = 365.25*3600*24\n\n\n#==============================================================================#\n# Set Nucleus params\nclass Atom:\n def __init__(self,xi,N,Z,J,Sp,Sn,fion,E_B_vals,E_gap, eps, Vfactor):\n self.IsotopicFraction = xi\n self.NumberOfNeutrons = N\n self.NumberOfProtons = Z\n self.MassNumber = N+Z\n self.NuclearSpin = J\n self.ExpProtonSpin = Sp\n self.ExpNeutronSpin = Sn\n if J>0.0:\n self.SDEnhancement = (4.0/3.0)*((J+1.0)/J)*(Sp-Sn)**2.0\n self.IonisationFormFactor = fion\n self.BindingEnergies = E_B_vals\n self.BandGapEnergy = E_gap\n self.ElectronHoleMeanEnergy = eps\n self.VCellFactor = Vfactor\n#==============================================================================#\n\n\n\n\n\n\n\n\n#==============================================================================#\n# Set parameters of halo models and streams\nclass Halo:\n def __init__(self,rho_0,v_LSR,sig_v,v_esc,v_pec,beta,eta):\n self.LocalDensity = rho_0\n self.RotationSpeed = v_LSR\n self.Dispersion = sig_v\n self.EscapeSpeed = v_esc\n self.PeculiarVelocity = v_pec\n self.Normalisation = erf(v_esc/(sqrt(2.0)*sig_v))-\\\n sqrt(2.0/pi)*(v_esc/sig_v)*\\\n exp(-v_esc**2.0/(2.0*sig_v**2.0))\n\n self.SausageEta = eta\n if eta>0.0:\n self.SausageBeta = beta\n sigr=sqrt(3*v_LSR**2.0/(2.0*(3-2.0*beta)))\n sigphi=sqrt(3*v_LSR**2.0*(1-beta)/(2.0*(3-2.0*beta)))\n sigz=sqrt(3*v_LSR**2.0*(1-beta)/(2.0*(3-2.0*beta)))\n self.SausageDispersionTensor = array([sigr,sigphi,sigz])\n self.Normalisation = erf(v_esc/(sqrt(2.0)*sigr)) \\\n - sqrt((1.0-beta)/beta)\\\n *exp(-v_esc**2.0/(2.0*sigphi**2.0))\\\n *erfi(v_esc/(sqrt(2)*sigr)*sqrt(beta/(1-beta)))\n\n# Standard Halo Model (old parameters)\nSHM = Halo(0.3,\n 220.0,\n 156.0,\n 544.0,\n array([11.1,12.2,7.3]),\n 0.0,\n 0.0)\n\n# Standard Halo Model++\nSHMpp = Halo(0.55,\n 233.0,\n 164.8,\n 528.0,\n array([11.1,12.2,7.3]),\n 0.9,\n 0.2)\n\n####\n\nclass Stream:\n def __init__(self,v1,v2,v3,sig1,sig2,sig3):\n self.Velocity = array([v1,v2,v3])\n self.Dispersion = array([sig1,sig2,sig3])\n\nS1stream = Stream(-29.6,-297.4,-72.8,82.6, 26.9, 58.5)\nS2stream = Stream(6.0, 166.7, -242.8,48.6, 13.5, 26.0)\n#S2stream_b = Stream(-70.9, 153.3, 161.5, 83.9, 29.6, 71.5)\n#==============================================================================#\n\n\n\n\n\n\n\n\n\n\n#==============================================================================#\n# Current number of neutrino sources:\nn_nu_tot = 15\n# Neutrino file names:\nnufile_root = \".txt\"\nnufile_dir = \"../data/neutrinos/\"\nnuname = [\"\" for x in range(0,n_nu_tot)]\nnuname[0] = \"pp\"\nnuname[1] = \"pep\"\nnuname[2] = \"hep\"\nnuname[3] = \"7Be1\"\nnuname[4] = \"7Be2\"\nnuname[5] = \"8B\"\nnuname[6] = \"13N\"\nnuname[7] = \"15O\"\nnuname[8] = \"17F\"\nnuname[9] = \"DSNB\"\nnuname[10] = \"Atm\"\nnuname[11] = \"GeoU\"\nnuname[12] = \"GeoTh\"\nnuname[13] = \"GeoK\"\nnuname[14] = \"Reactor\"\nn_Enu_vals = 1000\n# Mark which neutrinos are monochromatic\nmono = zeros(n_nu_tot,dtype=bool)\nmono[[1,3,4]] = True\n\n# Set which neutrinos are Solar\nwhichsolar = zeros(n_nu_tot,dtype=bool)\nwhichsolar[0:8] = True\n\n# Neutrino max energies (MeV):\nNuMaxEnergy = array([0.42341,1.44,18.765,0.3843,0.8613,16.34,1.193,\\\n 1.7285,1.7365,91.201,981.75\n ,4.54,2.33,1.3572,\\\n 1.1418e1])\n\n# Neutrino fluxes (cm-2 s-1 MeV-1) and uncertainties (%):\n# (from Vinyoles et al (2017) Barcelona GS98 SSM)\nNuFlux = array([5.98e10,1.44e8,7.98e3,4.93e8,4.50e9,5.16e6,\\\n 
2.78e8,2.05e8,5.29e6,85.7,10.54,\\\n 3808776.91874,3352686.94783,21639789.2056,\\\n 208537.673299])\nNuUnc = array([0.006, 0.01, 0.3,0.06, 0.06, 0.02, 0.15 ,\\\n 0.17 ,0.2 ,0.5, 0.25,\\\n 0.2,0.257,0.168,\\\n 0.08])\n\n# Collect neutrino parameters:\nclass Neutrinos:\n def __init__(self,n_nu,solar_label,energies,fluxes,\\\n normlisations,uncertainties):\n self.Flux = fluxes\n self.Energy = energies\n self.Uncertainties = uncertainties*normlisations\n self.Normalisations = normlisations\n self.NumberOfNeutrinos = n_nu\n self.SolarLabel = solar_label\n\n def RecoilDistribution(self,RD):\n self.RD = RD\n#==============================================================================#\n\n\n\n#==============================================================================#\n# Location class only has latitude and longitude at the moment\nclass Location:\n def __init__(self,lat,lon):\n self.Latitude = lat\n self.Longitude = lon\n\nBoulby = Location(54.5591,0.8310)\nGranSasso = Location(42.4691, 13.5654)\nKamioka = Location(36.2381, 137.1863)\nSNOlab = Location(46.4719, -81.1868)\nStawell = Location(-37.0576, 142.7754)\nOahu = Location(21.4389, -158.0001)\nGuantanamoBay = Location(20.0117, -75.1216)\nPyongyang = Location(39.0392, 125.7625)\n#------------------------------------------------------------------------------#\n","sub_path":"erec/Params.py","file_name":"Params.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"390697828","text":"from flask import redirect, render_template, url_for, abort, request\nfrom flask.views import MethodView\nfrom flask_login import current_user, login_required\nfrom flask_wtf import FlaskForm\nfrom wtforms.validators import InputRequired, Length, ValidationError\n\nfrom wtforms import SubmitField, HiddenField\nfrom app.models.information import Information\nfrom app.models.coupon import Coupon, COUPON_STATUS\nfrom app.models.order import Order, ORDER_STATUS\nfrom app.models.user import User\n\nfrom bson.objectid import ObjectId\nimport datetime\n\nclass CouponView(MethodView):\n def get(self):\n form = CouponForm()\n if request.args.get('status') == str(COUPON_STATUS[\"ALL\"]):\n coupons = list(Information.objects(user_id=current_user.id).first().coupon)\n for i in range(len(coupons) - 1, -1, -1):\n if coupons[i].status == COUPON_STATUS[\"EXPIRED\"] or Order.objects(buyer_id=current_user.id, coupon_id=coupons[i].id, status__ne=ORDER_STATUS[\"CANCEL\"]).first() != None:\n del coupons[i]\n status = COUPON_STATUS[\"ALL\"]\n else:\n coupons = Coupon.objects(id__nin=[coupon.id for coupon in Information.objects(user_id=current_user.id).first().coupon],\n begin_time__lte=datetime.datetime.utcnow()+datetime.timedelta(hours=8), status=COUPON_STATUS[\"ACTIVE\"])\n status = None\n\n return render_template('user/coupon/list.html', status=status, coupons=coupons, form=form, COUPON_STATUS=COUPON_STATUS)\n def post(self):\n form = CouponForm()\n\n if form.validate_on_submit():\n information = Information.objects(user_id=current_user.id).first()\n coupon = Coupon.objects(id=form.coupon_id.data).first()\n information.coupon.append(coupon)\n information.save()\n\n return redirect(url_for('user.coupon', status=COUPON_STATUS[\"ALL\"]))\n\n return self.get()\n\ndef validate_coupon(form, coupon_id):\n coupon = Coupon.objects(id=coupon_id.data, begin_time__lte=datetime.datetime.utcnow()+datetime.timedelta(hours=8), status=COUPON_STATUS[\"ACTIVE\"]).first()\n\n if coupon == None:\n raise 
ValidationError('此優惠券不存在')\n    elif Information.objects(user_id=current_user.id, coupon=coupon).first() != None:  # equality on a ListField matches list membership in mongoengine\n        raise ValidationError('此優惠券已兌換')\n\nclass CouponForm(FlaskForm):\n    coupon_id = HiddenField(\"\",validators=[InputRequired(), validate_coupon])\n    submit = SubmitField('提交')","sub_path":"app/views/user/coupon/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"76982970","text":"from collections import defaultdict\n\nwith open(\"input\") as fp:\n    lines = [line for line in fp]\n\n\ndef fIndex():\n    index = {}\n    for line in lines:\n        parts = line.split()\n        outer = \" \".join(parts[:2])\n        i = 2\n        inner = {}\n        while i < len(parts):\n            if parts[i].isdigit():\n                inner[\" \".join(parts[i+1:i+3])] = int(parts[i])\n                i += 3\n            else:\n                i += 1\n        index[outer] = inner\n    return index\n\n\ndef rIndex():\n    forwardIndex = fIndex()\n    reverseIndex = defaultdict(set)\n    for k, v in forwardIndex.items():\n        for bag in v.keys():\n            reverseIndex[bag].add(k)\n\n    return reverseIndex\n\n\ndef part1():\n    output = set()\n    reverseIndex = rIndex()\n\n    def count(bag):\n        bags = reverseIndex.get(bag)\n        if not bags:\n            return\n        for b in bags:\n            output.add(b)\n            count(b)\n\n    count(\"shiny gold\")\n    print(len(output))\n\n\ndef part2():\n    output = 0\n    forwardIndex = fIndex()\n\n    def count(bag):\n        if not forwardIndex[bag]:\n            return 0\n        c = 0\n        for k, v in forwardIndex.get(bag).items():\n            c += v + (count(k) * v)\n        return c\n\n    for bag, c in forwardIndex.get(\"shiny gold\").items():\n        output += c + (count(bag) * c)\n\n    print(output)\n\n\npart1()\npart2()\n","sub_path":"2020/day07/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"245763864","text":"#!/usr/bin/env python\n\"\"\"\nThis is a simple script to download an appropriate updated copy of\nFabber to use in the pipeline, prior to the release of FSL 6.0.4\n\"\"\"\nimport os\nimport sys\nimport stat\n\nimport requests\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"Usage: get_updated_fabber <destination dir>\")\n        sys.exit(1)\n\n    destdir = sys.argv[1]\n    if not destdir.rstrip(\"/\").endswith(\"bin\"):\n        destdir = os.path.join(destdir, \"bin\")\n\n    if sys.platform == \"linux\":\n        print(\"Using Linux executable built under Ubuntu 18.04.\")\n        print(\"This may work for other binary compatible Linux distros\")\n        print(\"including recent Centos\")\n        url = \"https://github.com/ibme-qubic/fabber_models_asl/releases/download/v2.0.3/fabber_asl_ubuntu18\"\n    elif sys.platform == \"darwin\":\n        print(\"Using executable built for Mac OSX\")\n        url = \"https://github.com/ibme-qubic/fabber_models_asl/releases/download/v2.0.3/fabber_asl_mac\"\n    else:\n        print(\"Unsupported platform: %s - cannot download Fabber\" % sys.platform)\n        sys.exit(1)\n\n    print(\"Downloading %s\" % url)\n    os.makedirs(destdir, exist_ok=True)\n    src = requests.get(url, allow_redirects=True)\n    dest = os.path.join(destdir, \"fabber_asl\")\n    with open(dest, \"wb\") as dest_file:\n        dest_file.write(src.content)\n    os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n    print(\"Downloaded fabber to %s\" % dest)\n\n    # Check the executable actually works\n    retcode=os.system(\"%s --version\" % dest)\n    if retcode != 0:\n        print(\"ERROR: downloaded executable did not run correctly - check if your platform is compatible\")\n        sys.exit(1)\n    
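# Added editorial note: os.system() returns the command's raw exit status,\n    # so reaching the line below means the '--version' smoke test exited with 0.\n    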
print(\"Executable ran successfully\")\n fabberdir = os.path.abspath(destdir.rstrip(\"/\").rstrip(\"bin\").rstrip(\"/\"))\n print(\"To use in the HCP-ASL pipeline add the option --fabberdir=%s\" % fabberdir)\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/get_updated_fabber.py","file_name":"get_updated_fabber.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"555995944","text":"__author__ = \"Markus Pichler\"\n__credits__ = [\"Markus Pichler\"]\n__maintainer__ = \"Markus Pichler\"\n__email__ = \"markus.pichler@tugraz.at\"\n__version__ = \"0.1\"\n__license__ = \"MIT\"\n\nimport warnings\nfrom math import floor\nfrom os import path, mkdir\nfrom webbrowser import open as show_file\nfrom scipy.optimize import newton\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom .arg_parser import heavy_rain_parser\nfrom .calculation_methods import get_u_w, get_parameters, calculate_u_w, depth_of_rainfall\nfrom .little_helpers import minutes_readable, height2rate, delta2min, rate2height\nfrom .definitions import *\nfrom .in_out import import_series, write_yaml, read_yaml\nfrom .sww_utils import (remove_timezone, guess_freq, rain_events, agg_events, event_duration,\n resample_rain_series, rain_bar_plot, IdfError, )\nfrom .plot_helpers import idf_bar_axes\nfrom .additional_scripts import measured_points\n\n\n########################################################################################################################\nclass IntensityDurationFrequencyAnalyse:\n \"\"\"\n heavy rain as a function of the duration and the return period acc. to DWA-A 531 (2012)\n\n This program reads the measurement data of the rainfall\n and calculates the distribution of the rainfall as a function of the return period and the duration\n \n for duration steps up to 12 hours (and more) and return period in a range of '0.5a <= T_n <= 100a'\n \"\"\"\n\n def __init__(self, series_kind=PARTIAL, worksheet=DWA, extended_durations=False):\n \"\"\"\n heavy rain as a function of the duration and the return period acc. 
to DWA-A 531 (2012)\n\n This program reads the measurement data of the rainfall\n and calculates the distribution of the rainfall as a function of the return period and the duration\n\n for duration steps up to 12 hours (and more) and return period in a range of '0.5a <= T_n <= 100a'\n\n Args:\n series_kind (str): ['partial', 'annual']\n worksheet (str): ['DWA-A_531', 'ATV-A_121', 'DWA-A_531_advektiv']\n extended_durations (bool): add [720, 1080, 1440, 2880, 4320, 5760, 7200, 8640] minutes to the calculation\n \"\"\"\n self.series_kind = series_kind\n self.worksheet = worksheet\n\n self._series = None # type: pd.Series # rain time-series\n self._freq = None # frequency of the rain series\n\n self._parameter = None # how to calculate the idf curves\n self._return_periods_frame = None # type: pd.DataFrame # with return periods of all given durations\n self._rain_events = None\n\n # sampling points of the duration steps in minutes\n self._duration_steps = [5, 10, 15, 20, 30, 45, 60]\n self._duration_steps += [i * 60 for i in [1.5, 3, 4.5, 6, 7.5, 10, 12]] # duration steps in hours\n if extended_durations:\n self._duration_steps += [i * 60 * 24 for i in\n [0.75, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]] # duration steps in days\n\n # __________________________________________________________________________________________________________________\n @property\n def series(self):\n if self._series is None:\n raise IdfError('No Series defined for IDF-Analysis!')\n return self._series\n\n @series.setter\n def series(self, series):\n self._series = series\n\n def set_series(self, series):\n \"\"\"\n set the series for the analysis\n\n Args:\n series (pandas.Series): precipitation time-series\n \"\"\"\n if not isinstance(series, pd.Series):\n raise IdfError('The series has to be a pandas Series.')\n\n if not isinstance(series.index, pd.DatetimeIndex):\n raise IdfError('The series has to have a DatetimeIndex.')\n\n if series.index.tz is not None:\n series = remove_timezone(series)\n\n series = series.replace(0, np.NaN).dropna()\n self._freq = guess_freq(series.index)\n freq_minutes = delta2min(self._freq)\n self.duration_steps = list(filter(lambda d: d >= freq_minutes, self.duration_steps))\n self.series = series\n\n # __________________________________________________________________________________________________________________\n @property\n def duration_steps(self):\n \"\"\"\n get duration steps (in minutes) for the parameter calculation and basic evaluations\n Returns:\n list | numpy.ndarray: duration steps in minutes\n \"\"\"\n if self._duration_steps is None:\n raise IdfError('No Series defined for IDF-Analysis!')\n return self._duration_steps\n\n @duration_steps.setter\n def duration_steps(self, durations):\n \"\"\"\n set duration steps (in minutes) for the parameter calculation and basic evaluations\n Args:\n durations (list | numpy.ndarray): duration steps in minutes\n \"\"\"\n if not isinstance(durations, (list, np.ndarray)):\n raise IdfError('Duration steps have to be {} got \"{}\"'.format((list, np.ndarray), type(durations)))\n self._duration_steps = durations\n\n # __________________________________________________________________________________________________________________\n @property\n def parameters(self):\n \"\"\"\n get the calculation parameters\n\n calculation method depending on the used worksheet and on the duration\n also the parameters for each method\n\n to save some time and save the parameters with\n :func:`IntensityDurationFrequencyAnalyse.write_parameters`\n and read them later 
with :func:`IntensityDurationFrequencyAnalyse.read_parameters`\n\n Returns:\n list[dict]: calculation parameters\n \"\"\"\n if self._parameter is None:\n interim_results = calculate_u_w(self.series, self.duration_steps, self.series_kind)\n self._parameter = get_parameters(interim_results, self.worksheet)\n return self._parameter\n\n def write_parameters(self, filename):\n \"\"\"\n save parameters as yaml-file to save computation time.\n\n Args:\n filename (str): filename for the parameters yaml-file\n \"\"\"\n write_yaml(self.parameters, filename)\n\n def read_parameters(self, filename):\n \"\"\"\n read parameters from a .yaml-file to save computation time.\n extract interim results from parameters\n\n Args:\n filename (str): filename of the parameters yaml-file\n \"\"\"\n self._parameter = read_yaml(filename)\n\n def auto_save_parameters(self, filename):\n \"\"\"auto-save the parameters as a yaml-file to save computation time.\"\"\"\n if path.isfile(filename):\n self.read_parameters(filename)\n else:\n self.write_parameters(filename)\n\n # __________________________________________________________________________________________________________________\n def get_u_w(self, duration):\n \"\"\"\n calculate the u and w parameters depending on the durations\n\n Args:\n duration (int | float | list | numpy.ndarray | pandas.Series): in minutes\n\n Returns:\n (numpy.ndarray, numpy.ndarray) | (float, float): u and w\n \"\"\"\n return get_u_w(duration, self.parameters)\n\n # __________________________________________________________________________________________________________________\n def depth_of_rainfall(self, duration, return_period):\n \"\"\"\n calculate the height of the rainfall h in L/m² = mm\n\n Args:\n duration (int | float | list | numpy.ndarray | pandas.Series): duration: in minutes\n return_period (float): in years\n\n Returns:\n int | float | list | numpy.ndarray | pandas.Series: height of the rainfall h in L/m² = mm\n \"\"\"\n u, w = self.get_u_w(duration)\n return depth_of_rainfall(u, w, return_period, series_kind=self.series_kind)\n\n # __________________________________________________________________________________________________________________\n def rain_flow_rate(self, duration, return_period):\n \"\"\"\n convert the height of rainfall to the specific rain flow rate in [l/(s*ha)]\n if 2 array-like parameters are give, a element-wise calculation will be made.\n So the length of the array must be the same.\n\n Args:\n duration (int | float | list | numpy.ndarray | pandas.Series): in minutes\n return_period (float): in years\n\n Returns:\n int | float | list | numpy.ndarray | pandas.Series: specific rain flow rate in [l/(s*ha)]\n \"\"\"\n return height2rate(height_of_rainfall=self.depth_of_rainfall(duration=duration, return_period=return_period),\n duration=duration)\n\n # __________________________________________________________________________________________________________________\n def r_720_1(self):\n \"\"\"\n rain flow rate in [l/(s*ha)] for a duration of 12h and a return period of 1 year\n\n Returns:\n float: rain flow rate in [l/(s*ha)]\n \"\"\"\n return self.rain_flow_rate(duration=720, return_period=1)\n\n # __________________________________________________________________________________________________________________\n def get_return_period(self, height_of_rainfall, duration):\n \"\"\"\n calculate the return period, when the height of rainfall and the duration are given\n\n Args:\n height_of_rainfall (float): in [mm]\n duration (int | float | list | 
numpy.ndarray | pandas.Series): in minutes\n\n Returns:\n int | float | list | numpy.ndarray | pandas.Series: return period in years\n \"\"\"\n u, w = self.get_u_w(duration)\n return np.exp((height_of_rainfall - u) / w)\n\n # __________________________________________________________________________________________________________________\n def get_duration(self, height_of_rainfall, return_period):\n \"\"\"\n calculate the duration, when the height of rainfall and the return period are given\n\n Args:\n height_of_rainfall (float): in [mm]\n return_period (float): in years\n\n Returns:\n float: duration in minutes\n \"\"\"\n return newton(lambda d: self.depth_of_rainfall(d, return_period) - height_of_rainfall, x0=1)\n\n # __________________________________________________________________________________________________________________\n def result_table(self, durations=None, return_periods=None, add_names=False):\n \"\"\"\n get a standard idf table of rainfall depth with return periods as columns and durations as rows\n\n Args:\n durations (list | numpy.ndarray): list of durations in minutes for the table\n return_periods (list): list of return periods in years for the table\n add_names (bool): weather to use expressive names as index-&column-label\n\n Returns:\n pandas.DataFrame: idf table\n \"\"\"\n if durations is None:\n durations = self.duration_steps\n\n if return_periods is None:\n return_periods = [1, 2, 3, 5, 10, 20, 25, 30, 50, 75, 100]\n\n result_table = dict()\n for t in return_periods:\n result_table[t] = self.depth_of_rainfall(durations, t)\n\n result_table = pd.DataFrame(result_table, index=durations)\n\n if add_names:\n result_table.index.name = 'duration (min)'\n result_table.columns = pd.MultiIndex.from_tuples([(rp, round(1 / rp, 3)) for rp in result_table.columns])\n result_table.columns.names = ['return period (a)', 'frequency (1/a)']\n return result_table\n\n ####################################################################################################################\n def result_figure(self, min_duration=5.0, max_duration=720.0, logx=False, return_periods=None, color=False):\n duration_steps = np.arange(min_duration, max_duration + 1, 1)\n plt.style.use('bmh')\n\n if return_periods is None:\n return_periods = [1, 2, 5, 10, 50, 100]\n\n table = self.result_table(durations=duration_steps, return_periods=return_periods)\n if color:\n table.columns.name = 'T$\\\\mathsf{_N}$ in (a)'\n ax = table.plot(color=(None if color else 'black'), logx=logx, legend=color)\n\n for _, return_time in enumerate(return_periods):\n p = measured_points(self, return_time, max_duration=max_duration)\n ax.plot(p, 'k' + 'x')\n\n if not color:\n x, y = list(p.tail(1).items())[0]\n ax.text(x + 10, y, '{} a'.format(return_time), verticalalignment='center', horizontalalignment='left',\n # bbox=dict(facecolor='white', alpha=1.0, lw=1)\n )\n\n ax.tick_params(axis='both', which='both', direction='out')\n ax.set_xlabel('Duration D in (min)')\n ax.set_ylabel('Rainfall h$\\\\mathsf{_N}$ in (mm)')\n ax.set_title('IDF curves')\n\n fig = ax.get_figure()\n\n cm_to_inch = 2.54\n fig.set_size_inches(h=21 / cm_to_inch, w=29.7 / cm_to_inch) # (11.69, 8.27)\n fig.tight_layout()\n return fig, ax\n\n ####################################################################################################################\n def get_return_periods_frame(self, series, durations=None):\n \"\"\"\n\n Args:\n series (pandas.Series):\n durations (list): list of durations in minutes which are of interest (default: pre 
defined durations)\n\n Returns:\n pandas.DataFrame: return periods depending of the duration per datetimeindex\n \"\"\"\n if durations is None:\n durations = self.duration_steps\n\n df = pd.DataFrame(index=series.index)\n\n freq = delta2min(guess_freq(series.index))\n for d in durations:\n if d % freq != 0:\n warnings.warn('Using durations (= {} minutes), '\n 'which are not a multiple of the base frequency (= {} minutes) of the series, '\n 'will lead to misinterpretations.'.format(d, freq))\n ts_sum = series.rolling(pd.Timedelta(minutes=d)).sum()\n df[d] = self.get_return_period(height_of_rainfall=ts_sum, duration=d)\n\n # printable_names (bool): if durations should be as readable in dataframe, else in minutes\n # df = df.rename(minutes_readable, axis=0)\n\n return df.round(1)\n\n @property\n def return_periods_frame(self):\n \"\"\"\n get the return periods over the whole time-series for the default duration steps.\n\n Returns:\n pandas.DataFrame: data-frame of return periods where the columns are the duration steps\n \"\"\"\n if self._return_periods_frame is None:\n self._return_periods_frame = self.get_return_periods_frame(self.series)\n return self._return_periods_frame\n\n def write_return_periods_frame(self, filename, **kwargs):\n \"\"\"save the return-periods dataframe as a parquet-file to save computation time.\"\"\"\n df = self.return_periods_frame.copy()\n df.columns = df.columns.to_series().astype(str)\n df.to_parquet(filename, **kwargs)\n\n def read_return_periods_frame(self, filename, **kwargs):\n \"\"\"read the return-periods dataframe as a parquet-file to save computation time.\"\"\"\n df = pd.read_parquet(filename, **kwargs)\n df.columns = df.columns.to_series().astype(int)\n self._return_periods_frame = df\n\n def auto_save_return_periods_frame(self, filename):\n \"\"\"auto-save the return-periods dataframe as a parquet-file to save computation time.\"\"\"\n if path.isfile(filename):\n self.read_return_periods_frame(filename)\n else:\n self.write_return_periods_frame(filename)\n\n ####################################################################################################################\n @classmethod\n def command_line_tool(cls):\n user = heavy_rain_parser()\n\n # --------------------------------------------------\n # use the same directory as the input file and make as subdir with the name of the input_file + \"_idf_data\"\n out = '{label}_idf_data'.format(label='.'.join(user.input.split('.')[:-1]))\n\n if not path.isdir(out):\n mkdir(out)\n action = 'Creating'\n else:\n action = 'Using'\n\n print('{} the subfolder \"{}\" for the interim- and final-results.'.format(action, out))\n\n fn_pattern = path.join(out, 'idf_{}')\n\n # --------------------------------------------------\n idf = cls(series_kind=user.series_kind, worksheet=user.worksheet, extended_durations=True)\n\n # --------------------------------------------------\n parameters_fn = fn_pattern.format('parameters.yaml')\n\n if path.isfile(parameters_fn):\n print('Found existing interim-results in \"{}\" and using them for calculations.'.format(parameters_fn))\n else:\n print('Start reading the time-series {} for the analysis.'.format(user.input))\n ts = import_series(user.input).replace(0, np.NaN).dropna()\n # --------------------------------------------------\n idf.set_series(ts)\n print('Finished reading.')\n\n # --------------------------------------------------\n idf.auto_save_parameters(parameters_fn)\n\n # --------------------------------------------------\n h = user.height_of_rainfall\n r = 
user.flow_rate_of_rainfall\n d = user.duration\n t = user.return_period\n\n if r is not None:\n if h is None and d is not None:\n h = rate2height(rain_flow_rate=r, duration=d)\n\n elif d is None and h is not None:\n d = h/r * 1000/6\n\n if user.r_720_1:\n d = 720\n t = 1\n\n if any((h, d, t)):\n if all((d, t)):\n pass\n\n elif all((d, h)):\n t = idf.get_return_period(h, d)\n print('The return period is {:0.1f} years.'.format(t))\n\n elif all((h, t)):\n d = idf.get_duration(h, t)\n print('The duration is {:0.1f} minutes.'.format(d))\n\n print('Resultierende Regenhöhe h_N(T_n={t:0.1f}a, D={d:0.1f}min) = {h:0.2f} mm'\n ''.format(t=t, d=d, h=idf.depth_of_rainfall(d, t)))\n print('Resultierende Regenspende r_N(T_n={t:0.1f}a, D={d:0.1f}min) = {r:0.2f} L/(s*ha)'\n ''.format(t=t, d=d, r=idf.rain_flow_rate(d, t)))\n\n # --------------------------------------------------\n if user.plot:\n fig, ax = idf.result_figure()\n plot_fn = fn_pattern.format('_curves_plot.png')\n fig.savefig(plot_fn, dpi=260)\n plt.close(fig)\n show_file(plot_fn)\n print('Created the IDF-curves-plot and saved the file as \"{}\".'.format(plot_fn))\n\n # --------------------------------------------------\n if user.export_table:\n table = idf.result_table(add_names=True)\n print(table.round(2).to_string())\n table_fn = fn_pattern.format('table.csv')\n table.to_csv(table_fn, sep=';', decimal=',', float_format='%0.2f')\n print('Created the IDF-curves-plot and saved the file as \"{}\".'.format(table_fn))\n\n ####################################################################################################################\n @property\n def rain_events(self):\n \"\"\"\n get the all the rain events of the time-series\n\n Returns:\n pandas.DataFrame: data-frame of events with start-, end-time and duration\n \"\"\"\n if self._rain_events is None:\n events = rain_events(self.series)\n events[COL.DUR] = event_duration(events)\n events[COL.LP] = agg_events(events, self.series, 'sum').round(1)\n # events = events.sort_values(by=COL.LP, ascending=False)\n self._rain_events = events\n\n return self._rain_events\n\n def write_rain_events(self, filename, sep=';', decimal='.'):\n \"\"\"save the rain-events dataframe as a csv-file for external use or to save computation time.\"\"\"\n self.rain_events.to_csv(filename, index=False, sep=sep, decimal=decimal)\n\n def read_rain_events(self, filename, sep=';', decimal='.'):\n \"\"\"read the rain-events dataframe as a csv-file to save computation time.\"\"\"\n events = pd.read_csv(filename, skipinitialspace=True, sep=sep, decimal=decimal)\n events[COL.START] = pd.to_datetime(events[COL.START])\n events[COL.END] = pd.to_datetime(events[COL.END])\n events[COL.DUR] = pd.to_timedelta(events[COL.DUR])\n self._rain_events = events\n\n def auto_save_rain_events(self, filename, sep=';', decimal='.'):\n \"\"\"auto-save the rain-events dataframe as a csv-file to save computation time.\"\"\"\n if path.isfile(filename):\n self.read_rain_events(filename, sep=sep, decimal=decimal)\n else:\n self.write_rain_events(filename, sep=sep, decimal=decimal)\n\n ####################################################################################################################\n def event_report(self, filename, min_event_rain_sum=25, min_return_period=0.5, durations=None):\n \"\"\"\n create pdf file with the biggest rain events\n for each event is represented by a plot of the rain series\n and a IDF analysis where the return periods are calculated\n\n Args:\n filename (str): path (directory + filename) for the created 
pdf-report\n            min_event_rain_sum (float): only events with a bigger rain sum will be created\n            min_return_period (float): only events with a bigger return period will be analysed\n                (the plot will be created anyway)\n            durations (list[int]): analysed durations\n                (default: [5, 10, 15, 20, 30, 45, 60, 90, 120, 180, 240, 360, 540, 720, 1080, 1440, 2880, 4320])\n        \"\"\"\n        if durations is None:\n            durations = [5, 10, 15, 20, 30, 45, 60, 90, 120, 180, 240, 360, 540, 720, 1080, 1440, 2880, 4320]\n\n        events = self.rain_events\n        events[COL.LP] = agg_events(events, self.series, 'sum')\n\n        # sort while the selection is still a DataFrame - a plain dict has no sort_values()\n        main_events = events[events[COL.LP] > min_event_rain_sum]\n        main_events = main_events.sort_values(by=COL.LP, ascending=False).to_dict(orient='index')\n\n        unit = 'mm'\n        column_name = 'Precipitation'\n\n        pdf = PdfPages(filename)\n\n        for _, event in tqdm(main_events.items()):\n            fig, caption = self.event_plot(event, durations=durations, min_return_period=min_return_period,\n                                           unit=unit, column_name=column_name)\n\n            # -------------------------------------\n            fig.get_axes()[0].set_title(caption + '\\n\\n\\n')\n\n            # DIN A4\n            fig.set_size_inches(w=8.27, h=11.69)\n            fig.tight_layout()\n            pdf.savefig(fig)\n            plt.close(fig)\n\n        pdf.close()\n\n    def event_plot(self, event, durations=None, unit='mm', column_name='Precipitation', min_return_period=0.5):\n        start = event[COL.START]\n        end = event[COL.END]\n\n        if durations is None:\n            durations = [5, 10, 15, 20, 30, 45, 60, 90, 120, 180, 240, 360, 540, 720, 1080, 1440, 2880, 4320]\n\n        caption = 'rain event\\nbetween {} and {}\\nwith a total sum of {:0.1f} {}\\nand a duration of {}'.format(\n            start.strftime('%Y-%m-%d %H:%M'),\n            end.strftime('%Y-%m-%d %H:%M'),\n            event[COL.LP],\n            unit,\n            end - start)\n\n        freq = guess_freq(self.series.index)\n        pstart = start - pd.Timedelta(freq)\n        pend = end + pd.Timedelta(freq)\n        ts = self.series[pstart:pend].resample(freq).sum().fillna(0).copy()\n\n        fig = plt.figure()\n\n        # -------------------------------------\n        idf_table = self.return_periods_frame[pstart:pend]\n        idf_table = idf_table.rename(minutes_readable, axis=0)\n\n        # print(idf_table > min_return_period)\n\n        max_period, duration = idf_table.max().max(), idf_table.max().idxmax()\n        caption += '\\nThe maximum return period was {:0.2f}a\\nat a duration of {}.'.format(max_period, duration)\n\n        if not (idf_table > min_return_period).any().any():\n            rain_ax = fig.add_subplot(111)\n\n        else:\n            idf_bar_ax = fig.add_subplot(211)\n            idf_bar_ax = idf_bar_axes(idf_bar_ax, idf_table, durations)\n            rain_ax = fig.add_subplot(212, sharex=idf_bar_ax)\n\n        # -------------------------------------\n        ts_sum, minutes = resample_rain_series(ts)\n        rain_ax = rain_bar_plot(ts_sum, rain_ax)\n        rain_ax.set_ylabel('{} in [{}/{}min]'.format(column_name, unit, minutes if minutes != 1 else ''))\n        rain_ax.set_xlim(ts.index[0], ts.index[-1])\n\n        return fig, caption\n","sub_path":"idf_analysis/idf_class.py","file_name":"idf_class.py","file_ext":"py","file_size_in_byte":25568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"369334705","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom matplotlib import pyplot as plt\n\n\nn_data=torch.ones(100,2)\nx0=torch.normal(2*n_data,1)# 
normal(means,std)\ny0=torch.zeros(100)\nx1=torch.normal(-2*n_data,1)\ny1=torch.ones(100)\nx=torch.cat((x0,x1),0).type(torch.FloatTensor)\ny=torch.cat((y0,y1),).type(torch.LongTensor)\n\n\nx,y=Variable(x),Variable(y)\n# plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=y.data.numpy(),s=100,lw=0,cmap=)\n# plt.show()\n\nclass Net(torch.nn.Module):\n    def __init__(self,n_features,n_hidden,n_output=1):\n        super(Net,self).__init__()\n        self.hidden=torch.nn.Linear(n_features,n_hidden)\n        self.predict=torch.nn.Linear(n_hidden,n_output)\n\n    def forward(self,x):\n        x=F.relu(self.hidden(x))\n        x=self.predict(x)\n        return x\n\nnet=Net(2,10,2)# input is 2d, output is 2d\nplt.ion() # interactive mode: update the plot while training\nplt.show()\noptimizer=torch.optim.SGD(net.parameters(),lr=0.02)\nloss_func=torch.nn.CrossEntropyLoss()\nfor t in range(100):\n    out=net(x)\n    loss=loss_func(out,y)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if t%2==0:\n        plt.cla()\n        prediction=torch.max(F.softmax(out,dim=1),1)[1] # index=0 is value ,index =1 is the position\n        pred_y=prediction.data.numpy().squeeze()\n        target_y=y.data.numpy()\n        plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=pred_y,s=100,lw=0)\n        accuracy=sum(pred_y==target_y)/200.0\n        plt.text(1.5,-4,'Accuracy=%0.2f'%accuracy,fontdict={'size':20,'color':'red'})\n        plt.pause(0.1)\n\nplt.ioff()\nplt.show()","sub_path":"Classification.py","file_name":"Classification.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"132516961","text":"class Solution:\n    def divide(self, dividend: int, divisor: int) -> int:\n        ans = 0\n        sign = 1 if (dividend > 0 and divisor > 0) or (dividend < 0 and divisor < 0) else -1\n        dividend, divisor = abs(dividend), abs(divisor)\n        times = [(divisor, 1), ]\n        mask = 2\n        for i in range(1, 31):\n            mask += mask\n            times.append((times[i - 1][0] + times[i - 1][0], times[i - 1][1] + times[i - 1][1]))\n        divisor, pos = 0, 30\n        while True:\n            if divisor + times[pos][0] <= dividend:\n                ans += times[pos][1]\n                divisor += times[pos][0]\n            else:\n                if pos == 0:\n                    ans *= sign\n                    if ans > mask or ans <= -mask:\n                        return mask-1\n                    else:\n                        return ans\n                pos -= 1\n\n\n\nprint(Solution().divide(10, 3))\n","sub_path":"29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"266499508","text":"import tensorflow as tf\n# +\n\nfrom keras_transformer.extras import ReusableEmbedding, TiedOutputEmbedding\nfrom keras_transformer.position import TransformerCoordinateEmbedding\n\nfrom bert_concept_embeddings.custom_layers import EncoderLayer, TimeSelfAttention, TimeAttention, Encoder\n\n\ndef time_attention_cbow_negative_sampling_model(max_seq_length: int,\n                                                vocabulary_size: int,\n                                                concept_embedding_size: int,\n                                                time_window_size: int):\n    \"\"\"\n\n    :param max_seq_length:\n    :param vocabulary_size:\n    :param concept_embedding_size:\n    :param time_window_size:\n    :return:\n    \"\"\"\n    target_concepts = tf.keras.layers.Input(shape=(1,), dtype='int32', name='target_concepts')\n\n    target_time_stamps = tf.keras.layers.Input(shape=(1,), dtype='int32', name='target_time_stamps')\n\n    context_concepts = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='context_concepts')\n\n    context_time_stamps = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='context_time_stamps')\n\n    mask = 
tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='mask')\n\n embedding_layer = tf.keras.layers.Embedding(vocabulary_size, concept_embedding_size, name='embedding_layer',\n mask_zero=True)\n\n time_attention_layer = TimeAttention(vocab_size=vocabulary_size,\n target_seq_len=1,\n context_seq_len=max_seq_length,\n time_window_size=time_window_size)\n\n dot_layer = tf.keras.layers.Dot(axes=2)\n\n sigmoid_layer = tf.keras.layers.Dense(1, activation='sigmoid')\n\n # shape = (batch_size, 1, embedding_size)\n target_concept_embeddings = embedding_layer(target_concepts)\n\n # shape = (batch_size, seq_len, embedding_size)\n context_concept_embeddings = embedding_layer(context_concepts)\n\n # shape = (batch_size, 1, seq_len)\n time_attentions = time_attention_layer([target_concepts,\n target_time_stamps,\n context_time_stamps,\n mask])\n\n # shape = (batch_size, 1, embedding_size)\n combined_embeddings = tf.matmul(time_attentions, context_concept_embeddings)\n\n # shape = (batch_size, 1, 1)\n concept_predictions = sigmoid_layer(dot_layer([target_concept_embeddings, combined_embeddings]))\n\n model = tf.keras.Model(\n inputs=[target_concepts, target_time_stamps, context_concepts, context_time_stamps, mask],\n outputs=[concept_predictions])\n\n return model\n\n\n# -\n\ndef time_attention_cbow_model(max_seq_length: int,\n vocabulary_size: int,\n concept_embedding_size: int,\n time_window_size: int):\n \"\"\"\n\n :param max_seq_length:\n :param vocabulary_size:\n :param concept_embedding_size:\n :param time_window_size:\n :return:\n \"\"\"\n target_concepts = tf.keras.layers.Input(shape=(1,), dtype='int32', name='target_concepts')\n\n target_time_stamps = tf.keras.layers.Input(shape=(1,), dtype='int32', name='target_time_stamps')\n\n context_concepts = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='context_concepts')\n\n context_time_stamps = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='context_time_stamps')\n\n mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='mask')\n\n embedding_layer = tf.keras.layers.Embedding(vocabulary_size, concept_embedding_size, name='embedding_layer',\n mask_zero=True)\n\n time_embedding_layer = TimeAttention(vocab_size=vocabulary_size,\n target_seq_len=1,\n context_seq_len=max_seq_length,\n time_window_size=time_window_size)\n\n dense_layer = tf.keras.layers.Dense(vocabulary_size)\n\n softmax_layer = tf.keras.layers.Softmax()\n\n # shape = (batch_size, seq_len, embedding_size)\n concept_embeddings = embedding_layer(context_concepts)\n\n if mask is not None:\n concept_embeddings = concept_embeddings * tf.cast(tf.expand_dims(mask == 0, axis=-1), dtype=tf.float32)\n\n # shape = (batch_size, 1, seq_len)\n time_embeddings = time_embedding_layer([target_concepts,\n target_time_stamps,\n context_time_stamps,\n mask])\n\n # shape = (batch_size, 1, embedding_size)\n combined_embeddings = tf.matmul(time_embeddings, concept_embeddings)\n\n # shape = (batch_size, 1, vocab_size)\n concept_predictions = softmax_layer(dense_layer(combined_embeddings))\n\n model = tf.keras.Model(\n inputs=[target_concepts, target_time_stamps, context_concepts, context_time_stamps, mask],\n outputs=[concept_predictions])\n\n return model\n\n\n# -\ndef transformer_bert_model(\n max_seq_length: int,\n time_window_size: int,\n vocabulary_size: int,\n concept_embedding_size: int,\n depth: int,\n num_heads: int,\n transformer_dropout: float = 0.1,\n embedding_dropout: float = 0.6,\n l2_reg_penalty: float = 1e-4,\n 
time_attention_trainable=True):\n \"\"\"\n Builds a BERT-based model (Bidirectional Encoder Representations\n from Transformers) following paper \"BERT: Pre-training of Deep\n Bidirectional Transformers for Language Understanding\"\n (https://arxiv.org/abs/1810.04805)\n\n Depending on the value passed with `use_universal_transformer` argument,\n this function applies either an Adaptive Universal Transformer (2018)\n or a vanilla Transformer (2017) to do the job (the original paper uses\n vanilla Transformer).\n \"\"\"\n masked_concept_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='masked_concept_ids')\n\n concept_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='concept_ids')\n\n time_stamps = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='time_stamps')\n\n mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='mask')\n\n concept_mask = tf.expand_dims(tf.expand_dims(mask, axis=1), axis=1)\n\n l2_regularizer = (tf.keras.regularizers.l2(l2_reg_penalty) if l2_reg_penalty else None)\n\n embedding_layer = ReusableEmbedding(\n vocabulary_size, concept_embedding_size,\n input_length=max_seq_length,\n name='bpe_embeddings',\n # Regularization is based on paper \"A Comparative Study on\n # Regularization Strategies for Embedding-based Neural Networks\"\n # https://arxiv.org/pdf/1508.03721.pdf\n embeddings_regularizer=l2_regularizer)\n\n time_attention_layer = TimeSelfAttention(vocab_size=vocabulary_size,\n target_seq_len=max_seq_length,\n context_seq_len=max_seq_length,\n time_window_size=time_window_size,\n return_logits=True,\n self_attention_return_logits=True,\n trainable=time_attention_trainable)\n\n encoder = Encoder(name='encoder',\n num_layers=depth,\n d_model=concept_embedding_size,\n num_heads=num_heads,\n dropout_rate=transformer_dropout)\n\n output_layer = TiedOutputEmbedding(\n projection_regularizer=l2_regularizer,\n projection_dropout=embedding_dropout,\n name='concept_prediction_logits')\n\n softmax_layer = tf.keras.layers.Softmax(name='concept_predictions')\n\n coordinate_embedding_layer = TransformerCoordinateEmbedding(1, name='coordinate_embedding')\n\n next_step_input, embedding_matrix = embedding_layer(masked_concept_ids)\n\n # Building a Vanilla Transformer (described in\n # \"Attention is all you need\", 2017)\n next_step_input = coordinate_embedding_layer(next_step_input, step=0)\n # shape = (batch_size, seq_len, seq_len)\n time_attention = time_attention_layer([concept_ids, time_stamps, mask])\n # pad a dimension to accommodate the head split\n time_attention = tf.expand_dims(time_attention, axis=1)\n\n next_step_input, _ = encoder(next_step_input, concept_mask, time_attention)\n\n concept_predictions = softmax_layer(\n output_layer([next_step_input, embedding_matrix]))\n\n model = tf.keras.Model(\n inputs=[masked_concept_ids, concept_ids, time_stamps, mask],\n outputs=[concept_predictions])\n\n return model\n","sub_path":"bert_concept_embeddings/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"434022851","text":"import html\n\nimport nltk\nimport pandas as pd\nimport preprocessor as tweet_preprocessor\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.corpus import twitter_samples\nfrom nltk.sentiment import SentimentAnalyzer\nfrom nltk.sentiment.util import extract_unigram_feats\n\ntweet_preprocessor.set_options(tweet_preprocessor.OPT.URL,\n 
tweet_preprocessor.OPT.MENTION,\n tweet_preprocessor.OPT.HASHTAG,\n tweet_preprocessor.OPT.RESERVED,\n tweet_preprocessor.OPT.NUMBER)\n\npositive_tweets = twitter_samples.strings(\"positive_tweets.json\")\nnegative_tweets = twitter_samples.strings(\"negative_tweets.json\")\n\ncleaned_postive_tweets = [tweet_preprocessor.clean(html.unescape(x)) for x in positive_tweets]\ncleaned_negative_tweets = [tweet_preprocessor.clean(html.unescape(x)) for x in negative_tweets]\n\nlabeled_positive_tweets = [(nltk.word_tokenize(x), 1) for x in cleaned_postive_tweets]\nlabeled_negative_tweets = [(nltk.word_tokenize(x), 0) for x in cleaned_negative_tweets]\n\ntraining_samples = labeled_positive_tweets[0:4000] + labeled_negative_tweets[0:4000]\n\nsentim_analyzer = SentimentAnalyzer()\nall_words = []\nfor tweet, sentiment_score in training_samples:\n for word in tweet:\n all_words.append(word)\n\nfeature_words = sentim_analyzer.unigram_word_feats(all_words, min_freq=10)\nsentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=feature_words)\n\ntraining_set = sentim_analyzer.apply_features(training_samples)\n\ntrainer = NaiveBayesClassifier.train\nclassifier = sentim_analyzer.train(trainer, training_set)\n\n### Case Study\n\nresults_dict = dict(\n {\n \"Airline\": list(),\n \"Positive Sentiment Count\": list(),\n \"Negative Sentiment Count\": list()\n }\n)\n\nairline_friendly_name_map = {\n \"americanair\": \"American Airlines\",\n \"united\": \"United Airlines\",\n \"southwestair\": \"Southwest Airlines\",\n \"delta\": \"Delta\"\n}\n\nfor airline_name in [\"americanair\", \"united\", \"southwestair\", \"delta\"]:\n with open(\"case_study_dataset_{}.csv\".format(airline_name), \"r\") as file_handle:\n next(file_handle) # Skip the header\n dataset = list()\n for line in file_handle.readlines():\n dataset.append(nltk.word_tokenize(tweet_preprocessor.clean(html.unescape(line))))\n\n positive_sentiment_count = 0\n negative_sentiment_count = 0\n\n for tweet in dataset:\n sentiment_score = sentim_analyzer.classify(tweet)\n if sentiment_score == 0:\n negative_sentiment_count = negative_sentiment_count + 1\n else:\n positive_sentiment_count = positive_sentiment_count + 1\n\n results_dict[\"Airline\"].append(airline_friendly_name_map[airline_name])\n results_dict[\"Positive Sentiment Count\"].append(positive_sentiment_count)\n results_dict[\"Negative Sentiment Count\"].append(negative_sentiment_count)\n\npd.DataFrame(results_dict).to_csv(\"case_study_naive_bayes_classifier_with_emojis.csv\")\n","sub_path":"nltk_sentiment_analysis/case_study/5_case_study_naive_bayes_classifier_with_emojis.py","file_name":"5_case_study_naive_bayes_classifier_with_emojis.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"360109519","text":"class PartyAnimal:\n\n def __init__(self, name):\n self.name = name\n self.x = 0\n print(self.name, \"constructed\")\n\n def party(self):\n self.x += 1\n print(self.name, \"party count\", self.x)\n\n def __del__(self):\n print(\"I'm destructed\", self.x)\n\n\nan1 = PartyAnimal('an1')\nan2 = PartyAnimal('an2')\n\nan1.party()\nan1.party()\nan1.party()\nan2.party()\n","sub_path":"week_1/tmp_class.py","file_name":"tmp_class.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"302832119","text":"import sys\nclass Solution:\n # @param {int[][]} matrix an integer array of n * m matrix\n # @param {int} k an 
integer\n # @return {int} the maximum number\n def maxSlidingWindow2(self, matrix, k):\n # Write your code here\n rows = len(matrix)\n cols = len(matrix[0])\n \n sums = [[0 for j in range(cols+1)] for i in range(rows+1)]\n\n for i in range(1, rows+1):\n for j in range(1, cols+1):\n sums[i][j] = matrix[i-1][j-1] + sums[i][j-1] + sums[i-1][j] - sums[i-1][j-1]\n \n maximum = -sys.maxsize - 1\n for i in range(k, rows+1):\n for j in range(k, cols+1):\n s = sums[i][j] - sums[i][j-k] - sums[i-k][j] + sums[i-k][j-k]\n \n # keep the largest k*k window sum and return it after the scan\n if s > maximum:\n maximum = s\n return maximum","sub_path":"code/558-Sliding_Window_Matrix_Maximum.py","file_name":"558-Sliding_Window_Matrix_Maximum.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365486401","text":"from tkinter import *\nfrom tkinter.filedialog import *\nimport os\nfrom PIL import Image\nimport urllib.request\nimport re\nfrom threading import Thread\n\n\nclass ImageResizer:\n def __init__(self,root):\n self.root = root\n \n self.root.title('URL ImageResizer')\n self.root.grid_rowconfigure(0,minsize=30)\n self.lab = Label(root,text='Input image url:').grid(row=0,columnspan=2,padx=10)\n self.image_url = Entry(root,width=80)\n self.image_url.grid(row=0,column=2,columnspan=3,padx=10)\n\n self.root.grid_rowconfigure(1,minsize=30)\n self.lab = Label(root,text='OR').grid(row=1,column=2,columnspan=2,padx=10)\n\n self.root.grid_rowconfigure(2,minsize=30)\n self.button_resize = Button(root, text='Select .txt file with IMAGES URLS list', font='Ariel 10 bold', command=self.open_urls_list_file)\n self.button_resize.grid(row=2,column=1,columnspan=4,sticky=W+E,padx=(10,10))\n\n self.root.grid_rowconfigure(3,minsize=40)\n self.lab = Label(root,text='Set image WIDTH (px):').grid(row=3,column=1,padx=(10,0),sticky=E+W)\n self.image_width = Entry(root,width=10)\n self.image_width.grid(row=3,column=2,sticky=W+E,padx=(5,10))\n\n self.lab = Label(root,text='Set image HEIGHT (px):').grid(row=3,column=3,sticky=E)\n self.image_height = Entry(root,width=10)\n self.image_height.grid(row=3,column=4,sticky=W+E,padx=(5,10))\n \n \n self.root.grid_rowconfigure(4,minsize=40)\n self.scrollbar = Scrollbar(root)\n self.scrollbar.grid(row=4, column=5, sticky=N+S+E, padx=(0,10))\n self.textfield = Text(root) \n self.textfield.grid(row=4,column=1,columnspan=4, padx=(10,0))\n self.textfield.config(yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.textfield.yview)\n \n self.root.grid_rowconfigure(5,minsize=40)\n self.button_resize = Button(root, text='RESIZE', font='Ariel 11 bold', command=self.run_resize)\n self.button_resize.grid(row=5,column=2,columnspan=2,sticky=W+E,padx=(10,10))\n\n self.root.grid_rowconfigure(6,minsize=40)\n self.button_resize = Button(root, text='ABOUT', font='Ariel 11 bold', command=self.about)\n self.button_resize.grid(row=6,column=1,sticky=W+E,padx=(10,10))\n\n self.root.grid_rowconfigure(6,minsize=40)\n self.button_exit = Button(root, text='EXIT', fg='red', font='Ariel 11 bold', command=self.exit)\n self.button_exit.grid(row=6,column=4,sticky=W+E,padx=(10,10))\n\n if not os.path.exists('Original_Images'):\n os.mkdir('Original_Images')\n \n if not os.path.exists('Resized_Images'):\n os.mkdir('Resized_Images')\n\n self.resized_images_dir_list = os.listdir(os.path.join(os.getcwd(),'Resized_Images'))\n\n # Part of code for natural sorting.
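        # --- editor's sketch (added; not part of the original file) ---
        # Natural sorting splits a name into digit and non-digit runs so the
        # numeric parts compare as integers rather than character by character:
        #
        #   def natural_key(name):
        #       return [int(p) if p.isdigit() else p for p in re.split('([0-9]+)', name)]
        #
        #   sorted(['10_Image.jpg', '2_Image.jpg'], key=natural_key)
        #   # -> ['2_Image.jpg', '10_Image.jpg']   (plain sorted() would invert these)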
# Credit goes to Jeff Atwood and Mark Byers on Stack Overflow\n # https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n self.resized_images_dir_list_sorted = sorted(self.resized_images_dir_list, key = alphanum_key)\n \n self.textfield.insert(END,'List of files in Resized_Images directory:\\n\\n')\n \n for self.resized_image in self.resized_images_dir_list_sorted:\n self.textfield.insert(END,self.resized_image+'\\n')\n \n self.root.mainloop()\n\n def run_resize(self):\n\n # show the status message, then do the heavy lifting in a worker thread\n # (the old code called insert() inline and handed Thread a None target)\n self.textfield.insert(END,'\\nRESIZING IN PROGRESS...\\nIMAGES ASPECT RATIO WILL BE MAINTAINED...\\nALTERATIONS OF ENTERED WIDTH OR HEIGHT VALUES ARE POSSIBLE...\\n\\n')\n t2 = Thread(target=self.resize_engine)\n t2.start()\n \n \n def resize_engine(self):\n \n try: \n if not hasattr(self,'urls_list_split'): # no url list file loaded yet\n self.image_url_resize = [self.image_url.get()] # make a one-item list from the main window entry\n else:\n self.image_url_resize = self.urls_list_split # already a list\n\n \n self.image_W = int(self.image_width.get())\n self.image_H = int(self.image_height.get())\n \n for self.image_in_url in self.image_url_resize:\n \n try:\n \n self.image_number = os.listdir(os.path.join(os.getcwd(),'Resized_Images'))\n self.i = len(self.image_number)\n\n urllib.request.urlretrieve(self.image_in_url, os.path.join(os.getcwd(),'Original_Images', str(self.i)+'_Image.jpg'))\n self.img = Image.open(os.path.join(os.getcwd(),'Original_Images', str(self.i)+'_Image.jpg'))\n self.img.thumbnail((self.image_W,self.image_H))\n self.image_W_resized, self.image_H_resized = self.img.size\n self.img.save(os.path.join(os.getcwd(),'Resized_Images', str(self.i)+'_ResizedImage_'+str(self.image_W_resized)+'x'+str(self.image_H_resized)+'.jpg'))\n\n self.textfield.insert(END,os.path.join('Resized_Images',str(self.i)+'_ResizedImage_'+str(self.image_W_resized)+'x'+str(self.image_H_resized)+'.jpg')+'\\n')\n \n\n except Exception as e:\n self.textfield.insert(END,'\\nLink '+ self.image_in_url + ' is corrupted.\\n' + str(e) +'\\n\\n')\n \n\n self.textfield.insert(END,'IMAGE RESIZING FINISHED\\n')\n self.textfield.see(END)\n\n del self.urls_list_split\n\n except AttributeError: # if self.urls_list_split has already been deleted\n pass\n\n except ValueError:\n self.textfield.insert(END,'NO URL || HEIGHT AND WIDTH ARE NOT ENTERED OR VALUES ARE NOT AN INTEGER\\n')\n\n\n def open_urls_list_file(self):\n \n self.open_url_address_file = askopenfilename(title = \"Select file with image url addresses\",filetypes = [(\"url txt file\",\"*.txt\")])\n\n with open(self.open_url_address_file,'r') as file:\n self.urls_list = file.read()\n\n if ('http://' in self.urls_list) or ('https://' in self.urls_list):\n self.textfield.delete('1.0',END)\n self.urls_list_split = self.urls_list.split()\n self.textfield.insert(END, self.open_url_address_file+ ' file with urls list loaded... \\nSet image dimensions and click RESIZE.')\n return self.urls_list_split\n\n else:\n self.textfield.delete('1.0',END)\n self.textfield.insert(END,\"CAN'T FIND PROPER URL ADDRESS IN SELECTED FILE. 
CHOOSE ANOTHER FILE\\n\")\n \n def about(self):\n self.about = Toplevel(self.root)\n self.about.title('About')\n self.lab_name = Label(self.about,text='Author: Aleksandar Kurjakov', font='Ariel 12 bold').grid(row=1,column=1,padx=20,pady=15)\n self.lab_contact = Label(self.about,text='Contact: kurjak021@gmail.com', font='Ariel 12 bold').grid(row=2,column=1,padx=20,pady=15)\n self.lab_version = Label(self.about,text='URL ImageResizer V1.0', font='Ariel 10 bold').grid(row=3,column=1,padx=20,pady=15)\n self.about.focus_set() \n self.about.grab_set()\n\n def exit(self):\n self.root.destroy()\n \nroot = Tk()\nApp = ImageResizer(root)\n","sub_path":"ImageResizer_from_url_list.py","file_name":"ImageResizer_from_url_list.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"351933196","text":"from flask import Flask, render_template, request, redirect, jsonify\nfrom json import dump\nfrom Gameboard import Gameboard\nimport db\n\n\napp = Flask(__name__)\n\nimport logging\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\ngame = None\n\n'''\nImplement '/' endpoint\nMethod Type: GET\nreturn: template player1_connect.html and status = \"Pick a Color.\"\nInitial Webpage where gameboard is initialized\n'''\n\n\n@app.route('/', methods=['GET'])\ndef player1_connect():\n global game\n game = Gameboard()\n return render_template(\"player1_connect.html\", status = 'Pick a Color.')\n\n\n'''\nHelper function that sends to all boards don't modify\n'''\n\n\n@app.route('/autoUpdate', methods=['GET'])\ndef updateAllBoards():\n try:\n return jsonify(move=game.board, winner=game.game_result,\n color=game.player1)\n except Exception:\n return jsonify(move=\"\")\n\n\n'''\nImplement '/p1Color' endpoint\nMethod Type: GET\nreturn: template player1_connect.html and status = <Color picked>\nAssign player1 their color\n'''\n\n\n@app.route('/p1Color', methods=['GET'])\ndef player1_config():\n if(request.args.get('color') == 'red'):\n game.player1 = \"red\"\n elif(request.args.get('color') == 'yellow'):\n game.player1 = \"yellow\"\n\n return render_template(\"player1_connect.html\", status = game.player1)\n\n\n\n'''\nImplement '/p2Join' endpoint\nMethod Type: GET\nreturn: template p2Join.html and status = <Color picked> or Error\nif P1 didn't pick color first\n\nAssign player2 their color \n'''\n\n\n@app.route('/p2Join', methods=['GET'])\ndef p2Join():\n if(game.player1 == \"red\"):\n game.player2 = \"yellow\"\n elif(game.player1 == \"yellow\"):\n game.player2 = \"red\"\n else:\n return \"Error\"\n\n return render_template(\"p2Join.html\", status = game.player2)\n\n'''\nImplement '/move1' endpoint\nMethod Type: POST\nreturn: jsonify (move=<CurrentBoard>,\ninvalid=True or False, winner = <currWinner>)\nIf move is valid --> invalid = False else invalid = True\nIf invalid == True, also return reason= <Why Move is Invalid>\n\nProcess Player 1's move\n'''\n\n\n@app.route('/move1', methods=['POST'])\ndef p1_move():\n col = int(request.json['column'][3]) - 1\n ret = game.validate_move('p1', col)\n\n if (ret == \"Valid\"):\n game.add_chip(game.player1, col)\n return jsonify(move=game.board, invalid=False, winner=game.game_result)\n else:\n return jsonify(move=game.board, invalid=True, reason=ret, winner=game.game_result)\n\n'''\nSame as '/move1' but instead proccess Player 2\n'''\n\n\n@app.route('/move2', methods=['POST'])\ndef p2_move():\n col = int(request.json['column'][3]) - 1\n ret = game.validate_move('p2', col)\n\n 
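    # --- editor's note (assumed client payload; not from the original code) ---
    # The front end is expected to POST JSON like {"column": "col4"}: the digit at
    # index 3 of the string gives the 1-based column, so int('col4'[3]) - 1 == 3.
    # This only holds for the single-digit labels col1..col7 of a 7-column board.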
if (ret == \"Valid\"):\n game.add_chip(game.player2, col)\n return jsonify(move=game.board, invalid=False, winner=game.game_result)\n else:\n return jsonify(move=game.board, invalid=True, reason=ret, winner=game.game_result)\n \n\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='127.0.0.1')\n","sub_path":"Skeleton/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"403311116","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport annoying.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nickname', models.CharField(unique=True, max_length=64)),\n ('email', models.EmailField(max_length=75)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('pub_date', models.DateTimeField(verbose_name=b'date published')),\n ('content', models.TextField(max_length=20000)),\n ('keywords', annoying.fields.JSONField(null=True, blank=True)),\n ('author', models.ForeignKey(to='blog.Author')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"blog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"612618020","text":"import base64\nimport string\nimport random\nimport hashlib\nimport time\nimport struct\nfrom Crypto.Cipher import AES\nimport socket\nimport importlib\nimport config.cypher as config\nimport ast\n\nblock_size = 32\naes_key = base64.b64decode(config.aes_key)\n\ndef encrypt(data, userid):\n if not isinstance(data, dict):\n raise RuntimeError('Invalid data type')\n data = str(data)\n sample = string.digits + string.ascii_letters\n rand_str = ''.join(random.sample(sample, 16))\n data = data.encode('utf-8')\n data = bytes(rand_str, 'utf-8') + struct.pack('I', socket.htonl(len(data))) + data + bytes(userid, 'utf-8')\n data_length = len(data)\n pad_length = block_size - (data_length % block_size)\n pad = bytes(chr(pad_length), 'utf-8')\n data = data + pad * pad_length\n ase_cryptor = AES.new(aes_key, AES.MODE_CBC, aes_key[:16])\n cipher = ase_cryptor.encrypt(data)\n return base64.b64encode(cipher).decode('ascii')\n\ndef decrypt(data):\n try:\n ase_cryptor = AES.new(aes_key, AES.MODE_CBC, aes_key[:16])\n plain_text = ase_cryptor.decrypt(base64.b64decode(data))\n pad_length = plain_text[-1]\n content = plain_text[16:-pad_length]\n data_length = socket.ntohl(struct.unpack('I', content[:4])[0])\n return ast.literal_eval(content[4:4+data_length].decode('utf-8')), content[4+data_length:].decode('utf-8')\n except Exception as err:\n return {}, ''\n\ndef get_signature(*arg):\n signature_param = list(arg)\n signature_param.sort()\n return hashlib.sha1(''.join(signature_param).encode('utf-8')).hexdigest()\n\nif __name__ == '__main__':\n encrypt_data = encrypt('fuck you', 'admin')\n decrypt_data = decrypt(encrypt_data)\n 
print(encrypt_data, decrypt_data)\n","sub_path":"locode/cypher.py","file_name":"cypher.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"158228244","text":"from utils import read_csv, to_csv\nfrom utils import sort_by_key\nfrom utils import read_group\n\nfrom random import shuffle, choice\nimport os\nimport sys\nimport glob\n\ndef read_input(run, experiment_name):\n all_input_dicts = []\n input_dicts_batch = []\n f_paths_all = glob.glob(f'../prolific_input/run*-group_*/*.csv')\n f_paths_run_name = glob.glob(f'../prolific_input/run{run}-group_{experiment_name}/*.csv')\n header_path = f'../prolific_input/run{run}-group_{experiment_name}/header.txt'\n batch_numbers = []\n # Get info current batch\n if os.path.isfile(header_path):\n with open(header_path) as infile:\n header = infile.read().split(',')\n for f in f_paths_run_name:\n input_dicts = read_csv(f, header = header)\n input_dicts_batch.extend(input_dicts)\n if 'TEST' not in f:\n f_name = f.split('/')[-1]\n batch_n = int(f_name.split('-')[2].split('.')[0][len('batch'):])\n ##../prolific_input/run3-group_experiment1/qu70-s_qu70-batch11.csv\n batch_numbers.append(batch_n)\n # get all input dicts:\n for f in f_paths_all:\n run = f.replace('../prolific_input/', '').split('-')[0]\n #../prolific_input/run3-group_experiment1/qu70-s_qu70-batch11.csv\n input_dicts = read_csv(f, header = header)\n all_input_dicts.extend(input_dicts)\n return all_input_dicts, input_dicts_batch, batch_numbers\n\n\ndef collect_not_annotated(input_dicts, question_dicts):\n\n questions_not_annotated = []\n input_by_quid = sort_by_key(input_dicts, ['quid'])\n for d in question_dicts:\n quid = d['quid']\n if quid not in input_by_quid:\n questions_not_annotated.append(d)\n return questions_not_annotated\n\n\ndef get_annotated_questions(input_dicts, question_dicts):\n\n input_by_quid = sort_by_key(input_dicts, ['quid'])\n questions_by_quid = sort_by_key(question_dicts, ['quid'])\n questions_annotated = []\n for quid in input_by_quid:\n if quid in questions_by_quid:\n question = questions_by_quid[quid][0]\n questions_annotated.append(question)\n return questions_annotated\n\n\ndef collect_invalid(input_dicts, question_dicts):\n\n questions_by_pair = sort_by_key(question_dicts, ['property', 'concept'])\n questions_annotated = get_annotated_questions(input_dicts, question_dicts)\n questions_anntotated_by_pair = sort_by_key(questions_annotated, ['property', 'concept'])\n invalid_annotations = []\n\n for pair, questions_annotated in questions_anntotated_by_pair.items():\n questions = questions_by_pair[pair]\n if len(questions) != len(questions_annotated):\n #print('missing annotations for pair:', pair, len(questions), len(questions_annotated))\n invalid_annotations.extend(questions)\n return invalid_annotations\n\ndef get_available_questions(input_dicts, question_dicts):\n\n questions_for_annotation = []\n questions_not_annotated = collect_not_annotated(input_dicts, question_dicts)\n print('not annotated yet:', len(questions_not_annotated))\n invalid_annotations = collect_invalid(input_dicts, question_dicts)\n print('not valid', len(invalid_annotations))\n\n not_annotated_pair = sort_by_key(questions_not_annotated, ['property', 'concept'])\n invalid_pair = sort_by_key(invalid_annotations, ['property', 'concept'])\n\n for pair, questions in not_annotated_pair.items():\n if pair in invalid_pair:\n questions_for_annotation.extend(invalid_pair[pair])\n else:\n 
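            # editor's note: if any annotation of a (property, concept) pair was
            # invalid, the whole pair is re-queued above; otherwise the pair's
            # not-yet-annotated questions are queued unchanged.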
questions_for_annotation.extend(questions)\n test_for_wrong_questions(questions_for_annotation)\n return questions_for_annotation, invalid_annotations\n\ndef test_for_wrong_questions(questions_for_annotation):\n wrong_n_questions = []\n for_annotation_by_pair = sort_by_key(questions_for_annotation, ['property', 'concept'])\n for pair, questions in for_annotation_by_pair.items():\n if len(questions) > 10 or len(questions) < 3:\n wrong_n_questions.append((len(questions), pair))\n assert len(wrong_n_questions) == 0, 'Number of questions per pair not correct.'\n\n\n\ndef get_check_and_test():\n checks = read_csv('../questions/checks.csv')\n tests = read_csv('../questions/tests.csv')\n\n rand_check = choice(checks)\n rand_test = choice(tests)\n tests_checks = [rand_check, rand_test]\n for d in tests_checks:\n if '' in d:\n d.pop('')\n return tests_checks\n\ndef get_batch(questions_to_annotate, n_qu = 70):\n batch = []\n properties = set()\n # shuffle questions:\n shuffle(questions_to_annotate)\n questions_by_pair = sort_by_key(questions_to_annotate, ['property', 'concept'])\n available_properties = set([p.split('-')[0] for p in questions_by_pair.keys()])\n\n if n_qu > len(questions_to_annotate):\n print(f'only {len(questions_to_annotate)} left - adding all to batch.')\n batch.extend(questions_to_annotate)\n else:\n print(f'still more than {n_qu} questions available.')\n for pair, questions in questions_by_pair.items():\n prop = pair.split('-')[0]\n if len(batch) < n_qu:\n if prop not in properties:\n #print('found a new one:', prop, len(batch))\n batch.extend(questions)\n properties.add(prop)\n else:\n props_not_used = available_properties.difference(properties)\n #print('properties not used:', len(props_not_used), len(batch))\n if len(props_not_used) > 0:\n continue\n else:\n batch.extend(questions)\n properties.add(prop)\n #print('no more properties, adding questions:', len(questions))\n else:\n print('found enough questions', len(batch))\n break\n\n return batch\n\n\ndef batch_to_file(batch, url, experiment_name, run, n_qu, batch_n):\n\n # header = ['quid', 'question', 'example_pos', 'example_neg']\n header_new = ['quid','listNr', 'description', 'exampleTrue', 'exampleFalse',\\\n 'triple', 'completionUrl', 'name']\n dirpath = f'../prolific_input/run{run}-group_{experiment_name}/'\n batch_name = f'qu{n_qu}-s_qu{n_qu}-batch{batch_n}'\n filepath = f'{dirpath}{batch_name}.csv'\n pl_name = f'Agree or disagree (run{run}-{experiment_name}-batch{batch_n}-{n_qu}-{n_qu})'\n\n ### write header###\n if not os.path.isdir(dirpath):\n os.mkdir(dirpath)\n header_path = f'{dirpath}header.txt'\n if not os.path.isfile(header_path):\n with open(header_path, 'w') as outfile:\n outfile.write(','.join(header_new))\n ###\n new_dicts = []\n for d in batch:\n triple = f\"{d['relation']}-{d['property']}-{d['concept']}\"\n new_d = dict()\n new_d['quid'] = d['quid']\n new_d['listNr'] = d['listNr']\n new_d['description'] = d['question']\n new_d['exampleTrue'] = d['example_pos']\n new_d['exampleFalse'] = d['example_neg']\n new_d['run'] = run\n new_d['subList'] = 1\n new_d['completionUrl'] = url\n new_d['triple'] = triple\n new_d['name'] = pl_name\n new_dicts.append(new_d)\n to_csv(filepath, new_dicts, header=True)\n return filepath\n\n\ndef test_duplicates(input_dicts, batch_dicts, invalid_annotations):\n\n quids_batch = set([d['quid'] for d in batch_dicts])\n quids_input = set([d['quid'] for d in input_dicts])\n quids_invalid = set([d['quid'] for d in invalid_annotations])\n overlap = quids_batch.intersection(quids_input)\n 
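    # --- editor's toy illustration of the set algebra here (added comment) ---
    # quids_batch = {'q1', 'q2', 'check-7'}, quids_input = {'q2', 'check-7'},
    # quids_invalid = {'q2'}  ->  valid_overlap = {'check-7'}; only ids starting
    # with 'test'/'check' may legitimately reappear across batches.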
valid_overlap = overlap.difference(quids_invalid)\n #print(f'Overlap between annotated and current batch: {len(valid_overlap)}')\n #print(valid_overlap)\n problematic_overlap = []\n for quid in valid_overlap:\n if quid.startswith('test') or quid.startswith('check'):\n continue\n else:\n problematic_overlap.append(quid)\n assert len(problematic_overlap) == 0, 'Already annotated questions in batch!'\n\ndef suggest_cost(n_questions):\n # UK min wage = 8.21\n # divided by 60 gives min wage per minute\n per_minute = 0.13\n time_per_question_seconds = 8\n price_per_question = (per_minute/60) *7\n final = n_questions * price_per_question\n estimated_time = (n_questions * time_per_question_seconds) / 60\n print(f'estimated time: {estimated_time} minutes')\n print(f'suggested price: {final}')\n return final, estimated_time\n\n\ndef print_task_intro(run):\n\n # load description:\n\n with open(f'../task_set_up/description_run{run}.txt') as infile:\n text_description = infile.read()\n\n with open(f'../task_set_up/instructions_run{run}.txt') as infile:\n text_instructions = infile.read()\n\n print('-------------------------------------\\n')\n print_des = input('Print description? (y/n)')\n if print_des == 'y':\n print('\\n----- Task description ------\\n')\n print(text_description)\n print_instructions = input('Print instructions? (y/n)')\n if print_instructions == 'y':\n print('\\n----- Instructions ------\\n')\n print(text_instructions)\n print_whitelist = input(\"Print whitelist (y/n)?\")\n if print_whitelist == 'y':\n with open('../task_set_up/whitelist.txt') as infile:\n whitelist = infile.read()\n print('------- Whitelist--------')\n print(whitelist)\n return print_whitelist\n\n\ndef update_log(new_log_dict):\n path = '../task_set_up/experiment_log.csv'\n log_dicts = read_csv(path)\n log_dicts.append(new_log_dict)\n to_csv(path, log_dicts)\n print(f'updated log: {path}')\n\ndef create_new_batch(run, experiment_name, url, n_participants, n_lists, n_qu=70, test=False):\n exp_dict = dict()\n all_input_dicts, input_dicts_batch, batch_numbers = read_input(run, experiment_name)\n print(batch_numbers)\n question_path = f'../questions/run{run}-all-restricted_True.csv'\n question_dicts = read_csv(question_path)\n selected_properties = read_group(experiment_name)\n\n\n print('available for batch:')\n questions_to_annotate_batch, invalid_annotations = get_available_questions(input_dicts_batch,\\\n question_dicts)\n print('availabel in total:')\n questions_to_annotate_total, invalid_annotations = get_available_questions(all_input_dicts,\\\n question_dicts)\n questions_in_selection = [d for d in questions_to_annotate_batch \\\n if d['property'] in selected_properties]\n questions_in_selection_total = [d for d in question_dicts \\\n if d['property'] in selected_properties]\n\n ### Get counts ###\n # Total dataset\n n_total = len(question_dicts)\n n_not_annotated = len(questions_to_annotate_total)\n n_annotated = n_total - n_not_annotated\n percent_total = round(n_annotated/n_total, 3) * 100\n ###\n\n # Experiment group\n n_experiment_group = len(questions_in_selection_total)\n n_not_annotated_experiment_group = len(questions_in_selection)\n n_annotated_experiment_group = n_experiment_group - n_not_annotated_experiment_group\n percent_experiment_group = round(n_annotated_experiment_group/n_experiment_group, 3) * 100\n ###\n ###########\n\n\n # Create new batch\n if batch_numbers:\n highest_batch_number = max(batch_numbers)\n else:\n highest_batch_number = 0\n if test == False:\n current_batch_n = 
highest_batch_number + 1\n else:\n current_batch_n = 'TEST'\n\n full_batch = []\n for n in range(n_lists):\n test_check_questions = get_check_and_test()\n new_batch = get_batch(questions_in_selection, n_qu = n_qu)\n # test for wrong number of questions\n test_for_wrong_questions(new_batch)\n # Add tests and checks (one randomly picked one each)\n new_batch.extend(test_check_questions)\n # Test if there are not duplicates\n test_duplicates(input_dicts_batch, new_batch, invalid_annotations)\n # Add listnr\n [d.update({'listNr' : str(n+1)}) for d in new_batch]\n print('Listnr', n+1, len(new_batch))\n full_batch.extend(new_batch)\n # Write batch to file\n batch_path = batch_to_file(full_batch, url, experiment_name, run, n_qu, current_batch_n)\n\n print(f'Number of questions in the total dataset: {n_total}')\n print(f'Number of questions in {experiment_name}: {n_experiment_group}')\n print(f'Number of annotated questions: {n_annotated}\\\n (of which in {experiment_name}: {n_annotated_experiment_group})')\n print(f'Percentage of annotated questions of the total: {percent_total}%')\n print(f'Percentage of annotated questions of {experiment_name}: {percent_experiment_group}%')\n print(f'Annotated {highest_batch_number} batches so far.')\n print(f'Created batch {current_batch_n} with {len(new_batch)} questions.')\n print(f'New batch written to: {batch_path}')\n pl_n = f'Agree or disagree (run{run}-{experiment_name}-batch{current_batch_n}-{n_qu}-{n_qu})'\n print(pl_n)\n p_whitelist = print_task_intro(run)\n print('\\n------ Cost ---------')\n print(f'\\nCost suggestion for {len(new_batch)} questions:\\n')\n sug_cost, t = suggest_cost(len(new_batch))\n\n total_cost_no_fee = sug_cost * n_participants\n # fill log dict:\n exp_dict['name_lingoturk'] = pl_n\n exp_dict['name_prolific'] = pl_n\n exp_dict['group'] = experiment_name\n exp_dict['batch'] = current_batch_n\n exp_dict['run'] = run\n exp_dict['n_questions'] = n_qu\n exp_dict['n_questions_batch'] = len(new_batch)\n exp_dict['n_participants'] = n_participants\n exp_dict['minutes_planned'] = t\n exp_dict['reward (pounds)'] = sug_cost\n total_cost = float(input('Enter total cost shown on Prolific: '))\n exp_dict['total_cost (pounds)'] = total_cost\n print(f'Prolific charges {float(total_cost) - total_cost_no_fee} pounds fees.')\n exp_dict['posted'] = 'yes'\n exp_dict['results_downloaded'] = ''\n exp_dict['summary downloaded'] = ''\n exp_dict['approved'] = ''\n exp_dict['comment'] = ''\n exp_dict['Code'] = url\n exp_dict['whitelist'] = p_whitelist\n if test == False:\n update_log(exp_dict)\n\n\ndef main():\n run = sys.argv[1]\n experiment_name = 'experiment3'\n url = sys.argv[2]\n # number of lists within batch\n n_lists = int(sys.argv[3])\n n_participants_per_batch = int(sys.argv[4])\n n_participants = n_lists * n_participants_per_batch\n\n #url = 'test'\n #purpose = 'test'\n if url == 'TEST':\n test = True\n url = 'https://piasommerauer.github.io/annotation'\n elif url != 'TEST':\n test = False\n\n #create_new_batch(run, experiment_name, url, n_participants, n_qu=3, test=test)\n create_new_batch(run, experiment_name, url, n_participants, n_lists, n_qu=3, test=test)\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/.ipynb_checkpoints/create_multibatches-checkpoint.py","file_name":"create_multibatches-checkpoint.py","file_ext":"py","file_size_in_byte":15008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119066767","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef 
getTargetColor(target):\n return 'b.' if target == 1 else 'r.'\n\n\ndef plot2dData(inData, targets):\n classA = inData[np.where(targets > 0)[0]]\n classB = inData[np.where(targets < 0)[0]]\n\n for c in [(classA, 'b.'), (classB, 'r.')]:\n plt.plot([p[0] for p in c[0]],\n [p[1] for p in c[0]],\n c[1]\n )\n\n plt.axis('equal')\n plt.show()\n\n\ndef plotDecBoundary(inData, targets, indicatorFunc):\n xGrid = np.linspace(-4, 8)\n yGrid = np.linspace(-4, 8)\n\n grid = np.array([[indicatorFunc(x, y)\n for x in xGrid]\n for y in yGrid])\n\n plt.contour(xGrid, yGrid, grid,\n (-1.0, 0.0, 1.0),\n colors=('red', 'black', 'blue'),\n linewidths=(1, 3, 1)\n )\n\n classA = inData[np.where(targets > 0)[0]]\n classB = inData[np.where(targets < 0)[0]]\n\n for c in [(classA, 'b.'), (classB, 'r.')]:\n plt.plot([p[0] for p in c[0]],\n [p[1] for p in c[0]],\n c[1]\n )\n\n # plt.xlim(-10, 10)\n # plt.ylim(-10, 10)\n plt.axis('equal')\n plt.show()\n","sub_path":"Lab2/PlotGenerator.py","file_name":"PlotGenerator.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"202994508","text":"import json\n\n\nclass Env(object):\n env = None\n\n def get(self, key: str) -> str:\n if self.env is None:\n self._readEnv()\n return self.env[key]\n\n def _readEnv(self):\n with open(\"env.json\") as json_data_file:\n self.env = json.load(json_data_file)\n","sub_path":"Bot/Env.py","file_name":"Env.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"369036822","text":"#----------|PROYECTO FINAL| ROAD FIGHTER | JULIAN ANDRÉS LOAIZA OSPINA |--------------------------------#\n\nfrom tkinter import *\nfrom tkinter import ttk, font\nimport time\nimport sys\nimport threading\nimport random \n\n#|Sonidos|---------------------------------------------------\n\nimport pygame\nfrom pygame.locals import *\n\npygame.mixer.init()\n\nsoundMenu = pygame.mixer.Sound(\"menu_theme.wav\")\nsoundBoton = pygame.mixer.Sound(\"menu.wav\")\nsoundOver = pygame.mixer.Sound(\"gameOver.wav\")\nsoundExplosion = pygame.mixer.Sound(\"carExplosion.wav\")\nsoundPass = pygame.mixer.Sound(\"carPass.wav\")\nsoundCar = pygame.mixer.Sound(\"car.wav\")\nsoundSticker = pygame.mixer.Sound(\"sticker.wav\")\nsoundBrake = pygame.mixer.Sound(\"carBrake.wav\")\nsoundInit1 = pygame.mixer.Sound(\"init1.wav\")\nsoundInit2 = pygame.mixer.Sound(\"init2.wav\")\n\n\n#|Menu principal|--------------------------------------------------------------\n\ndef main_menu():\n global raiz, fontMenu, butStart, butContinue, butSinglePlayer, butMultiPlayer, menu, difficulty, varContinue, sound\n \n #|Ventana|--------------------------------------------\n\n\n raiz = Tk()\n raiz.title(\"Road Fighter\")\n raiz.geometry(\"800x600\")\n raiz.resizable(width=False, height=False)\n bit = raiz.iconbitmap('RoadFighter.ico')\n \n #|Fuente|---------------------------------------------------\n\n fontMenu = font.Font(family='Haettenschweiler', size= 25)\n\n #|musica|-------------------------------------------\n\n soundMenu.play()\n\n\n #|Fondo|--------------------------------------------\n\n mainMenu = PhotoImage(file=\"mainMenu1.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0) \n raiz.update()\n time.sleep(1)\n\n mainMenu = PhotoImage(file=\"mainMenu2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0) \n raiz.update()\n time.sleep(0.01)\n \n mainMenu = PhotoImage(file=\"mainMenu3.png\")\n 
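    # --- editor's sketch (assumption, not original code): the intro frame
    # sequence here could be driven by one loop; keep a reference to each
    # PhotoImage so Tkinter does not garbage-collect it before it is drawn:
    #
    #   for frame, pause in [('mainMenu1.png', 1), ('mainMenu2.png', 0.01),
    #                        ('mainMenu3.png', 0.01), ('mainMenu4.png', 0.01)]:
    #       img = PhotoImage(file=frame)
    #       Label(raiz, image=img).place(x=0, y=0)
    #       raiz.update()
    #       time.sleep(pause)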
fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n time.sleep(0.01)\n \n mainMenu = PhotoImage(file=\"mainMenu4.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n time.sleep(0.01)\n \n mainMenu = PhotoImage(file=\"mainMenu.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0) \n \n #|Botones| ---------------------------------------------------------\n\n butMultiPlayer = Button(raiz, text=\"MULTI PLAYER\",bg = \"black\", fg = \"white\",\n font= fontMenu, command= multi_player).place(x= 304, y = 303, height= 56, width= 225)\n\n butContinue = Button(raiz, text=\"CONTINUE\",bg = \"black\", fg = \"white\", \n font= fontMenu, command = _continue_).place(x= 291, y = 426, height= 56, width= 225)\n \n #-----------------------------------------------------------------------\n varContinue = False\n\n #--------------------------------------------------------------\n\n raiz.mainloop()\n \n\n#|Menu multijugador|------------------------------------------------\n\ndef multi_player():\n global mainMenu, fondoMenu, archivo, raiz, difficulty,labelGasolina1, labelGasolina2, fontSubMenu, player1, player2, varContinue, movimiento\n \n movimiento = False\n soundBoton.play()\n\n #|Fondo|----------------------------------------\n \n mainMenu = PhotoImage(file=\"subMenu.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n \n #|fuente| \n \n fontSubMenu = font.Font(family='Agency FB', size= 18) \n\n #|Nombre Jugadores Entradas|----------------------------------------\n \n player1 = StringVar()\n player1.set(\"PLAYER 1\")\n\n \n\n player2 = StringVar()\n player2.set(\"PLAYER 2\")\n\n \n\n BoxPlayer1 = Entry(raiz, textvariable = player1, font= fontSubMenu, \n justify=\"center\").place(x= 50, y = 360,height= 30, width= 200)\n\n BoxPlayer2 = Entry(raiz, textvariable = player2, font= fontSubMenu, \n justify=\"center\").place(x= 528, y = 360,height= 30, width= 200)\n\n #|Dificultades RadioButtons|-------------------------------------------\n\n difficulty= IntVar()\n\n difficulty.set(2)\n \n RadButDtifficulty1= Radiobutton(raiz, value= 1, variable= difficulty,\n bg = \"black\").place(x= 320, y = 328)\n \n RadButDifficulty2= Radiobutton(raiz, value= 2, variable= difficulty, \n bg = \"black\", font= fontSubMenu).place(x= 390, y = 320)\n \n RadButDifficulty3= Radiobutton(raiz, value= 3, variable= difficulty,\n bg = \"black\").place(x= 350, y = 385)\n \n RadDifficulty4= Radiobutton(raiz, value= 4, variable= difficulty,\n bg = \"black\").place(x= 325, y = 440)\n\n RadDifficulty5= Radiobutton(raiz, value= 5, variable= difficulty,\n bg = \"black\").place(x= 390, y = 440)\n\n butStart = Button(raiz, text=\"Start\",bg = \"black\", fg = \"white\", \n font= fontMenu, command = play_multijugador).place(x= 278, y = 515, height= 56, width= 225)\n\n if varContinue == True:\n play_multijugador()\n \n #----------------------------------------------------------------------\n\n#Continue|-------------------------\n\ndef _continue_():\n global varContinue\n\n soundBoton.play()\n\n varContinue = True\n multi_player()\n return\n\ndef play_multijugador():\n global iniciar, mainMenu, fondoMenu, archivo, movimiento, sticker1, sticker2, stickerImage, dicc, movimiento ,varContinue, butSave, minimapY, raiz,canvasIzq, labelGasolina1, labelGasolina2, playerMinimap, canvasDer, dificultad, fontSubMenu, efecto, efectoManchaP1, efectoManchaP2, GameOverP1, GameOverP2, posicionXP1, posicionYP1, roadIzq, posicionXP2, posicionYP2, movimientoP1, movimientoP2, movimiento, 
posicionMinivanY, posicionMinivanX, iniciar, minivan1, minivan2, velocidadPlayer, imagePlayer1, cavasImagePlayer1, imagePlayer2, cavasImagePlayer2, posicionManchaY, posicionManchaX, posicionStickerY, posicionStickerX, posicionMinivanX_2, posicionMinivanY_2, posicionRunnerX, posicionRunnerY, posicionFighterX, posicionFighterX2, posicionFighterY, coords, coords2, contadorGasolina, contadorGasolina2, cont\n\n\n archivo = {}\n\n if varContinue == True:\n partida=open(\"Archivo.py\",\"r\")\n dicc= partida.readline()\n partida.close()\n\n posicionXP1 = (100) \n\n archivo[\"posicionXP1\"] = posicionXP1\n\n\n posicionYP1 = (550)\n\n archivo[\"posicionYP1\"] = posicionYP1\n\n posicionXP2 = (100)\n\n archivo[\"posicionXP2\"] = posicionXP2\n\n posicionYP2 = (550)\n\n archivo[\"posicionYP2\"] = posicionYP2\n\n velocidadPlayer = 1\n\n movimiento = True\n\n contadorGasolina = 200\n\n archivo[\"contadorGasolina\"] = contadorGasolina\n\n contadorGasolina2 = 200\n\n archivo[\"contadorGasolina2\"] = contadorGasolina2\n\n\n#--------Movimiento Player1 ------------------#\n \n\n movimientoP1 = \"none\"\n\n def aKey(event):\n global movimientoP1\n movimientoP1 = \"izq\"\n\n def dKey(event):\n global movimientoP1\n movimientoP1 = \"der\"\n\n def sKey(event):\n global movimientoP1\n movimientoP1 = \"none\"\n \n \n#----------Movimiento player2---------------------#\n \n movimientoP2 = \"none\"\n\n def jKey(event):\n global movimientoP2\n movimientoP2 = \"izq\"\n\n def lKey(event):\n global movimientoP2\n movimientoP2 = \"der\"\n\n def kKey(event):\n global movimientoP2\n movimientoP2 = \"none\"\n \n #|save|-------------------------------------------\n\n def _save_():\n global raiz, movimiento, archivo\n\n movimiento = False\n soundCar.stop()\n\n archivoTxt = open(\"Archivo.py\", \"w\")\n archivoTxt.write(str(archivo))\n archivoTxt.close\n\n raiz.bind('<space>', start)\n \n \n #|stop|-------------------------------------------#\n\n def stop(event):\n global raiz, movimiento, butSave\n movimiento = False\n soundCar.stop()\n\n raiz.bind('<space>', start)\n\n\n #|Interacciones|------------------------------------------------------------------------------\n \n GameOverP1 = False\n GameOverP2 = False\n efectoManchaP1 = False\n efectoManchaP2 = False\n\n def GameOver():\n global GameOverP1, GameOverP2, canvasIzq, mainMenu, fondoMenu, canvasDer, movimiento, imagePlayer1, imagePlayer2, cavasImagePlayer1, cavasImagePlayer2, velocidad, raiz, varContinue\n movimiento = False\n varContinue = False\n soundCar.stop()\n \n if GameOverP1 == \"Empate\":\n soundExplosion.play()\n\n ExplosionP1Image = PhotoImage(file= \"explosion1.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n\n ExplosionP2Image = PhotoImage(file= \"explosion1.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n \n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion2.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n\n ExplosionP2Image = PhotoImage(file= \"explosion2.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion3.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n \n ExplosionP2Image = PhotoImage(file= \"explosion3.png\")\n ExplosionP2 = 
canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n\n time.sleep(0.1)\n\n imagePlayer1 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n raiz.update()\n\n imagePlayer2 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n raiz.update()\n\n ExplosionP1Image = PhotoImage(file= \"explosion4.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n \n ExplosionP2Image = PhotoImage(file= \"explosion4.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion5.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n \n ExplosionP2Image = PhotoImage(file= \"explosion5.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion6.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n \n ExplosionP2Image = PhotoImage(file= \"explosion6.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n\n imagePlayer2 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n raiz.update()\n imagePlayer1 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n raiz.update()\n\n mainMenu = PhotoImage(file=\"winP1P2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n \n \n elif GameOverP1 == True:\n soundExplosion.play()\n\n ExplosionP1Image = PhotoImage(file= \"explosion1.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion2.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion3.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n time.sleep(0.1)\n\n imagePlayer1 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n raiz.update()\n\n ExplosionP1Image = PhotoImage(file= \"explosion4.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion5.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP1Image = PhotoImage(file= \"explosion6.png\")\n ExplosionP1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = ExplosionP1Image)\n raiz.update()\n imagePlayer1 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n raiz.update()\n\n soundPass.play()\n\n for i in range(150):\n canvasDer.move(cavasImagePlayer2, 0, -6)\n raiz.update()\n time.sleep(0.005)\n\n mainMenu = 
PhotoImage(file=\"winP2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n \n elif GameOverP2 == True: \n soundExplosion.play()\n ExplosionP2Image = PhotoImage(file= \"explosion1.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP2Image = PhotoImage(file= \"explosion2.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP2Image = PhotoImage(file= \"explosion3.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n time.sleep(0.1)\n\n imagePlayer2 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n raiz.update()\n\n ExplosionP2Image = PhotoImage(file= \"explosion2.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP2Image = PhotoImage(file= \"explosion5.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n time.sleep(0.1)\n\n ExplosionP2Image = PhotoImage(file= \"explosion6.png\")\n ExplosionP2 = canvasDer.create_image(posicionXP2, posicionYP2, image = ExplosionP2Image)\n raiz.update()\n imagePlayer2 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n raiz.update()\n soundPass.play()\n for i in range(150):\n canvasIzq.move(cavasImagePlayer1, 0, -6)\n raiz.update()\n time.sleep(0.005)\n \n mainMenu = PhotoImage(file=\"winP1.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n\n\n \n elif GameOverP2 == \"fin\":\n soundPass.play()\n for i in range(150):\n canvasIzq.move(cavasImagePlayer1, 0, -6)\n canvasDer.move(cavasImagePlayer2, 0, -6)\n raiz.update()\n time.sleep(0.005)\n\n mainMenu = PhotoImage(file=\"winP1P2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n \n elif GameOverP1 == \"Fuel\":\n \n for i in range(3):\n canvasIzq.move(cavasImagePlayer1, 0, -1 + i)\n canvasDer.move(cavasImagePlayer2, 0, -1 + i)\n raiz.update()\n\n mainMenu = PhotoImage(file=\"winP1P2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n\n elif GameOverP1 == \"Fuel1\":\n soundPass.play()\n \n for i in range(150):\n canvasDer.move(cavasImagePlayer2, 0, -6)\n raiz.update()\n time.sleep(0.005)\n\n mainMenu = PhotoImage(file=\"winP2.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n\n elif GameOverP2 == \"Fuel2\":\n soundPass.play()\n\n for i in range(150):\n canvasIzq.move(cavasImagePlayer1, 0, -6)\n raiz.update()\n time.sleep(0.005)\n\n mainMenu = PhotoImage(file=\"winP1.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n raiz.update()\n\n soundMenu.stop()\n soundOver.play()\n\n GameOverP1 = False \n GameOverP2 = False\n\n \n butRestart = Button(raiz, text=\"RESTART\",bg = \"black\", fg = \"white\",\n font= fontMenu, command= play_multijugador1).place(x= 291, y = 263, height= 56, width= 225)\n\n butContinue = Button(raiz, text=\"CONTINUE\",bg = \"black\", fg = \"white\", \n font= fontMenu, command = _continue_).place(x= 291, y = 334, height= 56, width= 225)\n\n butMenu = Button(raiz, text=\"BACK\",bg = \"black\", fg = \"white\", \n font= fontMenu, command = multi_player1).place(x= 291, 
y = 408, height= 56, width= 225)\n\n raiz.mainloop()\n\n\n def deslizamientoP1Der():\n global imagePlayer1, cavasImagePlayer1\n soundBrake.play()\n raiz.update()\n imagePlayer1 = PhotoImage(file= \"+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.2)\n imagePlayer1 = PhotoImage(file= \"-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n imagePlayer1 = PhotoImage(file= \"player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n return\n\n\n\n def deslizamientoP1Izq():\n global imagePlayer1, cavasImagePlayer1\n soundBrake.play()\n imagePlayer1 = PhotoImage(file= \"-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.2)\n imagePlayer1 = PhotoImage(file= \"+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n imagePlayer1 = PhotoImage(file= \"player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n return\n \n\n def deslizamientoP2Der():\n soundBrake.play()\n global imagePlayer2, cavasImagePlayer2 \n imagePlayer2 = PhotoImage(file= \"+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.2)\n imagePlayer2 = PhotoImage(file= \"-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n imagePlayer2 = PhotoImage(file= \"player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n return\n \n\n def deslizamientoP2Izq():\n soundBrake.play()\n global imagePlayer2, cavasImagePlayer2 \n imagePlayer2 = PhotoImage(file= \"-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.2)\n imagePlayer2 = PhotoImage(file= \"+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n imagePlayer2 = PhotoImage(file= \"player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n return\n\n def efecto_ManchaP1():\n global efectoManchaP1, imagePlayer1, cavasImagePlayer1\n efectoManchaP1 = True\n imagePlayer1, cavasImagePlayer1\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"2+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"3+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"4+player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"5+player1.png\")\n cavasImagePlayer1 = 
canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"+-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"5-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"4-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"3-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"2-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"-player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP1 == False:\n return\n imagePlayer1 = PhotoImage(file= \"player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n if movimiento == False and efectoManchaP1 == False:\n return\n\n efectoManchaP1 = True\n \n def efecto_ManchaP2():\n global efectoManchaP2, imagePlayer2, cavasImagePlayer2\n efectoManchaP2 = True\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2, cavasImagePlayer2\n imagePlayer2 = PhotoImage(file= \"+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"2+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"3+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"4+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"5+player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"+-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"5-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 
= PhotoImage(file= \"4-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"3-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"2-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"-player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n time.sleep(0.1)\n if movimiento == False and efectoManchaP2 == False:\n return\n imagePlayer2 = PhotoImage(file= \"player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n if movimiento == False and efectoManchaP2 == False:\n return\n efectoManchaP2 = False\n\n\n\n #|Reanudar, start|---------------------------------------------------------------------------------------------------------------------------------------\n\n iniciar = True\n\n \n def start(event):\n global raiz, movimiento, iniciar, butSave, dicc, sticker1, sticker2, stickerImage, archivo, minimapY, varContinue, coords, coords2, canvasIzq, canvasDer, playerMinimap, labelGasolina1, velocidad, posicionMinivanX, posicionMinivanY, labelGasolina2, GameOverP1, GameOverP2, efecto, efectoManchaP1, efectoManchaP2, posicionXP2, posicionXP1, posicionYP1, posicionYP2, movimientoP1, movimientoP2, velocidadPlayer, imagePlayer1, cavasImagePlayer1, imagePlayer2, cavasImagePlayer2, posicionManchaY, posicionManchaX, posicionStickerY, posicionStickerX, posicionMinivanX_2, posicionMinivanY_2, posicionRunnerX, posicionRunnerY, posicionFighterX, posicionFighterX2, posicionFighterY, coords, coords2, contadorGasolina, contadorGasolina2, cont\n\n raiz.bind('<space>', stop)\n\n movimiento = True\n\n archivo[\"player1\"] = player1.get()\n\n archivo[\"player2\"] = player2.get()\n\n if iniciar == True:\n raiz.bind('<a>', aKey)\n raiz.bind('<d>', dKey)\n raiz.bind('<s>', sKey)\n raiz.bind('<j>', jKey)\n raiz.bind('<l>', lKey)\n raiz.bind('<k>', kKey)\n\n\n posicionManchaY = random.randint(-1000,-500)\n posicionManchaX = random.randint(30,165)\n\n archivo[\"posicionManchaY\"] = posicionManchaY\n archivo[\"posicionManchaX\"] = posicionManchaX\n\n\n posicionStickerY = random.randint(-7000,-4000)\n posicionStickerX = random.randint(30,165)\n\n archivo[\"posicionStickerY\"] = posicionStickerY\n archivo[\"posicionStickerX\"] = posicionStickerX\n\n posicionMinivanY = random.randint(-2000,-100)\n posicionMinivanX = random.randint(30,165)\n\n archivo[\"posicionMinivanY\"] = posicionMinivanY\n archivo[\"posicionMinivanX\"] = posicionMinivanX \n\n posicionMinivanY_2 = random.randint(-6000,-3000)\n posicionMinivanX_2 = random.randint(30,165)\n\n archivo[\"posicionMinivanY_2\"] = posicionMinivanY_2\n archivo[\"posicionMinivanX_2\"] = posicionMinivanX_2 \n\n posicionRunnerY = random.randint(-3000,-2000)\n posicionRunnerX = random.randint(30,165)\n\n archivo[\"posicionRunnerY\"] = posicionRunnerY\n archivo[\"posicionRunnerX\"] = posicionRunnerX\n\n posicionFighterY = random.randint(-7000,-4000)\n posicionFighterX = random.randint(30,165)\n posicionFighterX2 = posicionFighterX\n\n archivo[\"posicionFighterY\"] = 
posicionFighterY\n archivo[\"posicionFighterX\"] = posicionFighterX\n archivo[\"posicionFighterX2\"] = posicionFighterX2\n\n if varContinue == True:\n\n posicionManchaY = eval(dicc)[\"posicionManchaY\"]\n posicionManchaX = eval(dicc)[\"posicionManchaX\"]\n\n posicionStickerY = eval(dicc)[\"posicionStickerY\"]\n posicionStickerX = eval(dicc)[\"posicionStickerX\"]\n\n posicionMinivanY = eval(dicc)[\"posicionMinivanY\"]\n posicionManchaX = eval(dicc)[\"posicionMinivanX\"]\n\n posicionMinivanY_2 = eval(dicc)[\"posicionMinivanY_2\"]\n posicionMinivanX_2 = eval(dicc)[\"posicionMinivanX_2\"]\n\n posicionRunnerY= eval(dicc)[\"posicionRunnerY\"]\n posicionRunnerX = eval(dicc)[\"posicionRunnerX\"]\n\n posicionFighterY = eval(dicc)[\"posicionFighterY\"] \n posicionFighterX = eval(dicc)[\"posicionFighterX\"]\n posicionFighterX2 = eval(dicc)[\"posicionFighterX2\"]\n\n manchaImage = PhotoImage(file=\"mancha.png\") \n mancha1 = canvasDer.create_image(posicionManchaX, posicionManchaY, image = manchaImage)\n mancha2 = canvasIzq.create_image(posicionManchaX, posicionManchaY, image = manchaImage)\n\n\n\n stickerImage = PhotoImage(file=\"sticker.png\") \n sticker1 = canvasDer.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n sticker2 = canvasIzq.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n\n\n minivanImage = PhotoImage(file=\"minivan.png\") \n minivan1 = canvasDer.create_image(posicionMinivanX, posicionMinivanY, image = minivanImage)\n minivan2 = canvasIzq.create_image(posicionMinivanX, posicionMinivanY, image = minivanImage)\n\n minivan1_2 = canvasDer.create_image(posicionMinivanX_2, posicionMinivanY_2, image = minivanImage)\n minivan2_2 = canvasIzq.create_image(posicionMinivanX_2, posicionMinivanY_2, image = minivanImage)\n\n runnerImage = PhotoImage(file= \"runner.png\")\n runner1 = canvasDer.create_image(posicionRunnerX, posicionRunnerY, image = runnerImage)\n runner2 = canvasIzq.create_image(posicionRunnerX, posicionRunnerY, image = runnerImage)\n\n movimientoRunner = random.randint(0,1)\n\n velocidadRunner = 0.5\n\n fighterImage = PhotoImage(file= \"fighter.png\")\n fighter1 = canvasDer.create_image(posicionFighterX, posicionFighterY, image = fighterImage)\n fighter2 = canvasIzq.create_image(posicionFighterX2, posicionFighterY, image = fighterImage)\n\n velocidadFighter = 0.25\n\n #|Animación Init|----------------------------------\n\n soundCar.stop()\n \n for i in range(100):\n canvasIzq.move(cavasImagePlayer1, 0, -1)\n canvasDer.move(cavasImagePlayer2, 0, -1)\n time.sleep(0.01)\n raiz.update()\n \n initImage = PhotoImage(file=\"init1.png\")\n initMinimap = canvasMinimap.create_image(85, 215, image = initImage)\n raiz.update()\n soundInit1.play()\n\n initImage = PhotoImage(file=\"init2.png\")\n initMinimap = canvasMinimap.create_image(85, 215, image = initImage)\n time.sleep(1)\n raiz.update()\n soundInit1.play()\n\n initImage = PhotoImage(file=\"init3.png\")\n \n initMinimap = canvasMinimap.create_image(85, 215, image = initImage)\n time.sleep(1)\n raiz.update()\n soundInit2.play()\n\n\n #--------------------------------------------------\n\n \n movimiento = True\n\n \n#------------------------------------------------------------- \n iniciar = False\n\n velocidad = difficulty.get()\n\n archivo[\"velocidad\"] = velocidad\n\n\n coords = canvasDer.coords(roadDer)\n\n archivo[\"posicionRoadDer\"] = coords[1]\n\n coords2 = canvasIzq.coords(roadIzq)\n\n archivo[\"posicionRoadIzq\"] = coords2[1]\n\n cont = 0\n archivo[\"cont\"] = cont\n\n carSound = True\n\n 
if varContinue == True:\n\n velocidad = eval(dicc)[\"velocidad\"]\n contadorGasolina = eval(dicc)[\"contadorGasolina\"]\n contadorGasolina2 = eval(dicc)[\"contadorGasolina2\"]\n cont = eval(dicc)[\"cont\"]\n\n\n \n while True:\n if movimiento == True:\n if carSound == True:\n soundMenu.stop()\n soundOver.stop()\n soundCar.play()\n carSound = False\n \n canvasDer.move(roadDer, 0, velocidad)\n\n canvasIzq.move(roadIzq, 0, velocidad)\n\n coords = canvasDer.coords(roadDer)\n archivo[\"posicionRoadDer\"] = coords[1]\n\n coords2 = canvasIzq.coords(roadIzq)\n archivo[\"posicionRoadIzq\"] = coords2[1]\n\n #|gasolina y minimapa|--------------------------------------------\n\n\n cont += 1\n archivo[\"cont\"] = cont\n \n\n if cont % 100 == 0:\n\n \n contadorGasolina -= velocidad\n\n contadorGasolina2 -= velocidad\n\n\n if velocidad == 1:\n canvasMinimap.move(playerMinimap, 0, (-velocidad - 0.49)) \n minimapY -= (velocidad + 0.49) \n \n elif velocidad == 2:\n canvasMinimap.move(playerMinimap, 0, (-velocidad - 0.975)) \n minimapY -= velocidad + 0.975\n\n elif velocidad == 3:\n canvasMinimap.move(playerMinimap, 0, (-velocidad - 1.46))\n minimapY -= velocidad + 1.46\n\n elif velocidad == 4:\n canvasMinimap.move(playerMinimap, 0, (-velocidad - 1.95))\n minimapY -= velocidad + 1.95\n\n elif velocidad == 5:\n canvasMinimap.move(playerMinimap, 0, (-velocidad - 2.45)) \n minimapY -= velocidad + 2.45\n\n gasolina1.set(contadorGasolina)\n gasolina2.set(contadorGasolina2)\n\n\n\n raiz.update()\n \n #|Jugador1 y Jugador2 Efecto Explosión|------------------\n \n if ((posicionXP1 > 171.5) and (posicionXP2 > 171.5)) or ((posicionXP1 < 26) and (posicionXP2 < 26)) or ((posicionXP1 > 171.5) and (posicionXP2 < 26)) or ((posicionXP1 < 26) and (posicionXP2 > 171.5)):\n if efectoManchaP1 == True and efectoManchaP2 == True:\n efectoManchaP1 = False\n efectoManchaP2 = False\n GameOverP1 = \"Empate\"\n imagePlayer1 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n imagePlayer2 = PhotoImage(file=\"transparent.png\")\n cavasImagePlayer2 = canvasIzq.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n raiz.update()\n GameOver()\n\n \n #|MOVE JUGADOR1 |--------------------------------\n\n if movimientoP1 == \"der\" and posicionXP1 < 172:\n canvasIzq.move(cavasImagePlayer1, velocidadPlayer, 0)\n raiz.update()\n posicionXP1 += velocidadPlayer\n \n if (posicionXP1 > 171.5):\n canvasIzq.move(cavasImagePlayer1, -velocidadPlayer, 0)\n raiz.update()\n posicionXP1 -= velocidadPlayer\n\n deslizarP1Der = threading.Thread(target=deslizamientoP1Der)\n\n deslizarP1Der.start()\n\n movimientoP1 = \"izq\"\n\n contadorGasolina -= velocidad\n\n raiz.update()\n\n if efectoManchaP1 == True:\n GameOverP1 = True\n efectoManchaP1 = False\n raiz.update()\n GameOver()\n\n \n elif movimientoP1 == \"izq\" and posicionXP1 > 25:\n canvasIzq.move(cavasImagePlayer1, -velocidadPlayer, 0)\n raiz.update()\n posicionXP1 -= velocidadPlayer\n \n if (posicionXP1 < 26):\n canvasIzq.move(cavasImagePlayer1, velocidadPlayer, 0)\n\n deslizarP1Izq = threading.Thread(target=deslizamientoP1Izq)\n\n deslizarP1Izq.start()\n\n\n posicionXP1 += velocidadPlayer\n\n movimientoP1 = \"der\"\n\n contadorGasolina -= velocidad\n\n raiz.update()\n\n if efectoManchaP1 == True:\n efectoManchaP1 = False\n raiz.update()\n GameOverP1 = True\n GameOver()\n\n \n \n elif movimientoP1 == \"none\":\n canvasIzq.move(cavasImagePlayer1, 0, 0)\n raiz.update()\n\n #|MOVE JUGADOR2| 
-------------------------------\n\n if movimientoP2 == \"der\" and posicionXP2 < 172:\n canvasDer.move(cavasImagePlayer2, velocidadPlayer, 0)\n raiz.update()\n posicionXP2 += velocidadPlayer\n \n if (posicionXP2 > 171.5):\n canvasDer.move(cavasImagePlayer2, -velocidadPlayer, 0)\n\n deslizarP2Der = threading.Thread(target=deslizamientoP2Der)\n deslizarP2Der.start()\n\n posicionXP2 -= velocidadPlayer\n\n contadorGasolina2 -= velocidad\n \n raiz.update()\n \n movimientoP2 = \"izq\"\n\n if efectoManchaP2 == True:\n efectoManchaP2 = False\n raiz.update()\n GameOverP2 = True\n GameOver()\n \n \n elif movimientoP2 == \"izq\" and posicionXP2 > 25:\n canvasDer.move(cavasImagePlayer2, -velocidadPlayer, 0)\n raiz.update()\n posicionXP2 -= velocidadPlayer\n \n if (posicionXP2 < 26):\n canvasDer.move(cavasImagePlayer2, velocidadPlayer, 0)\n\n deslizarP2Izq = threading.Thread(target=deslizamientoP2Izq)\n\n deslizarP2Izq.start()\n raiz.update()\n\n posicionXP2 += velocidadPlayer\n\n contadorGasolina2 -= velocidad\n\n movimientoP2 = \"der\"\n\n if efectoManchaP2 == True:\n efectoManchaP2 = False\n raiz.update()\n GameOverP2 = True\n GameOver()\n \n \n elif movimientoP2 == \"none\":\n canvasDer.move(cavasImagePlayer2, 0, 0)\n raiz.update()\n\n #|Game Over|---------\n \n if (posicionXP1 in list(range(posicionMinivanX - 20, posicionMinivanX + 20))) and (posicionYP1 in list(range(posicionMinivanY - 50, posicionMinivanY + 50))) and (posicionXP2 in list(range(posicionMinivanX - 20, posicionMinivanX + 20))) and (posicionYP2 in list(range(posicionMinivanY - 50, posicionMinivanY + 50))):\n GameOverP1 = \"Empate\" \n GameOver()\n\n elif (posicionXP1 in list(range(posicionMinivanX - 20, posicionMinivanX + 20))) and (posicionYP1 in list(range(posicionMinivanY - 50, posicionMinivanY + 50))):\n \n GameOverP1 = True\n GameOver()\n\n\n elif (posicionXP2 in list(range(posicionMinivanX - 20, posicionMinivanX + 20))) and (posicionYP2 in list(range(posicionMinivanY - 50, posicionMinivanY + 50))):\n\n GameOverP2= True\n GameOver()\n\n if (posicionXP1 in list(range(posicionMinivanX_2 - 20, posicionMinivanX_2 + 20))) and (posicionYP1 in list(range(posicionMinivanY_2 - 50, posicionMinivanY_2 + 50))) and (posicionXP2 in list(range(posicionMinivanX_2 - 20, posicionMinivanX_2 + 20))) and (posicionYP2 in list(range(posicionMinivanY_2 - 50, posicionMinivanY_2 + 50))):\n GameOverP1 = \"Empate\" \n GameOver()\n\n elif (posicionXP1 in list(range(posicionMinivanX_2 - 20, posicionMinivanX_2 + 20))) and (posicionYP1 in list(range(posicionMinivanY_2 - 50, posicionMinivanY_2 + 50))):\n \n GameOverP1 = True\n GameOver()\n\n\n elif (posicionXP2 in list(range(posicionMinivanX_2 - 20, posicionMinivanX_2 + 20))) and (posicionYP2 in list(range(posicionMinivanY_2 - 50, posicionMinivanY_2 + 50))):\n\n GameOverP2= True\n GameOver()\n\n if (posicionXP1 in list(range(int(posicionRunnerX) - 20, int(posicionRunnerX) + 20))) and (posicionYP1 in list(range(posicionRunnerY - 50, posicionRunnerY + 50))) and (posicionXP2 in list(range(int(posicionRunnerX) - 20, int(posicionRunnerX) + 20))) and (posicionYP2 in list(range(posicionRunnerY - 50, posicionRunnerY + 50))):\n GameOverP1 = \"Empate\"\n GameOver()\n\n elif (posicionXP1 in list(range(int(posicionRunnerX) - 20, int(posicionRunnerX) + 20))) and (posicionYP1 in list(range(posicionRunnerY - 50, posicionRunnerY + 50))):\n \n GameOverP1 = True\n GameOver()\n\n\n elif (posicionXP2 in list(range(int(posicionRunnerX) - 20, int(posicionRunnerX) + 20))) and (posicionYP2 in list(range(posicionRunnerY - 50, 
posicionRunnerY + 50))):\n\n GameOverP2= True\n GameOver()\n\n if (posicionXP1 in list(range(int(posicionFighterX) - 20, int(posicionFighterX) + 20))) and (posicionYP1 in list(range(int(posicionFighterY) - 50, int(posicionFighterY) + 50))) and (posicionXP2 in list(range(int(posicionFighterX2) - 20, int(posicionFighterX2) + 20))) and (posicionYP2 in list(range(int(posicionFighterY) - 50, int(posicionFighterY) + 50))):\n GameOverP1 = \"Empate\"\n GameOver()\n\n elif (posicionXP1 in list(range(int(posicionFighterX) - 20, int(posicionFighterX) + 20))) and (posicionYP1 in list(range(int(posicionFighterY) - 50, int(posicionFighterY) + 50))):\n \n GameOverP1 = True\n GameOver()\n\n\n elif (posicionXP2 in list(range(int(posicionFighterX2) - 20, int(posicionFighterX2) + 20))) and (posicionYP2 in list(range(int(posicionFighterY) - 50, int(posicionFighterY) + 50))):\n\n GameOverP2= True\n GameOver()\n\n if (posicionXP1 in list(range(posicionManchaX - 20, posicionManchaX + 20))) and (posicionYP1 in list(range(posicionManchaY - 20, posicionManchaY + 20))) and (posicionXP2 in list(range(posicionManchaX - 20, posicionManchaX + 20))) and (posicionYP2 in list(range(posicionManchaY - 20, posicionManchaY + 20))):\n contadorGasolina -= velocidad\n contadorGasolina2 -= velocidad\n\n if efectoManchaP1 == False and efectoManchaP2 == False:\n ManchaP1 = threading.Thread(target=efecto_ManchaP1)\n soundBrake.play()\n\n ManchaP1.start()\n\n ManchaP2 = threading.Thread(target=efecto_ManchaP2)\n\n ManchaP2.start()\n\n if movimientoP1 == \"der\":\n movimientoP1 = \"izq\"\n elif movimientoP1 == \"izq\":\n movimientoP1 = \"der\"\n elif movimientoP1 == \"none\":\n movimientoP1 = random.choice([\"der\", \"izq\"])\n\n if movimientoP2 == \"der\":\n movimientoP2 = \"izq\"\n elif movimientoP2 == \"izq\":\n movimientoP2 = \"der\"\n elif movimientoP2 == \"none\":\n movimientoP2 = random.choice([\"der\", \"izq\"])\n \n \n\n elif (posicionXP1 in list(range(posicionManchaX - 20, posicionManchaX + 20))) and (posicionYP1 in list(range(posicionManchaY - 20, posicionManchaY + 20))):\n contadorGasolina -= velocidad\n\n if efectoManchaP1 == False:\n soundBrake.play()\n \n ManchaP1 = threading.Thread(target=efecto_ManchaP1)\n\n ManchaP1.start()\n\n if movimientoP1 == \"der\":\n movimientoP1 = \"izq\"\n elif movimientoP1 == \"izq\":\n movimientoP1 = \"der\"\n elif movimientoP1 == \"none\":\n movimientoP1 = random.choice([\"der\", \"izq\"])\n\n elif (posicionXP2 in list(range(posicionManchaX - 20, posicionManchaX + 20))) and (posicionYP2 in list(range(posicionManchaY - 20, posicionManchaY + 20))):\n contadorGasolina2 -= velocidad\n \n if efectoManchaP2 == False:\n soundBrake.play()\n\n ManchaP2 = threading.Thread(target=efecto_ManchaP2)\n\n ManchaP2.start()\n\n if movimientoP2 == \"der\":\n movimientoP2 = \"izq\"\n elif movimientoP2 == \"izq\":\n movimientoP2 = \"der\"\n elif movimientoP2 == \"none\":\n movimientoP2 = random.choice([\"der\", \"izq\"])\n\n\n #|Gasolina|----------------------------------------- \n\n\n raiz.update()\n \n if (posicionXP1 in list(range(posicionStickerX - 15, posicionStickerX + 15))) and (posicionYP1 in list(range(posicionStickerY - 15, posicionStickerY + 15))) and (posicionXP2 in list(range(posicionStickerX - 15, posicionStickerX + 15))) and (posicionYP2 in list(range(posicionStickerY - 15, posicionStickerY + 15))):\n contadorGasolina += 2\n contadorGasolina2 += 2\n\n soundSticker.play()\n\n stickerImage = PhotoImage(file=\"transparent.png\") \n sticker1 = canvasDer.create_image(posicionStickerX, 
posicionStickerY, image = stickerImage)\n raiz.update()\n stickerImage = PhotoImage(file=\"transparent.png\") \n sticker2 = canvasIzq.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n raiz.update()\n\n elif (posicionXP1 in list(range(posicionStickerX - 15, posicionStickerX + 15))) and (posicionYP1 in list(range(posicionStickerY - 15, posicionStickerY + 15))):\n \n contadorGasolina += 2\n soundSticker.play()\n\n stickerImage = PhotoImage(file=\"transparent.png\") \n sticker1 = canvasDer.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n raiz.update()\n\n elif (posicionXP2 in list(range(posicionStickerX - 15, posicionStickerX + 15))) and (posicionYP2 in list(range(posicionStickerY - 15, posicionStickerY + 15))):\n\n contadorGasolina2 += 2\n soundSticker.play()\n stickerImage = PhotoImage(file=\"transparent.png\") \n sticker2 = canvasIzq.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n raiz.update()\n \n #|Movimiento Objetos|--------------------------\n \n #|Sticker|---------------------------\n\n canvasIzq.move(sticker1, 0, velocidad)\n canvasDer.move(sticker2, 0, velocidad)\n posicionStickerY += velocidad\n raiz.update()\n\n\n if posicionStickerY > 810:\n posicionRandomY = random.randint(-8000,-7000)\n if posicionStickerX < 75:\n posicionRandomX = random.randint(-1,100)\n elif posicionStickerX > 75 and posicionStickerX < 120:\n posicionRandomX = random.randint(-75,75)\n elif posicionStickerX > 120:\n posicionRandomX = random.randint(-100,1)\n\n \n canvasIzq.move(sticker1, posicionRandomX, posicionRandomY)\n canvasDer.move(sticker2, posicionRandomX, posicionRandomY)\n posicionStickerY += posicionRandomY\n\n posicionManchaX += posicionRandomX\n\n stickerImage = PhotoImage(file=\"sticker.png\") \n sticker1 = canvasDer.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n \n sticker2 = canvasIzq.create_image(posicionStickerX, posicionStickerY, image = stickerImage)\n\n \n raiz.update()\n \n #|Manchas|--------------------------\n\n\n canvasIzq.move(mancha1, 0, velocidad)\n canvasDer.move(mancha2, 0, velocidad)\n posicionManchaY += velocidad\n raiz.update()\n\n\n if posicionManchaY > 810:\n posicionRandomY = random.randint(-6000,-4000)\n if posicionManchaX < 75:\n posicionRandomX = random.randint(-1,100)\n elif posicionManchaX > 75 and posicionManchaX < 120:\n posicionRandomX = random.randint(-75,75)\n elif posicionManchaX > 120:\n posicionRandomX = random.randint(-100,1)\n\n \n canvasIzq.move(mancha1, posicionRandomX, posicionRandomY)\n canvasDer.move(mancha2, posicionRandomX, posicionRandomY)\n posicionManchaY += posicionRandomY\n\n posicionManchaX += posicionRandomX\n\n \n raiz.update()\n\n\n \n #|minivan|-----------------------------\n\n canvasIzq.move(minivan1, 0, velocidad)\n canvasDer.move(minivan2, 0, velocidad)\n posicionMinivanY += velocidad\n raiz.update()\n\n\n if posicionMinivanY > 810:\n posicionRandomY = random.randint(-1000,-820)\n if posicionMinivanX < 75:\n posicionRandomX = random.randint(-1,100)\n elif posicionMinivanX > 75 and posicionMinivanX < 120:\n posicionRandomX = random.randint(-75,75)\n elif posicionMinivanX > 120:\n posicionRandomX = random.randint(-100,1)\n\n \n canvasIzq.move(minivan1, posicionRandomX, posicionRandomY)\n canvasDer.move(minivan2, posicionRandomX, posicionRandomY)\n posicionMinivanY += posicionRandomY\n\n posicionMinivanX += posicionRandomX\n\n \n raiz.update()\n\n\n canvasIzq.move(minivan1_2, 0, velocidad)\n canvasDer.move(minivan2_2, 0, velocidad)\n 
posicionMinivanY_2 += velocidad\n raiz.update()\n\n\n if posicionMinivanY_2 > 810:\n posicionRandomY = random.randint(-10000,-8000)\n if posicionMinivanX_2 < 75:\n posicionRandomX = random.randint(-1,100)\n elif posicionMinivanX_2 > 75 and posicionMinivanX_2 < 120:\n posicionRandomX = random.randint(-75,75)\n elif posicionMinivanX_2 > 120:\n posicionRandomX = random.randint(-100,1)\n\n \n canvasIzq.move(minivan1_2, posicionRandomX, posicionRandomY)\n canvasDer.move(minivan2_2, posicionRandomX, posicionRandomY)\n posicionMinivanY_2 += posicionRandomY\n\n posicionMinivanX_2 += posicionRandomX\n\n \n raiz.update()\n\n #|runner|-------------------------------\n\n if movimientoRunner == 1:\n canvasIzq.move(runner1, velocidadRunner, velocidad)\n canvasDer.move(runner2, velocidadRunner, velocidad)\n posicionRunnerY += velocidad\n posicionRunnerX += velocidadRunner\n if posicionRunnerX > 160:\n movimientoRunner = 0\n \n elif movimientoRunner == 0: \n canvasIzq.move(runner1, -velocidadRunner, velocidad)\n canvasDer.move(runner2, -velocidadRunner, velocidad)\n posicionRunnerY += velocidad\n posicionRunnerX -= velocidadRunner\n if posicionRunnerX < 40:\n movimientoRunner = 1\n\n raiz.update()\n\n\n if posicionRunnerY > 810:\n posicionRandomY = random.randint(-3000,-2000)\n if posicionRunnerX < 75:\n posicionRandomX = random.randint(-1,50)\n elif posicionRunnerX > 75 and posicionRunnerX < 120:\n posicionRandomX = random.randint(-50,50)\n elif posicionRunnerX > 120:\n posicionRandomX = random.randint(-50,1)\n\n \n canvasIzq.move(runner1, posicionRandomX, posicionRandomY)\n canvasDer.move(runner2, posicionRandomX, posicionRandomY)\n posicionRunnerY += posicionRandomY\n\n posicionRunnerX += posicionRandomX\n\n \n raiz.update()\n\n #|Fighter|--------------------------------------\n \n \n if posicionFighterX < posicionXP1:\n canvasIzq.move(fighter1, velocidadFighter, velocidad)\n posicionFighterX += velocidadFighter\n\n elif posicionFighterX > posicionXP1:\n canvasIzq.move(fighter1, -velocidadFighter, velocidad)\n posicionFighterX += -velocidadFighter\n\n elif posicionFighterX == posicionXP1:\n canvasIzq.move(fighter1, 0, velocidad)\n posicionFighterX += 0\n\n if posicionFighterX2 < posicionXP2:\n canvasDer.move(fighter2, velocidadFighter, velocidad)\n posicionFighterX2 += velocidadFighter\n \n elif posicionFighterX2 > posicionXP2:\n canvasDer.move(fighter2, -velocidadFighter, velocidad)\n posicionFighterX2 += -velocidadFighter\n \n\n elif posicionFighterX2 == posicionXP2: \n canvasDer.move(fighter2, 0, velocidad)\n posicionFighterX2 += 0\n \n posicionFighterY += velocidad\n\n\n\n if posicionFighterY > 810:\n posicionRandomY = random.randint(-5000,-4000)\n\n posicionFighterY += posicionRandomY\n \n if posicionFighterX < 75:\n posicionRandomX = random.randint(-1,50)\n elif posicionFighterX > 75 and posicionFighterX < 130:\n posicionRandomX = random.randint(-25,25)\n elif posicionFighterX > 130:\n posicionRandomX = random.randint(-50,1)\n raiz.update()\n \n canvasIzq.move(fighter1, posicionRandomX, posicionRandomY)\n posicionFighterX += posicionRandomX\n\n if posicionFighterX2 < 75:\n posicionRandomX = random.randint(-1,50)\n elif posicionFighterX2 > 75 and posicionFighterX2 < 130:\n posicionRandomX = random.randint(-25,25)\n elif posicionFighterX2 > 130:\n posicionRandomX = random.randint(-50,1)\n raiz.update()\n\n canvasDer.move(fighter2, posicionRandomX, posicionRandomY)\n posicionFighterX2 += posicionRandomX\n \n\n\n # |Fuel|---------------\n\n if (int(gasolina1.get()) <= 0) and (int(gasolina2.get()) <= 
0):\n\n GameOverP1 = \"Fuel\"\n GameOver()\n\n elif int(gasolina1.get()) <= 0:\n GameOverP1 = \"Fuel1\"\n GameOver()\n elif int(gasolina2.get()) <= 0:\n GameOverP2 = \"Fuel2\"\n GameOver()\n \n\n #|Final|---------------------------------------\n\n if (coords[1]) >= 9230.0 :\n GameOverP2 = \"fin\"\n GameOver()\n \n elif movimiento == False:\n canvasDer.move(roadDer, 0, 0)\n \n canvasIzq.move(roadIzq, 0, 0)\n raiz.update()\n\n archivo[\"posicionManchaY\"] = posicionManchaY\n archivo[\"posicionManchaX\"] = posicionManchaX\n\n archivo[\"posicionStickerY\"] = posicionStickerY\n archivo[\"posicionStickerX\"] = posicionStickerX\n\n archivo[\"posicionMinivanY\"] = posicionMinivanY\n archivo[\"posicionMinivanX\"] = posicionMinivanX \n\n archivo[\"posicionMinivanY_2\"] = posicionMinivanY_2\n archivo[\"posicionMinivanX_2\"] = posicionMinivanX_2 \n\n archivo[\"posicionRunnerY\"] = posicionRunnerY\n archivo[\"posicionRunnerX\"] = posicionRunnerX\n\n archivo[\"posicionFighterY\"] = posicionFighterY\n archivo[\"posicionFighterX\"] = posicionFighterX\n archivo[\"posicionFighterX2\"] = posicionFighterX2\n\n archivo[\"posicionRoadDer\"] = coords[1]\n\n archivo[\"posicionRoadIzq\"] = coords2[1]\n\n archivo[\"contadorGasolina\"] = contadorGasolina\n\n archivo[\"contadorGasolina2\"] = contadorGasolina2\n\n archivo[\"cont\"] = cont\n\n archivo[\"posicionXP1\"] = posicionXP1\n\n archivo[\"posicionYP1\"] = posicionYP1\n\n archivo[\"posicionXP2\"] = posicionXP2\n\n archivo[\"posicionYP2\"] = posicionYP2\n\n archivo[\"minimapY\"] = minimapY\n\n\n #|Fondo|-------------------------------------------\n \n mainMenu = PhotoImage(file=\"playMultiPlayer.png\")\n fondoMenu= Label(raiz, image = mainMenu).place(x=0, y=0)\n\n fontSubMenu = font.Font(family='Harlow Solid Semiexpandida Negrita Cursiva', size= 10) \n fontSubMenu2 = font.Font(family='Harlow Solid Semiexpandida Negrita Cursiva', size= 12) \n\n \n\n #|Carretera derecha|-------------------------------------------------------\n \n roadImageDer = PhotoImage(file=\"derecha.png\")\n\n posicionRoadDer = -8850\n\n if varContinue == True:\n\n posicionRoadDer = eval(dicc)[\"posicionRoadDer\"]\n\n canvasDer = Canvas(raiz, width = 192, height = 600, borderwidth = 0)\n canvasDer.place(x= 555, y=0)\n\n roadDer = canvasDer.create_image(98, posicionRoadDer, image = roadImageDer)\n\n #|carretera izquierda|-------------------------------------------------------\n \n roadImageIzq = PhotoImage(file=\"izquierda.png\")\n\n canvasIzq = Canvas(raiz, width = 192, height = 600)\n canvasIzq.place(x= 47, y=0)\n\n posicionRoadIzq = -8850\n\n if varContinue == True:\n\n posicionRoadIzq = eval(dicc)[\"posicionRoadIzq\"]\n\n roadIzq = canvasIzq.create_image(98, posicionRoadIzq, image = roadImageIzq)\n\n #|jugadores|----------------------------------------------------------------------\n\n if varContinue == True:\n posicionXP1 = eval(dicc)[\"posicionXP1\"] \n\n posicionYP1 = eval(dicc)[\"posicionYP1\"] \n\n posicionXP2 = eval(dicc)[\"posicionXP2\"] \n\n posicionYP2 = eval(dicc)[\"posicionYP2\"] \n\n imagePlayer1 = PhotoImage(file=\"player1.png\")\n cavasImagePlayer1 = canvasIzq.create_image(posicionXP1, posicionYP1, image = imagePlayer1)\n\n imagePlayer2 = PhotoImage(file=\"player2.png\")\n cavasImagePlayer2 = canvasDer.create_image(posicionXP2, posicionYP2, image = imagePlayer2)\n\n if iniciar == True:\n canvasIzq.move(cavasImagePlayer1, 0, 100)\n canvasDer.move(cavasImagePlayer2, 0, 100)\n\n\n #|Gasolina|--------------------------------\n\n gasolina1 = StringVar()\n\n gasolina2 = StringVar()\n 
\n gasolina1.set(200)\n\n\n gasolina2.set(200)\n\n if varContinue == True:\n gasolina1.set(eval(dicc)[\"contadorGasolina\"])\n gasolina2.set(eval(dicc)[\"contadorGasolina\"])\n\n\n labelGasolina1 = Label(raiz, textvariable = gasolina1, fg = \"white\", bg = \"black\", font = fontSubMenu2).place(x=340, y=185)\n\n labelGasolina2 = Label(raiz, textvariable = gasolina2, fg = \"white\", bg = \"black\", font = fontSubMenu2).place(x=425, y=185)\n\n #|Minimap|-------------------------------------------------------------------\n \n minimapImage = PhotoImage(file=\"Minimap.png\")\n\n playerMinimapImage = PhotoImage(file=\"playersMinimap.png\")\n\n\n canvasMinimap = Canvas(raiz, width = 160, height = 390, borderwidth = -3, background= \"black\")\n \n\n canvasMinimap.place(x= 315, y=230)\n\n minimap = canvasMinimap.create_image(85, 180, image = minimapImage)\n\n minimapY = 180\n\n archivo[\"minimapY\"] = minimapY\n\n if varContinue == True:\n minimapY = eval(dicc)[\"minimapY\"]\n\n playerMinimap = canvasMinimap.create_image(84, minimapY, image = playerMinimapImage)\n\n #|Init|----------------------------------------------\n\n initImage = PhotoImage(file=\"init.png\")\n\n\n initMinimap = canvasMinimap.create_image(85, 215, image = initImage)\n\n #|Speedometer|----------------------------------\n\n km = StringVar()\n variableKm = str((difficulty.get())*25) + \" km/h\"\n km.set(variableKm)\n archivo[\"km\"] = (km.get())\n if varContinue == True:\n km.set(eval(dicc)[\"km\"])\n kilometraje = Label(canvasMinimap, textvariable = km , fg = \"white\", bg = \"black\", font = fontSubMenu2).place(x=55, y=320)\n\n #|Buttons|--------------------------------------------------------------------------\n fontSave = font.Font(family='Haettenschweiler', size= 13)\n\n butSave = Button(raiz, text=\"Save\",bg = \"black\", fg = \"white\", \n font= fontSave, command= _save_).place(x= 377, y = 241, height= 32, width= 48)\n\n\n butContinue = Button(raiz, text=\"Continue\",bg = \"black\", fg = \"white\", \n font= fontSave, command= _continue_).place(x= 377, y = 271, height= 32, width= 48)\n\n\n butRestart = Button(raiz, text=\"Restart\",bg = \"black\", fg = \"white\", \n font= fontSave, command= play_multijugador1).place(x= 377, y = 301, height= 32, width= 48)\n\n\n butBack = Button(raiz, text=\"Back\",bg = \"black\", fg = \"white\", \n font= fontSave, command= multi_player1).place(x= 377, y = 331, height= 32, width= 48)\n\n\n\n #----------------------------------------------------------------------------------\n\n if varContinue == True:\n\n player1.set(eval(dicc)[\"player1\"])\n player2.set(eval(dicc)[\"player2\"])\n \n\n #|Labels players|------------------------------------------------------------------------\n\n\n labelPlayer1 = Label(raiz, text = player1.get(), fg = \"red\", bg = \"black\", font = fontSubMenu).place(x=315, y=104)\n\n labelPlayer2 = Label(raiz, text = player2.get(), fg = \"cyan\", bg = \"black\", font = fontSubMenu).place(x=400, y=104)\n\n\n start(\"True\")\n \n\n raiz.mainloop()\n\n#|Pause Functions| -------------------------------------------------------------------------------\n\ndef play_multijugador1():\n global varContinue\n varContinue = False\n play_multijugador()\ndef multi_player1():\n global varContinue\n varContinue = False\n multi_player()\n\n#---------------------------------------------------------------------------------------------------\n\nmain_menu()\n\n#Hello teacher, I just made my first game! 
xD","sub_path":"Road Fighter Project - Julián Loaiza/Road Fighter/Road Fighter.py","file_name":"Road Fighter.py","file_ext":"py","file_size_in_byte":67533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"432036536","text":"import pyglet.app\n\nfrom pycraft.window import Window\nfrom pycraft.world import World\nfrom pycraft.objects.player import Player\n\nWINDOW_DIMENSIONS = (800, 600)\nWINDOW_CAPTION = 'PyCraft'\nWINDOW_RESIZEABLE = True\nWINDOW_EXCLUSIVE_MOUSE = True\n\n\ndef main():\n window = Window(\n width=WINDOW_DIMENSIONS[0],\n height=WINDOW_DIMENSIONS[1],\n caption=WINDOW_CAPTION,\n resizable=WINDOW_RESIZEABLE\n )\n # Hide the mouse cursor and prevent the mouse from leaving the window.\n window.set_exclusive_mouse(WINDOW_EXCLUSIVE_MOUSE)\n world = World()\n window.set_world(world)\n player = Player()\n window.set_player(player)\n pyglet.app.run()\n","sub_path":"pycraft/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"568684137","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: callum\n#\n# Created: 24,02,2016\n# Copyright: (c) callum 2016\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\n# Order for Arrays: Type, Attack,defence, Duribility, mana cost, name, effect or not\n\n\n#attack left\n#durability right\n#mana midel\n\n\n#Smith:\n\nBroken_sword = [0,1,4,1,\"Broken sword\",0]\nGlass_blade = [0,5,1,3,\"Glass Blade\",0]\nSmithing_hammer = [0,1,5,4, \"Smithing Hammer\",1]\nLarge_hammer = [0,6,4,6, \"Large Hammer\",0]\n\nGloves = [1,2,2,2,\"Gloves\",0]\nSmithing_hands = [1,2,3,3,\"Smith's Hands\",1]\nForge_gloves = [1,1,4,6,\"Forge Gloves\",1]\n\nLeg_brace = [2,2,2,2,\"Leg brace\",0]\nSmiths_leg_guard = [2,3,4,3,\"Smith's Leg guards\",0]\nPeg_leg = [2,1,4,4,\"Peg Leg\",1]\n\nFace_mask =[3,2,2,2,\"Face Mask\",0]\nGoggles = [3,0,5,3,\"Goggles\",1]\nForge_guard = [3,4,5,4,\"Forge Guard\",0]\n\nSmiths_apron = [4,2,2,2,\"Smith's Apron\",0]\nAnvil = [4,3,3,4,\"Anvil\",1]\nForged_armour = [4,5,5,5,\"Forged Armour\",0]\n\n\n#Paladin\n\nHammer_of_dawn = [0,2,3,1,\"Hammer of Dawn\",0]\nLance = [0,2,5,3,\"Lance\",0]\nHoly_blade = [0,2,6,4,\"Holy Blade\",1]\nBlessed_lance = [0,7,4,6,\"Blessed Lance\",1]\n\nSteel_gauntlets = [1,1,3,2,\"steel Gauntlets\",0]\nValkyrie_gloves = [1,3,3,3,\"Valkyrie Gloves\",1]\nGigaSteel_gauntlets = [1,4,6,5,\"GigaSteel Gauntlets\",0]\n\nSteel_greaves = [2,3,2,2, \"Steel Greaves\",0]\nValkyrie_sandles = [2,3,3,3,\"Valkyrie Sandles\",0]\nGigaSteel_greaves = [2,4,6,5,\"GigaSteel Greaves\",0]\n\nSteel_helm= [3,3,2,2,\"Steel Helm\",0]\nValkyrie_helm = [3,3,3,3,\"Valkyrie helm\",1]\nGigaSteel_helm = [3,4,6,5, \"GigaSteel Helm\",1]\n\nSteel_mail = [4,3,2,2,\"Steel Mail\",0]\nValkyrie_armour = [4,4,3,4,\"Valkyrie Armour\",0]\nGigaSteel_platemail = [4,4,6,5, \"Gigasteel Plate-mail\",0]\n\n#Man at arms\n\nMace = [0,5,1,2, \"Mace\",0]\nHeavy_hammer = [0,6,2,4, \"Heavy hammer\",0]\nClaymore = [0,8,2,6,\"Claymore\",0]\nReckless_sword = [0,10,1,8, \"Reckless_sword\",0]\n\nBandages =[1,3,1,2,\"Bandages\",0]\nHand_guard = [1,2,4,4,\"Hand Guard\"]\nSpiked_shield = [1,4,4,6,\"Spiked Shield\",1]\n\nBoots = [2,3,1,2,\"Boots\",0]\nKilt = [2,2,4,4,\"Kilt\",0]\nSwift_boots = [2,4,3,6,\"Swift Boots\",1]\n\nEye_patch = [3,3,1,2,\"Eye patch\",0]\nSteel_mask = [3,2,4,4, 
\"Steel Mask\",0]\nConqurers_helm = [3,4,5,6,\"Conqurers Helm\",0]\n\nBlood_stained_wraps = [4,3,1,2,\"Blood Stained wraps\",0]\nWoad = [4,2,4,4,\"Woad\",0]\nConqurers_armour = [4,4,5,6,\"Conqurers armour\",0]\n\n#General\n\nStandard_sword=[0,2,3,2,\"Standard sword\",0]\nBuckler = [0,0,4,2,\"Buckler\",0]\n\n\n\nLeather_gloves\t=[1,1,2,1,\"Leather gloves\",0]\nIron_gauntlents\t=[1,3,5,4,\"Iron gauntlents\",0]\n\n\nLeather_greeves = [2,3,3,3,\"Leather greeves\",0]\nIron_greeves = [2,3,5,4, \"Iron greeves\",0]\n\n\nHeros_cap = [3,1,3,1,\"Hero's cap\",0]\nHeros_helm = [3,3,3,3,\"Hero's helm\",0]\n\n\nIron_armour = [4,3,5,4,\"Iron armour\",0]\nSteel_armour = [4,3,6,5,\"Steel armour\",0]\n\n\n","sub_path":"equipment list.py","file_name":"equipment list.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"286250837","text":"\"\"\"empty message\n\nRevision ID: 84ffaad552c0\nRevises: f77d1aa64bbc\nCreate Date: 2020-10-25 15:06:53.765097\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '84ffaad552c0'\ndown_revision = 'f77d1aa64bbc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('reminders', sa.Column('full_date', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('reminders', 'full_date')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/84ffaad552c0_.py","file_name":"84ffaad552c0_.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"479886416","text":"from keras.models import Model\nfrom keras.layers import Input, Dense, Conv1D, MaxPooling1D, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import backend as K\n\nimport random\nimport numpy as np\nfrom collections import defaultdict\n\nimport read\nimport tensorflow as tf\n\nnp.random.seed(1)\ntf.set_random_seed(2)\n\nnum_test_classes = 2\nmini_batch_size = 200\nbatch_size = 60\nsteps_per_epoch = mini_batch_size\nfeature_length = read.dct_length * 3 * len(read.imus)\nepochs = 10\nk = 3\nk_shot = 5\n\n\ndef get_neighbours(instance, dataset, n):\n return np.argsort(np.linalg.norm(dataset - instance, axis=1))[:n]\n\n\ndef get_triples_minibatch_indices_me(dictionary, _labels):\n triples_indices = []\n for k in dictionary.keys():\n for value in dictionary[k]:\n anchor = value\n positive = random.choice(dictionary[k])\n negative_labels = [l for l in _labels if l != k ]\n negative_label = random.choice(negative_labels)\n negative = random.choice(dictionary[negative_label])\n triples_indices.append([anchor, positive, negative])\n\n return np.asarray(triples_indices)\n\n\ndef get_triples_minibatch_data_u(x, dictionary, _labels):\n indices = get_triples_minibatch_indices_me(dictionary, _labels)\n return x[indices[:, 0]], x[indices[:, 1]], x[indices[:, 2]]\n\n\ndef triplet_generator_minibatch(x, y, no_minibatch, _labels):\n grouped = defaultdict(list)\n dict_list = []\n\n for i, label in enumerate(y):\n grouped[label].append(i)\n\n for k in list(grouped.keys()):\n random.shuffle(grouped[k])\n\n for j in range(no_minibatch):\n dictionary = {}\n\n for k in list(grouped.keys()):\n ran_sam = random.sample(grouped[k], 3)\n dictionary[k] = ran_sam\n\n 
dict_list.append(dictionary)\n\n i = 0\n\n while 1:\n x_anchor, x_positive, x_negative = get_triples_minibatch_data_u(x, dict_list[i], _labels)\n\n if i == (no_minibatch - 1):\n i = 0\n else:\n i += 1\n\n yield ({'anchor_input': x_anchor,\n 'positive_input': x_positive,\n 'negative_input': x_negative},\n None)\n\n\ndef triplet_loss(inputs, dist='sqeuclidean', margin='maxplus'):\n anchor, positive, negative = inputs\n positive_distance = K.square(anchor - positive)\n negative_distance = K.square(anchor - negative)\n if dist == 'euclidean':\n positive_distance = K.sqrt(K.sum(positive_distance, axis=-1, keepdims=True))\n negative_distance = K.sqrt(K.sum(negative_distance, axis=-1, keepdims=True))\n elif dist == 'sqeuclidean':\n positive_distance = K.mean(positive_distance, axis=-1, keepdims=True)\n negative_distance = K.mean(negative_distance, axis=-1, keepdims=True)\n loss = positive_distance - negative_distance\n if margin == 'maxplus':\n loss = K.maximum(0.0, 1 + loss)\n elif margin == 'softplus':\n loss = K.log(1 + K.exp(loss))\n return K.mean(loss)\n\n\ndef build_conv_model(input_shape):\n base_input = Input(input_shape)\n x = Conv1D(12, kernel_size=3, activation='relu')(base_input)\n x = MaxPooling1D(pool_size=2)(x)\n x = BatchNormalization()(x)\n x = Flatten()(x)\n x = Dense(1200, activation='relu')(x)\n embedding_model = Model(base_input, x, name='embedding')\n\n anchor_input = Input(input_shape, name='anchor_input')\n positive_input = Input(input_shape, name='positive_input')\n negative_input = Input(input_shape, name='negative_input')\n\n anchor_embedding = embedding_model(anchor_input)\n positive_embedding = embedding_model(positive_input)\n negative_embedding = embedding_model(negative_input)\n\n inputs = [anchor_input, positive_input, negative_input]\n outputs = [anchor_embedding, positive_embedding, negative_embedding]\n\n triplet_model = Model(inputs, outputs)\n triplet_model.add_loss(K.mean(triplet_loss(outputs)))\n triplet_model.compile(loss=None, optimizer='adam') # loss should be None\n\n return embedding_model, triplet_model\n\n\nfeature_data = read.read()\n\ntest_ids = list(feature_data.keys())\nall_labels = list(feature_data[test_ids[0]].keys())\n\nfor test_id in test_ids:\n # for a_label in all_labels:\n for _int in range(5):\n test_labels_indices = np.random.choice(len(all_labels), num_test_classes, False)\n test_labels = [a for ii, a in enumerate(all_labels) if ii in test_labels_indices]\n print(test_labels)\n train_labels = [a for ii, a in enumerate(all_labels) if ii not in test_labels_indices]\n print(train_labels)\n _train_data, _test_data = read.split(feature_data, test_id)\n _train_data = read.remove_class(_train_data, test_labels)\n\n _support_data, _test_data = read.support_set_split(_test_data, k_shot)\n\n _train_data, _train_labels = read.flatten(_train_data)\n _support_data, _support_labels = read.flatten(_support_data)\n\n _train_data = np.array(_train_data)\n _train_data = np.expand_dims(_train_data, 3)\n\n _support_data = np.array(_support_data)\n _support_data = np.expand_dims(_support_data, 3)\n\n _embedding_model, _triplet_model = build_conv_model((feature_length,1))\n\n _triplet_model.fit_generator(triplet_generator_minibatch(_train_data, _train_labels, mini_batch_size\n , train_labels),\n steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=1)\n\n _support_preds = _embedding_model.predict(_support_data)\n\n for _l in list(_test_data[test_id].keys()):\n _test_label_data = _test_data[test_id][_l]\n _test_labels = [_l for i in 
range(len(_test_label_data))]\n _test_label_data = np.array(_test_label_data)\n _test_label_data = np.expand_dims(_test_label_data, 3)\n _test_preds = _embedding_model.predict(_test_label_data)\n\n acc = read.cos_knn(k, _test_preds, _test_labels, _support_preds, _support_labels)\n result = 'tn_conv, 3nn,' + str(num_test_classes) + ',' + str(test_id) + ',' + ','.join([str(t) for t in test_labels]) + ',' + str(_l) + ',' + str(acc)\n read.write_data('tn_conv_oe_n.csv', result)\n\n\n","sub_path":"selfback/tn/tn_conv_oe_n.py","file_name":"tn_conv_oe_n.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"204530324","text":"import requests\nimport pandas as pd \nimport io\nfrom linearRegression import LinearRegression\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# The dataset that is going to train the ai\ndf = pd.read_csv(\"https://raw.githubusercontent.com/Baakchsu/LinearRegression/master/weight-height.csv\")\n#print(df.head())\nreg = LinearRegression()\n\ndef preprocess():\n x = (df['Weight']-df['Weight'].mean())/df['Weight'].std() #standardization of the dataset\n y = (df['Height']-df['Height'].mean())/df['Height'].std() #standardization of the dataset\n return(x, y)\n\ndef train():\n (x, y) = preprocess()\n (weights,intercept,parameter_cache) = reg.train(x[:-180],y[:-180], 500)\n print(parameter_cache)\n\ndef prediction():\n (x, y) = preprocess()\n pred = reg.predict(np.array(x[-180:]))\n plt.scatter(x[-180:],y[-180:])\n plt.plot(x[-180:],pred)\n plt.show()\n\ntrain()\nprediction()\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"303582753","text":"from django.urls import path\nfrom . 
import views\n\n# URL definitions for projects and invoicing\napp_name = 'projects'\n\nurlpatterns = [\n\n path('index/', views.index, name='index'),\n # Project management\n path('explist/', views.explist, name='pro_list'),\n path('add_exp/', views.add_exp, name='add_exp'),\n path('pro/<int:pro_id>/', views.pro_detail),\n path('del_pro/<int:pro_id>/', views.pro_del),\n\n # Invoicing and payment collection\n path('invoice_payment/', views.invoice_payment, name='invoice_list'),\n path('invoice/<int:inv_id>/', views.invoice_detail),\n path('del_inv/<int:inv_id>/', views.inv_del),\n\n # Invoice application records\n path('applylist/', views.applylist, name='apply_list'), # invoice application list URL\n path('apply/<int:apply_id>/', views.apply_detail, name='apply_detail'),\n path('del_apply/<int:apply_id>/', views.apply_del),\n # Approve invoice applications\n path('approve_apply/<int:apply_id>/', views.approve_apply, name='approve_apply'),\n\n # Apply for an invoice\n path('applyinvoice/', views.applyinvoice, name='apply_inv'), # invoice application URL\n\n # Export data\n # path('export_pro/', views.export_pro_csv),\n # path('export_inv/', views.export_inv_csv),\n\n # Tentative URL for automatic unit import\n path('test/', views.test,),\n # URLs for manually adding units\n path('unit_list/', views.unit_list, name='unit_list'),\n path('add_unit/', views.add_unit, name='add_unit'),\n path('unit/<int:unit_id>/', views.unit_detail, name='unit_detail'),\n path('del_unit/<int:unit_id>/', views.unit_del),\n # ajax test URLs\n path('ajax/', views.get),\n path('ajax_main/', views.ajax_main),\n\n]\n","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"441002467","text":"import sys\n\ndef write_col(source_lines, colunm_number,filename):\n\tcol=[]\n\tfor line in source_lines:\n\t\tcol.append(line.split()[colunm_number]+\"\\n\")\n\twith open(filename,\"w\")as writer:\n\t\twriter.writelines(col)\n\nf=open(\"chapter2.txt\")\nlines=f.readlines()\n\nwrite_col(lines,0,\"col1.txt\")\nwrite_col(lines,1,\"col2.txt\")\n","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"624364705","text":"import threading\nimport numpy as np\nimport math\n#FOR TESTING---------\nimport time\n#END TESTING---------\n\nEPS = 1e-8\n\n#This class handles doing a single search among every (MCTS_object, States_list) pair in the list MCTS_States_list\nclass Threading_MCTS():\n \n def __init__(self, args, nnet):\n self.nnet = nnet #needed for prediction\n self.args = args\n \n #exit_flag is an event object which will control when the threaded_search function STOPS.\n #When each threaded_search stops, we access the MCTS_object for the appropriate neural network inputs\n \n def getActionProbs (self, MCTS_States_list):\n # calls parallel_search numMCTS times and outputs prob. dist. for each (MCTS object, [States list]) pair in MCTSandStates \n # method is called by method Coach.advanceEpisodes\n \n #parallel search numMCTSsims times\n for i in range(self.args['numMCTSSims']):\n #TESTING-------------------------------\n #print('')\n #print('CURRENT BATCH SIMULATION:', i)\n #print('')\n #END TESTING---------------------------\n self.parallel_search(MCTS_States_list)\n \n #Once numMCTSsims parallel_searches have been done, all edge weights in each MCTS_obj have been updated, so we retrieve the probabilities\n #given by N(s,a). 
Put these new probabilities into a list.\n \n actionProbs = []\n \n for MCTS_object, State_list in MCTS_States_list:\n \n s = MCTS_object.game.keyRepresentation(State_list[-1])\n \n temp_counts = [MCTS_object.Nsa[(s,a)] if (s,a) in MCTS_object.Nsa else 0 for a in range(MCTS_object.game.getActionSize(self.args))]\n total_sum = float(sum(temp_counts))\n probs = [x/total_sum for x in temp_counts]\n actionProbs.append(probs)\n \n return actionProbs \n \n def parallel_search (self, MCTS_States_list):\n #1)For each MCTS_object in MCTS_States_list, traverse down from the current root to a leaf\n #2)For each MCTS_object, States_list in MCTS_States_list, update the leaf\n #3)For each MCTS_object, update the edges which were traversed in step 1. \n \n #FOR TESTING-----------------\n #print('------------------------------------------------------------')\n #print('BEGINNING SINGLE PARALLEL_SEARCH.....')\n #print('')\n #END TESTING-----------------\n \n #Conduct a search on every (MCTS_object, States_list) pair\n \n #FOR TESTING------------------\n #print('')\n #print('BEGINNING TRAVERSING DOWN TO LEAF FOR EACH MCTS_object, States_list pair...')\n #END TESTING------------------\n \n for MCTS_object, States_list in MCTS_States_list:\n \n current_root = States_list[-1]\n \n #reinitialize search_path to be empty(since this is a new search from root to leaf)\n MCTS_object.search_path = []\n \n #start recursive search to leaf. After traversetoLeaf is called, the search path traveled\n #for MCTS_object is saved in MCTS_object.search_path.\n self.search_traversetoLeaf(MCTS_object, current_root)\n \n #After all MCTS_object.search_path have been computed, make a batch prediction by \n #compiling each MCTS_object.search_path[-1] into a single query.\n \n pas_matrix, v_matrix = self.nnet.batch_predict(MCTS_States_list)\n \n #FOR TESTING-------------------------\n #print('')\n #print('The batch prediction returned the following:')\n #print(\"pas_matrix: \", pas_matrix)\n #print(\"v_matrix: \", v_matrix)\n #print('')\n #END TESTING--------------------------\n \n #Save the batch predictions into each MCTS_object and continue with MCTS search by updating the leaf node\n #of each MCTS_object. pas_matrix and v_matrix saves the predictions from searches which end\n #on a leaf and NOT a terminal node. Hence, as we loop through MCTS_States_list, we should skip\n #pairs in which the search ended on a terminal state. Hence, the if check in the loop below.\n \n i = 0\n\n for MCTS_object, States_list in MCTS_States_list:\n last_state = MCTS_object.search_path[-1]\n\n if MCTS_object.Es[last_state.keyRep] == 0:\n MCTS_object.batchquery_prediction = [pas_matrix[i,:], v_matrix[i]]\n \n #We only need to update the last state in search path if the search \n #ended on a leaf(and not a terminal node) \n \n self.search_updateLeaf(MCTS_object)\n i += 1\n \n #Note that for each MCTS_object, we need to update edge weights of \n #search path no matter if we ended on a terminal node or not during search\n self.search_updateTraversedEdges(MCTS_object)\n \n \n def search_traversetoLeaf(self, MCTS_object, State):\n #traverse from root to leaf and store the search path into\n #MCTS_object.search_path. 
MCTS_object.search_path is in the form of \n #[(state, a), (state2, a2), ..., leaf or terminal state]\n\n s = MCTS_object.game.keyRepresentation(State)\n \n #BASE CASES FOR TRAVERSE TO LEAF RECURSIVE SEARCH\n \n #1)Compute the terminal reward for state if not computed before\n if s not in MCTS_object.Es: # Note that MCTS_object.Es[s] not defined is NOT THE SAME as MCTS_object.Es[s] = 0\n MCTS_object.Es[s] = MCTS_object.game.getGameEnded(State, self.args, MCTS_object.game_args)\n \n #2)Check if the current state we are on is terminal or not. If terminal, \n #attach the node to our search path and return. \n if MCTS_object.Es[s] != 0:\n MCTS_object.search_path.append(State)\n \n return\n \n #3)Check if we are at leaf by checking if MCTS_object.Ps[s] is well defined\n if s not in MCTS_object.Ps:\n #Compute the features of the leaf \n State.compute_x_S_and_res(self.args, MCTS_object.game_args)\n #Save the features of this leaf to the MCTS_object for batch prediction later. Note that\n #from compute_x_S_and_res, we first save features in state object, and then we assign it in MCTS_object.features_s[s]\n MCTS_object.features_s[s] = State.feature_dic\n #\n MCTS_object.search_path.append(State)\n return \n \n #RECURSIVE CASE. If MCTS_object.Es[s] == 0 and MCTS_object.Ps[s] is well defined, then we are not yet at a leaf, so\n #we continue the search. \n valids = MCTS_object.Vs[s] #retrieve numpy vector of valid moves\n \n cur_best = -float('inf') #temp variable which holds the current highest UCB value\n best_act = -1 #temp variable which holds the current best action with largest UCB. Initialized to -1.\n \n for a in range(MCTS_object.game.getActionSize(self.args)): #iterate over all possible actions. \n if valids[a]:\n if (s,a) in MCTS_object.Qsa:\n u = MCTS_object.Qsa[(s,a)] + self.args['cpuct']*MCTS_object.Ps[s][a]*math.sqrt(MCTS_object.Ns[s])/(1+MCTS_object.Nsa[(s,a)]) #note here that MCTS_object.Ns[s] is number of times s \n #was visited. Note that MCTS_object.Ns[s] = sum over b of MCTS_object.Nsa[(s,b)], so the equation above is equal to surag nair's notes.\n else:\n #This line occurs if (s,a) is not in MCTS_object.Qsa, which means that if we take action a, then the next node next_s must be a leaf. This (s,a) will be added to MCTS_object.Qsa below and be assigned value of v.\n u = self.args['cpuct']*MCTS_object.Ps[s][a]*math.sqrt(MCTS_object.Ns[s] + EPS) \n \n #find the largest uct value\n if u > cur_best: \n cur_best = u\n best_act = a\n\n #append the (state,action) tuple to the search path\n MCTS_object.search_path.append((State,best_act))\n #get the next state and continue recursive search by calling traversetoLeaf\n next_s = MCTS_object.game.getNextState(State, best_act, MCTS_object.game_args, 1)\n self.search_traversetoLeaf(MCTS_object, next_s) #traverse from root to a leaf or terminal node using recursive search. 
\n \n\n def search_updateLeaf(self, MCTS_object):\n #This should be run after search_traversetoLeaf and batch_predict has been run on ALL MCTS_objects.\n #Note that the leaf only needs to be updated assuming traversetoLeaf landed on a leaf and NOT a terminal node.\n #From MCTS_object.search_path, we need to update the following variables in MCTS_object:\n #1)Compute MCTS_object.Ps[leaf] using batch neural network prediction.\n #2)Store valid moves for leaf in MCTS_object.Vs[leaf]\n #3)Initialize visit count to 0 in MCTS_object.Ns[leaf]\n \n leaf_state = MCTS_object.search_path[-1]\n leaf_key = leaf_state.keyRep\n \n #retrieve the computed MCTS_object.Ps[leaf_key]\n MCTS_object.Ps[leaf_key] = MCTS_object.batchquery_prediction[0]\n \n valids = MCTS_object.game.getValidMoves(leaf_state) #returns a numpy vector of 0 and 1's which indicate valid moves from the set of all actions\n MCTS_object.Ps[leaf_key] = MCTS_object.Ps[leaf_key]*valids # masking(hiding) invalid moves(this element wise product between two equally sized vectors creates a vector of probabilities of valid moves) the neural network may predict. \n sum_Ps_leaf = np.sum(MCTS_object.Ps[leaf_key]) \n \n #final assignment for MCTS_object.Ps[leaf_key]\n if sum_Ps_leaf > 0:\n MCTS_object.Ps[leaf_key] /= sum_Ps_leaf # renormalize\n else:\n # if all valid moves were masked make all valid moves equally probable\n \n # NB! All valid moves may be masked if either your NNet architecture is insufficient or you have overfitting or something else.\n # If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process. \n print(\"All valid moves were masked, do workaround.\")\n \n MCTS_object.Ps[leaf_key] = MCTS_object.Ps[leaf_key] + valids #These two lines makes all valid moves equally probable. \n MCTS_object.Ps[leaf_key] /= np.sum(MCTS_object.Ps[leaf_key])\n \n #augment MCTS_object.Ps[leaf_key] with prior knowledge of the true solution x. EQUATE BETA TO 1 FOR TESTING!!!\n #------------------------------------------------------------------------------------------------\n x_I = np.ceil(abs(MCTS_object.game_args.sparse_vector)) #Since x is always between -1 to 1, x_I is the indicator vector corresponding to x\n x_I = np.append(x_I, 0) #append the stopping action to x_I, so x_I is the indicator for the support of x and includes a 1 for the stopping action. \n valid_xI = x_I*valids #component-wise multiplication of indicator x_I and valids\n MCTS_object.Ps[leaf_key] = MCTS_object.args['beta']*MCTS_object.Ps[leaf_key] + (1 - self.args['beta']) * (1/np.sum(valid_xI)) * valid_xI\n #------------------------------------------------------------------------------------------------\n \n MCTS_object.Vs[leaf_key] = valids #Store the valids for leaf\n MCTS_object.Ns[leaf_key] = 0 #Initialize visit count of leaf to 0. We will update this in search_updateTraversedEdges instead.\n\n \n def search_updateTraversedEdges(self, MCTS_object):\n #For each traversed edge (s,a) in the search, we update the following:\n #MCTS_object.Qsa[(s,a)]\n #MCTS_object.Nsa[(s,a)]\n \n #Propagate the true reward up search path if search path ended on a terminal node. Otherwise, propagate up the output of the neural network\n #Note that v is a 1 by 1 np array, so hence the [0] at the end.\n if MCTS_object.Es[MCTS_object.search_path[-1].keyRep] == 0:\n v = MCTS_object.batchquery_prediction[1][0]\n else: #if last state visited in the MCTS simulation is a terminal node. 
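The prior-masking step in search_updateLeaf above zeroes out the network's probabilities for invalid moves, renormalizes, and falls back to a uniform distribution over the valid moves when the mask removed everything. The same logic in isolation (numpy is assumed imported as np, as it is in the original file; the function name is hypothetical):

import numpy as np

def masked_policy(raw_policy, valids):
    # raw_policy: network output over all actions; valids: 0/1 vector of legal moves.
    p = raw_policy * valids            # hide invalid moves
    total = np.sum(p)
    if total > 0:
        return p / total               # renormalize over the surviving moves
    # All valid moves were masked (e.g. an undertrained or overfit network):
    # make every valid move equally probable instead, as the workaround above does.
    return valids / np.sum(valids)

p = masked_policy(np.array([0.7, 0.2, 0.1]), np.array([0.0, 1.0, 1.0]))
# p is approximately [0.0, 0.667, 0.333]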
\n v = MCTS_object.Es[MCTS_object.search_path[-1].keyRep]\n \n \n #Update weights of all edges in the search_path. Also increment node values. Note that the loop omits the last element because the last element is a state and not a pair.\n for (State, a) in MCTS_object.search_path[:-1]:\n #Note that State.keyRep should be well defined since every State in each (State, a) pair have had search_traversetoLeaf called on it,\n #which calls game.keyRepresentation\n s = State.keyRep\n if (s,a) in MCTS_object.Qsa:\n #FOR TESTING----------------------\n #print(\"\")\n #print('(s,a) IS IN MCTS_object.Qsa !!!!!!!!')\n #print(\"MCTS_object.identifier:\", MCTS_object.identifier)\n #print(\"current_root:\", MCTS_object.search_path[0][0].col_indices)\n #print(\"(s, a):\", State.col_indices, a)\n #print(\"(s, a): \", s, a)\n #print(\"State.inverse: \", State.inverse)\n #print(\"State.ATy: \", State.ATy)\n #Check that the State.inverse*State.ATy is indeed the solution matching np.linalg.lstsq\n #if State.col_indices != []:\n # print(\"regression solution from product of inverse and ATy: \", np.matmul(State.inverse, State.ATy))\n # x = np.linalg.lstsq(MCTS_object.game_args.sensing_matrix[:, State.col_indices], MCTS_object.game_args.obs_vector)\n # print(\"regression solution and residual from np.linalg.lstsq: \", x[0], x[1])\n #print(\"BEFORE updating Qsa, Nsa for (s,a) %%%%%%%%%\")\n #print(\"v:\", v)\n #print(\"MCTS_object.Qsa[(s,a)]:\", MCTS_object.Qsa[(s,a)])\n #print(\"MCTS_object.Nsa[(s,a)]:\", MCTS_object.Nsa[(s,a)])\n #END TESTING----------------------\n \n MCTS_object.Qsa[(s,a)] = (MCTS_object.Nsa[(s,a)]*MCTS_object.Qsa[(s,a)] + v)/(MCTS_object.Nsa[(s,a)]+1) #The v in this equation could be the true terminal reward OR the predicted reward from NN, depending on whether the search ended on a leaf which is also a terminal node. \n MCTS_object.Nsa[(s,a)] += 1\n \n #FOR TESTING----------------------\n #print(\"AFTER updating Qsa, Nsa for (s,a) %%%%%%%%%\")\n #print(\"v:\", v)\n #print(\"MCTS_object.Qsa[(s,a)]:\", MCTS_object.Qsa[(s,a)])\n #print(\"MCTS_object.Nsa[(s,a)]:\", MCTS_object.Nsa[(s,a)])\n #print(\"Other Statistics.....\")\n #print(\"MCTS_object.Ps[s]:\", MCTS_object.Ps[s])\n #print(\"\")\n #END TESTING----------------------\n \n\n else: #if (s,a) is not in dictionary MCTS_object.Qsa, that means (s,a) has never been visited before. These are edges connected to leaves!! IOW N(s,a) = 0. 
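The Qsa update in search_updateTraversedEdges above is an incremental running mean: after an edge has been visited N times, backing up one new value v gives the mean (N*Q + v)/(N + 1). A tiny standalone sketch with hypothetical names:

def backup_edge(qsa, nsa, s, a, v):
    # Running-mean update of Q(s,a) with one new backed-up value v.
    if (s, a) in qsa:
        n = nsa[(s, a)]
        qsa[(s, a)] = (n * qsa[(s, a)] + v) / (n + 1)
        nsa[(s, a)] = n + 1
    else:
        # First traversal of this edge (N(s,a) == 0), so the mean is just v.
        qsa[(s, a)] = v
        nsa[(s, a)] = 1

qsa, nsa = {}, {}
backup_edge(qsa, nsa, "s0", 2, 1.0)
backup_edge(qsa, nsa, "s0", 2, 0.0)
assert qsa[("s0", 2)] == 0.5 and nsa[("s0", 2)] == 2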
Hence, by the formula 3 lines above, MCTS_object.Qsa[(s,a)] = v.\n #FOR TESTING----------------------\n #print(\"\")\n #print('(s,a) NOT IN MCTS_object.Qsa !!!!!!!!')\n #print(\"MCTS_object.identifier:\", MCTS_object.identifier)\n #print(\"current_root:\", MCTS_object.search_path[0][0].col_indices)\n #print(\"(s, a): \", State.col_indices, a)\n #print(\"(s, a): \", s, a)\n #print(\"State.inverse: \", State.inverse)\n #print(\"State.ATy: \", State.ATy)\n #Check that the State.inverse*State.ATy is indeed the solution matching np.linalg.lstsq\n #if State.col_indices != []:\n # print(\"regression solution from product of inverse and ATy: \", np.matmul(State.inverse, State.ATy))\n # x = np.linalg.lstsq(MCTS_object.game_args.sensing_matrix[:, State.col_indices], MCTS_object.game_args.obs_vector)\n # print(\"regression solution and residual from np.linalg.lstsq: \", x[0], x[1])\n #END TESTING----------------------\n \n MCTS_object.Qsa[(s,a)] = v \n MCTS_object.Nsa[(s,a)] = 1\n \n #FOR TESTING----------------------\n #print(\"AFTER updating Qsa, Nsa for (s,a) %%%%%%%%%\")\n #print(\"v:\", v)\n #print(\"MCTS_object.Qsa[(s,a)]:\", MCTS_object.Qsa[(s,a)])\n #print(\"MCTS_object.Nsa[(s,a)]:\", MCTS_object.Nsa[(s,a)])\n #print(\"Other Statistics.....\")\n #print(\"MCTS_object.Ps[s]:\", MCTS_object.Ps[s])\n #END TESTING----------------------\n \n MCTS_object.Ns[s] += 1\n \n #FOR TESTING--------------------------\n #last_state = MCTS_object.search_path[-1]\n #print('')\n #print('last state col indices: ', last_state.col_indices)\n #print('last state action indices: ', last_state.action_indices)\n #last_state_key = last_state.keyRep\n #print('last state key rep: ', last_state_key)\n #print(\"State.inverse: \", last_state.inverse)\n #print(\"State.ATy: \", last_state.ATy)\n #Check that the last_state.inverse*last_state.ATy is indeed the solution matching np.linalg.lstsq\n #if last_state.col_indices != []:\n # print(\"regression solution from product of inverse and ATy: \", np.matmul(last_state.inverse, last_state.ATy))\n # x = np.linalg.lstsq(MCTS_object.game_args.sensing_matrix[:, last_state.col_indices], MCTS_object.game_args.obs_vector)\n # print(\"regression solution and residual from np.linalg.lstsq: \", x[0], x[1])\n #print('The termreward currently stored for the last state is: ', last_state.termreward)\n #last_state.computeTermReward(MCTS_object.args, MCTS_object.game_args)\n #if last_state_key in MCTS_object.Ps:\n # print('last state updated Ps[s]: ', MCTS_object.Ps[last_state_key])\n #print('')\n #print('Generated vector y is: ', MCTS_object.game_args.obs_vector)\n #print('')\n #print('------------------------------------------------------------')\n #END TESTING--------------------------\n \n \n \n \n \n \n \n \n \n","sub_path":"prev_versions/batch+matrix_inversion/alphazero_compressedsensing_nonoise_hierarchical_v2/Threading_MCTS.py","file_name":"Threading_MCTS.py","file_ext":"py","file_size_in_byte":18569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"563509970","text":"from pyexpat import model\nfrom urllib import quote_plus\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models.query_utils import Q\nfrom django.http.response import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom notions.forms import 
UserRegisterForm\nfrom .models import Post\nfrom .forms import PostForm\n\n\n@login_required(login_url='/login/')\ndef post_create(request):\n form = PostForm(request.POST or None,request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n form.instance.user = request.user\n instance.save()\n messages.success(request, \"Successfully Created\")\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {\n \"form\": form,\n }\n return render(request, \"post_form.html\", context)\n\ndef post_detail(request,slug=None):\n instance = get_object_or_404(Post,slug=slug)\n share_string = quote_plus(instance.content)\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"share_string\":share_string,\n }\n return render(request,\"post_detail.html\",context)\n\n\ndef post_list(request):\n queryset_list = Post.objects.order_by(\"-timestamp\")\n if request.user.is_staff or request.user.is_superuser:\n queryset_list = Post.objects.all()\n\n query = request.GET.get(\"q\")\n if query:\n queryset_list = queryset_list.filter(\n Q(title__icontains=query) |\n Q(content__icontains=query) |\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query)\n ).distinct()\n paginator = Paginator(queryset_list, 9) # Show 9 contacts per page\n page_request_var = \"page\"\n page = request.GET.get(page_request_var)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"object_list\": queryset,\n \"title\": \"Shared Notions\",\n \"page_request_var\": page_request_var,\n }\n return render(request, \"post_list.html\", context)\n\n@login_required(login_url='/login/')\ndef post_update(request,slug=None):\n instance = get_object_or_404(Post,slug=slug)\n form = PostForm(request.POST or None,request.FILES or None,instance=instance,)\n if form.instance.user != request.user:\n response = HttpResponse(\"Oops! Only Owner of the post has this privilege\")\n response.status_code = 403\n return response\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"Successfully Updated\")\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"form\":form,\n }\n return render(request,\"post_form.html\",context)\n@login_required(login_url='/login/')\ndef post_delete(request,slug=None):\n\n instance = get_object_or_404(Post,slug=slug)\n if instance.user != request.user:\n response = HttpResponse(\"Oops! 
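The post_list view above guards its Paginator against two bad inputs: a non-integer ?page= value falls back to the first page, and an out-of-range page falls back to the last one. That pattern as a reusable sketch — paginate is a hypothetical helper, not part of the app above:

from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage

def paginate(queryset, page, per_page=9):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # e.g. ?page=abc
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # e.g. ?page=9999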
Only Owner of the post has this privilege\")\n response.status_code = 403\n return response\n instance.delete()\n messages.success(request,\"Successfully deleted\")\n return redirect(\"posts:list\")\n\nfrom django.contrib.auth import *\nfrom .forms import UserLoginForm\n\ndef login_view(request):\n title = \"Login\"\n form = UserLoginForm(request.POST or None)\n if form.is_valid():\n username = form.cleaned_data.get(\"username\")\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n login(request, user)\n print(request.user.is_authenticated())\n return redirect(\"posts:list\")\n return render(request, \"form.html\", {\"form\": form, \"title\": title})\n\n\ndef register_view(request):\n title = \"Register\"\n form = UserRegisterForm(request.POST or None)\n if form.is_valid():\n user = form.save(commit=False)\n password = form.cleaned_data.get('password')\n user.set_password(password)\n user.save()\n new_user = authenticate(username=user.username, password=password)\n login(request, new_user)\n return redirect(\"posts:list\")\n\n context = {\n \"form\": form,\n \"title\": title\n }\n return render(request, \"form.html\", context)\n\n\ndef logout_view(request):\n logout(request)\n return redirect(\"posts:list\")\n\n\n\n\n","sub_path":"notions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43977160","text":"#!/usr/bin/env python2\r\n# -*- coding: UTF-8 -*-\r\n# ---------------------------------------------------------------------------\r\n# ___ __ __ __ ___\r\n# / | \\ | \\ | \\ / Automatic\r\n# \\__ |__/ |__/ |___| \\__ Annotation\r\n# \\ | | | | \\ of\r\n# ___/ | | | | ___/ Speech\r\n# =============================\r\n#\r\n# http://sldr.org/sldr000800/preview/\r\n#\r\n# ---------------------------------------------------------------------------\r\n# developed at:\r\n#\r\n# Laboratoire Parole et Langage\r\n#\r\n# Copyright (C) 2011-2015 Brigitte Bigi\r\n#\r\n# Use of this software is governed by the GPL, v3\r\n# This banner notice must not be removed\r\n# ---------------------------------------------------------------------------\r\n#\r\n# SPPAS is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# SPPAS is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with SPPAS. 
If not, see <http://www.gnu.org/licenses/>.\r\n#\r\n# ----------------------------------------------------------------------------\r\n# File: sndplayer.py\r\n# ----------------------------------------------------------------------------\r\n\r\n__docformat__ = \"\"\"epytext\"\"\"\r\n__authors__ = \"\"\"Brigitte Bigi\"\"\"\r\n__copyright__ = \"\"\"Copyright (C) 2011-2015 Brigitte Bigi\"\"\"\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# Imports\r\n# ----------------------------------------------------------------------------\r\n\r\nimport wx\r\nimport logging\r\nimport wx.media\r\n\r\nfrom wxgui.sp_images import PLAYER_BACKGROUND\r\n\r\nfrom wxgui.sp_icons import PLAYER_INFO\r\nfrom wxgui.sp_icons import PLAYER_INFO_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_EJECT\r\nfrom wxgui.sp_icons import PLAYER_EJECT_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_NEXT\r\nfrom wxgui.sp_icons import PLAYER_NEXT_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_REWIND\r\nfrom wxgui.sp_icons import PLAYER_REWIND_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_PLAY\r\nfrom wxgui.sp_icons import PLAYER_PLAY_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_PAUSE\r\nfrom wxgui.sp_icons import PLAYER_PAUSE_DISABLED\r\nfrom wxgui.sp_icons import PLAYER_STOP\r\nfrom wxgui.sp_icons import PLAYER_STOP_DISABLED\r\n\r\nfrom wxgui.sp_consts import TB_ICONSIZE\r\n\r\nfrom wxgui.ui.CustomEvents import FileWanderEvent\r\nimport wxgui.ui.KnobCtrl as KC\r\nfrom wxgui.structs.prefs import Preferences\r\nfrom wxgui.structs.themes import BaseTheme\r\n\r\nfrom wxgui.cutils.ctrlutils import CreateButton\r\nfrom wxgui.cutils.imageutils import spBitmap\r\n\r\nfrom wxgui.dialogs.sndinfodialog import SndInfoDialog\r\n\r\n\r\n# ---------------------------------------------------------------------------\r\n# Constants\r\n# ---------------------------------------------------------------------------\r\n\r\nTIMER_STEP = 10 # timer step event (in milliseconds)\r\nFORWARD_STEP = 1000 # forward step (in milliseconds)\r\nBACKWARD_STEP = 1000 # backward step (in milliseconds)\r\n\r\n# ---------------------------------------------------------------------------\r\n\r\nclass SndPlayer( wx.Panel ):\r\n \"\"\"\r\n Sound Player.\r\n \"\"\"\r\n\r\n def __init__(self, parent, orient=wx.VERTICAL, refreshtimer=TIMER_STEP, prefsIO=None):\r\n \"\"\" Create a new WavProperty instance. 
\"\"\"\r\n\r\n wx.Panel.__init__(self, parent)\r\n\r\n # members\r\n self._prefs = self._check_prefs(prefsIO)\r\n self._filename = None\r\n self._mediaplayer = None\r\n self._buttons = {}\r\n self._showpanel = None # panel to show information (clock, peakmeter, signal, ...)\r\n self._playbackSlider = None # slider (to change the position with the mouse)\r\n self._knob = None # volume control\r\n self._offsets = (0,0) # from/to offsets\r\n\r\n self.BMP_PLAYER_INFO = spBitmap( PLAYER_INFO, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_INFO_DISABLED = spBitmap( PLAYER_INFO_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_EJECT = spBitmap( PLAYER_EJECT, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_EJECT_DISABLED = spBitmap( PLAYER_EJECT_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_NEXT = spBitmap( PLAYER_NEXT, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_NEXT_DISABLED = spBitmap( PLAYER_NEXT_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_REWIND = spBitmap( PLAYER_REWIND, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_REWIND_DISABLED = spBitmap( PLAYER_REWIND_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_PLAY = spBitmap( PLAYER_PLAY, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_PLAY_DISABLED = spBitmap( PLAYER_PLAY_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_PAUSE = spBitmap( PLAYER_PAUSE, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_PAUSE_DISABLED = spBitmap( PLAYER_PAUSE_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_STOP = spBitmap( PLAYER_STOP, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n self.BMP_PLAYER_STOP_DISABLED = spBitmap( PLAYER_STOP_DISABLED, TB_ICONSIZE, theme=self._prefs.GetValue('M_ICON_THEME') )\r\n\r\n # create the audio bar\r\n if orient == wx.VERTICAL:\r\n sizer = self._build_audioadvanced()\r\n else:\r\n sizer = self._build_audiosimple()\r\n\r\n # events\r\n self.Bind(wx.EVT_SLIDER, self.onSeek)\r\n\r\n # timer, used to update the playing state\r\n self._timer = wx.Timer(self)\r\n self.Bind(wx.EVT_TIMER, self.onTimer)\r\n self._refreshTimer = refreshtimer\r\n\r\n self.SetBackgroundColour( self._prefs.GetValue(\"M_BG_COLOUR\") )\r\n self.SetForegroundColour( self._prefs.GetValue(\"M_FG_COLOUR\") )\r\n self.SetFont( self._prefs.GetValue(\"M_FONT\") )\r\n\r\n self.SetSizer( sizer )\r\n self.SetAutoLayout( True )\r\n self.Layout()\r\n\r\n # End __init__\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n def _build_showpanel(self, wave):\r\n \"\"\" Build or change the show panel. 
\"\"\"\r\n\r\n # a showpanel is already existing\r\n if self._showpanel is not None:\r\n self._showpanel.Destroy()\r\n\r\n # no wav: show a nice picture\r\n if wave is None:\r\n self._showpanel = wx.Panel(self, size=(320,120))\r\n img = wx.Image(PLAYER_BACKGROUND, wx.BITMAP_TYPE_ANY)\r\n img2 = wx.StaticBitmap(self._showpanel, wx.ID_ANY, wx.BitmapFromImage(img))\r\n else:\r\n # a wave is given:\r\n # show dynamic information while playing (clock, peakmeter, ...)\r\n # TO DO\r\n self._showpanel = wx.Panel(self, size=(320,120))\r\n img = wx.Image(PLAYER_BACKGROUND, wx.BITMAP_TYPE_ANY)\r\n img2 = wx.StaticBitmap(self._showpanel, wx.ID_ANY, wx.BitmapFromImage(img))\r\n\r\n\r\n def _build_audioadvanced(self):\r\n \"\"\" Build the audio controls. \"\"\"\r\n\r\n # create the main sizer.\r\n sizer = wx.GridBagSizer(4, 4)\r\n bgcolour = self._prefs.GetValue('M_BG_COLOUR')\r\n\r\n # 1st column\r\n self._buttons['eject'] = CreateButton(self, self.BMP_PLAYER_EJECT_DISABLED, self.onEject, sizer, colour=bgcolour)\r\n self._buttons['next'] = CreateButton(self, self.BMP_PLAYER_NEXT_DISABLED, self.onNext, sizer, colour=bgcolour)\r\n self._buttons['previous'] = CreateButton(self, self.BMP_PLAYER_REWIND_DISABLED,self.onRewind, sizer, colour=bgcolour)\r\n\r\n # 2nd column\r\n self._build_showpanel( None )\r\n\r\n # 3rd column\r\n self._buttons['play'] = CreateButton(self, self.BMP_PLAYER_PLAY_DISABLED, self.onPlay, sizer, colour=bgcolour)\r\n self._buttons['stop'] = CreateButton(self, self.BMP_PLAYER_STOP_DISABLED, self.onStop, sizer, colour=bgcolour)\r\n self._buttons['pause'] = CreateButton(self, self.BMP_PLAYER_PAUSE_DISABLED, self.onPause, sizer, colour=bgcolour)\r\n\r\n # 4th column\r\n minvalue = 0\r\n maxvalue = 101\r\n therange = 5\r\n self._knob = KC.KnobCtrl(self, -1, size=(80, 80))\r\n self._knob.SetTags(range(minvalue, maxvalue+1, therange))\r\n self._knob.SetAngularRange(-45, 225)\r\n self._knob.SetValue( int((minvalue+maxvalue+1)/2) )\r\n tickrange = range(minvalue, maxvalue+1, therange)\r\n self._knob.SetTags(tickrange)\r\n self.Bind(KC.KC_EVENT_ANGLE_CHANGED, self.onAngleChanged, self._knob)\r\n self._knobtracker = wx.StaticText(self, -1, \"Volume = %d\" % int((minvalue+maxvalue)/2))\r\n\r\n # sizer\r\n sizer.Add(self._buttons['eject'], (0,0), flag=wx.ALL, border=4)\r\n sizer.Add(self._buttons['next'], (1,0), flag=wx.ALL, border=4)\r\n sizer.Add(self._buttons['previous'],(2,0), flag=wx.ALL, border=4)\r\n sizer.Add(self._showpanel, (0,1),(3,1), flag=wx.EXPAND|wx.ALL, border=4)\r\n sizer.Add(self._buttons['play'], (0,2), flag=wx.ALL, border=4)\r\n sizer.Add(self._buttons['stop'], (1,2), flag=wx.ALL, border=4)\r\n sizer.Add(self._buttons['pause'], (2,2), flag=wx.ALL, border=4)\r\n sizer.Add(self._knob, (0,3), (2,1), flag=wx.EXPAND|wx.TOP, border=4)\r\n sizer.Add(self._knobtracker, (2,3), flag=wx.TOP, border=4)\r\n\r\n # create playback slider\r\n self._playbackSlider = wx.Slider(self, wx.ID_ANY, size=wx.DefaultSize, style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS)\r\n sizer.Add(self._playbackSlider, (3,0), (1,4), wx.ALL|wx.EXPAND, border=4)\r\n\r\n return sizer\r\n\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def _build_audiosimple(self):\r\n \"\"\" Build the audio controls. 
\"\"\"\r\n\r\n self._showpanel = None\r\n self._playbackSlider = None\r\n self._knob = None\r\n self._knobtracker = None\r\n\r\n # create the main sizer.\r\n sizer = wx.BoxSizer( wx.HORIZONTAL )\r\n\r\n # create the audio bar\r\n self._buttons['info'] = CreateButton(self, self.BMP_PLAYER_INFO_DISABLED, self.onInfo, sizer)\r\n self._buttons['play'] = CreateButton(self, self.BMP_PLAYER_PLAY_DISABLED, self.onPlay, sizer)\r\n self._buttons['pause'] = CreateButton(self, self.BMP_PLAYER_PAUSE_DISABLED, self.onPause, sizer)\r\n\r\n #self._buttons['next'] = CreateButton(self, self.BMP_PLAYER_NEXT_DISABLED, self.onNext, sizer)\r\n #self._buttons['previous'] = CreateButton(self, self.BMP_PLAYER_REWIND_DISABLED,self.onRewind,sizer)\r\n #self._buttons['stop'] = CreateButton(self, self.BMP_PLAYER_STOP_DISABLED, self.onStop, sizer)\r\n\r\n # sizer\r\n sizer.Add(self._buttons['info'], 1, flag=wx.ALL, border=2)\r\n sizer.Add(self._buttons['play'], 1, flag=wx.ALL, border=2)\r\n sizer.Add(self._buttons['pause'], 1, flag=wx.ALL, border=2)\r\n\r\n return sizer\r\n\r\n # End _build_audiosimple\r\n #-------------------------------------------------------------------------\r\n\r\n\r\n def _check_prefs(self, prefs):\r\n \"\"\"\r\n Check if preferences are set properly. Set new ones if required.\r\n Return the new version.\r\n \"\"\"\r\n if prefs is None:\r\n prefs = Preferences( BaseTheme() )\r\n\r\n else:\r\n try:\r\n bg = prefs.GetValue( 'M_BG_COLOUR' )\r\n fg = prefs.GetValue( 'M_FG_COLOUR' )\r\n font = prefs.GetValue( 'M_FONT' )\r\n icons = prefs.GetValue( 'M_ICON_THEME' )\r\n except Exception:\r\n self._prefsIO.SetTheme( BaseTheme() )\r\n return prefs\r\n\r\n #-------------------------------------------------------------------------\r\n\r\n\r\n\r\n #----------------------------------------------------------------------\r\n # Methods\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def FileSelected(self, filename):\r\n \"\"\"\r\n Set a sound file.\r\n \"\"\"\r\n logging.debug(' ... sndplayer file selected ...')\r\n # do not assign the same file!!!\r\n if filename == self._filename and self._mediaplayer is not None:\r\n logging.debug(' same file name/ Return!!!')\r\n return\r\n\r\n try:\r\n m = wx.media.MediaCtrl(self, style=wx.NO_BORDER)\r\n m.Load( filename )\r\n self._length = m.Length()\r\n if self._length == 0: # **** BUG of the MediaPlayer! ****\r\n import wave\r\n w = wave.Wave_read(filename)\r\n self._length = int( 1000 * float(w.getnframes())/float(w.getframerate()) )\r\n except Exception as e:\r\n logging.info(\" ... Error loading: %s\" % filename)\r\n wx.MessageBox('Error loading: '+filename+' '+str(e), 'Info', wx.OK | wx.ICON_INFORMATION)\r\n return False\r\n\r\n # set mediaplayer with the new one\r\n self._filename = filename\r\n self._mediaplayer = m\r\n\r\n #self._mediaplayer.SetInitialSize()\r\n self.ActivateButtons(True)\r\n self._offsets = (0,self._length)\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetRange(0, self._length)\r\n self._playbackSlider.SetTickFreq(int(self._length/10), 1)\r\n\r\n self._timer.Start( self._refreshTimer )\r\n\r\n self.Refresh()\r\n\r\n # End FileSelected\r\n #------------------------------------------------------------------------\r\n\r\n\r\n def FileDeSelected(self):\r\n \"\"\"\r\n Reset information.\r\n \"\"\"\r\n # take care... the current mediaplayer can be playing. 
Unset properly!!\r\n if self._mediaplayer is not None and self._mediaplayer.GetState() != wx.media.MEDIASTATE_STOPPED :\r\n self.onStop(None)\r\n\r\n if self._showpanel is not None:\r\n self._build_showpanel( None )\r\n if self._mediaplayer is not None:\r\n self._mediaplayer.Destroy()\r\n\r\n self._filename = None\r\n self._mediaplayer = None\r\n self._offsets = (0,0)\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetRange(0, 0)\r\n\r\n self.ActivateButtons(False)\r\n self.EnableButtons(False)\r\n\r\n self._timer.Stop()\r\n\r\n self.Layout()\r\n self.Refresh()\r\n\r\n # End FileDeSelected\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n def SetOffsetPeriod(self, start, end):\r\n \"\"\"\r\n Fix a start position and a end position to play the sound.\r\n \"\"\"\r\n if self._mediaplayer is not None and self._mediaplayer.GetState() == wx.media.MEDIASTATE_PLAYING:\r\n self.onStop(None)\r\n\r\n if self._mediaplayer is not None and end > self._length:\r\n end = self._length\r\n\r\n self._offsets = (start,end)\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetRange(start,end)\r\n\r\n # End SetOffsetPeriod\r\n #----------------------------------------------------------------------\r\n\r\n\r\n #----------------------------------------------------------------------\r\n # Callbacks\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onInfo(self, event):\r\n \"\"\" Display information about the selected Wave. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n pass\r\n try:\r\n dlg = SndInfoDialog( self, self._prefs, self._filename )\r\n except Exception as e:\r\n wx.MessageBox('No information available. %s'%str(e), 'Info', wx.OK | wx.ICON_INFORMATION)\r\n\r\n # End onInfo\r\n #-------------------------------------------------------------------------\r\n\r\n\r\n def onSeek(self,event):\r\n \"\"\" Seeks the media file according to the amount the slider has been adjusted. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n if self._playbackSlider is not None:\r\n offset = self._playbackSlider.GetValue()\r\n else:\r\n offset = self._offsets[0]\r\n\r\n self._mediaplayer.Seek( offset, mode=wx.FromStart )\r\n\r\n # End onSeek\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onEject(self, event):\r\n \"\"\" Eject the music. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n evt = FileWanderEvent()\r\n evt.SetEventObject(self)\r\n wx.PostEvent(self.GetParent(), evt)\r\n\r\n #self.FileDeSelected()\r\n\r\n # End onEject\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onNext(self, event):\r\n \"\"\" Go forward in the music. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n offset = self._mediaplayer.Tell()\r\n forward = offset + FORWARD_STEP\r\n (omin,omax) = self._offsets\r\n if forward > omax:\r\n forward = omin # come back at the beginning!\r\n\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetValue( forward )\r\n\r\n self._mediaplayer.Seek( forward, mode=wx.FromStart )\r\n\r\n # End onNext\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onRewind(self, event):\r\n \"\"\" Go backward in the music. 
\"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n offset = self._mediaplayer.Tell()\r\n backward = offset - BACKWARD_STEP\r\n (omin,omax) = self._offsets\r\n if backward < omin:\r\n backward = omax # loop\r\n\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetValue( backward )\r\n\r\n self._mediaplayer.Seek( backward, mode=wx.FromStart )\r\n\r\n # End onRewind\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onPause(self, event):\r\n \"\"\" Pauses the music. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n logging.debug(' PAUSE EVENT RECEIVED ')\r\n\r\n state = self._mediaplayer.GetState()\r\n\r\n if state == wx.media.MEDIASTATE_PLAYING:\r\n self._mediaplayer.Pause()\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE_DISABLED )\r\n\r\n elif state == wx.media.MEDIASTATE_PAUSED:\r\n self.onPlay(event)\r\n self._buttons['play'].SetBitmapLabel( self.BMP_PLAYER_PLAY )\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE )\r\n\r\n # End onPause\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onPlay(self, event):\r\n \"\"\" Plays the music. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n logging.debug('onPlay. Unable to play: No media player.')\r\n return\r\n if self._mediaplayer.GetState() == wx.media.MEDIASTATE_PLAYING:\r\n logging.debug('onPlay. Unable to play: already playing!')\r\n return\r\n\r\n # save current position\r\n offset = self._mediaplayer.Tell()\r\n omin,omax = self._offsets\r\n if self._playbackSlider is not None:\r\n offset = self._playbackSlider.GetValue()\r\n elif (offset < omin or offset > omax):\r\n offset = omin\r\n\r\n if not self._mediaplayer.Play():\r\n logging.debug('onPlay. Unable to play. offset=%d'%offset)\r\n wx.MessageBox(\"Unable to Play. Offset=%d\"%offset,\r\n \"ERROR\",\r\n wx.ICON_ERROR | wx.OK)\r\n return\r\n\r\n # force to play at the good position\r\n self._mediaplayer.Seek( offset, mode=wx.FromStart ) # required!\r\n\r\n if self._knob is not None:\r\n self._mediaplayer.SetVolume( float(self._knob.GetValue())/100.0 )\r\n\r\n self._buttons['play'].SetBitmapLabel( self.BMP_PLAYER_PLAY )\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE )\r\n\r\n self.Refresh()\r\n\r\n # End onPlay\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onStop(self, event):\r\n \"\"\" Stops the music and resets the play button. \"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n try:\r\n self._mediaplayer.Stop()\r\n s,e = self._offsets\r\n self._mediaplayer.Seek( s )\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetValue( s )\r\n except Exception:\r\n # provide errors like:\"ressource temporairement indisponible\"\r\n pass\r\n\r\n self._buttons['play'].SetBitmapLabel( self.BMP_PLAYER_PLAY )\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE )\r\n\r\n # End onStop\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onAngleChanged(self, event):\r\n \"\"\" Change the volume value. \"\"\"\r\n\r\n value = event.GetValue()\r\n self._knobtracker.SetLabel(\"Volume = \" + str(value))\r\n if self._mediaplayer:\r\n self._mediaplayer.SetVolume( float(value)/100.0 )\r\n\r\n # End onAngleChanged\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onTimer(self, event):\r\n \"\"\" Keeps the player slider updated. 
\"\"\"\r\n\r\n if self._mediaplayer is None:\r\n return\r\n\r\n offset = self._mediaplayer.Tell()\r\n # On MacOS, it seems that offset is not so precise we could expect...\r\n # It can be + or - 2 compared to the expected value!\r\n\r\n if self._mediaplayer.GetState() == wx.media.MEDIASTATE_PLAYING and self._playbackSlider is not None:\r\n self._playbackSlider.SetValue( offset )\r\n\r\n omin,omax = self._offsets\r\n if self._mediaplayer.GetState() == wx.media.MEDIASTATE_PLAYING and (offset < omin-3 or offset > omax+3):\r\n self.onStop(event)\r\n\r\n # End onTimer\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def onClose(self, event):\r\n \"\"\"\r\n Close (destructor).\r\n \"\"\"\r\n self._timer.Stop()\r\n self.Destroy()\r\n\r\n # End Close\r\n # ------------------------------------------------------------------------\r\n\r\n\r\n # -----------------------------------------------------------------------\r\n # GUI\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n def SetPreferences(self, prefs):\r\n \"\"\" Set new preferences. \"\"\"\r\n\r\n self._prefs = prefs\r\n self.SetBackgroundColour( self._prefs.GetValue(\"M_BG_COLOUR\") )\r\n self.SetForegroundColour( self._prefs.GetValue(\"M_FG_COLOUR\") )\r\n self.SetFont( self._prefs.GetValue(\"M_FONT\") )\r\n # apply bg on all buttons...\r\n for b in self._buttons.keys():\r\n self._buttons[b].SetBackgroundColour( self._prefs.GetValue(\"M_BG_COLOUR\") )\r\n\r\n #-------------------------------------------------------------------------\r\n\r\n\r\n def SetFont(self, font):\r\n \"\"\" Change font of all texts. \"\"\"\r\n\r\n wx.Window.SetFont( self,font )\r\n if self._knobtracker is not None:\r\n self._knobtracker.SetFont( font )\r\n\r\n # End SetFont\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n def SetBackgroundColour(self, colour):\r\n \"\"\" Change the background color of all objects. \"\"\"\r\n\r\n wx.Window.SetBackgroundColour( self,colour )\r\n\r\n for b in self._buttons:\r\n self._buttons[b].SetBackgroundColour( colour )\r\n\r\n if self._showpanel is not None:\r\n self._showpanel.SetBackgroundColour( colour )\r\n if self._knobtracker is not None:\r\n self._knobtracker.SetBackgroundColour( colour )\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetBackgroundColour( colour )\r\n\r\n self.Refresh()\r\n\r\n # End SetForegroundColour\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n def SetForegroundColour(self, colour):\r\n \"\"\" Change the foreground color of all objects. 
\"\"\"\r\n\r\n wx.Window.SetForegroundColour( self,colour )\r\n\r\n for b in self._buttons:\r\n self._buttons[b].SetForegroundColour( colour )\r\n\r\n if self._showpanel is not None:\r\n self._showpanel.SetForegroundColour( colour )\r\n if self._knobtracker is not None:\r\n self._knobtracker.SetForegroundColour( colour )\r\n if self._playbackSlider is not None:\r\n self._playbackSlider.SetForegroundColour( colour )\r\n\r\n self.Refresh()\r\n\r\n # End SetForegroundColour\r\n # -----------------------------------------------------------------------\r\n\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n def ActivateButtons(self, value=True):\r\n self.EnableButtons(False)\r\n if value is True:\r\n self._buttons['play'].SetBitmapLabel( self.BMP_PLAYER_PLAY )\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE )\r\n try:\r\n self._buttons['eject'].SetBitmapLabel( self.BMP_PLAYER_EJECT )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['info'].SetBitmapLabel( self.BMP_PLAYER_INFO )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['next'].SetBitmapLabel( self.BMP_PLAYER_NEXT )\r\n self._buttons['previous'].SetBitmapLabel( self.BMP_PLAYER_REWIND )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['stop'].SetBitmapLabel( self.BMP_PLAYER_STOP )\r\n except Exception:\r\n pass\r\n else:\r\n self._buttons['play'].SetBitmapLabel( self.BMP_PLAYER_PLAY_DISABLED )\r\n self._buttons['pause'].SetBitmapLabel( self.BMP_PLAYER_PAUSE_DISABLED )\r\n try:\r\n self._buttons['eject'].SetBitmapLabel( self.BMP_PLAYER_EJECT_DISABLED )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['info'].SetBitmapLabel( self.BMP_PLAYER_INFO_DISABLED )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['stop'].SetBitmapLabel( self.BMP_PLAYER_STOP_DISABLED )\r\n except Exception:\r\n pass\r\n try:\r\n self._buttons['previous'].SetBitmapLabel( self.BMP_PLAYER_REWIND_DISABLED )\r\n self._buttons['next'].SetBitmapLabel( self.BMP_PLAYER_NEXT_DISABLED )\r\n except Exception:\r\n pass\r\n\r\n def EnableButtons(self, value=True):\r\n for b in self._buttons:\r\n self._buttons[b].Enable( not value )\r\n\r\n# ----------------------------------------------------------------------------\r\n","sub_path":"sppas/src/wxgui/panels/sndplayer.py","file_name":"sndplayer.py","file_ext":"py","file_size_in_byte":27657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"529793734","text":"from django.db import models\nfrom eCube_UI_2.core.Add_Request.models import (DomainMasterBase, RequestModeBase, CountryMasterBase,\n FieldGroupMasterBase)\nfrom hotel.user_management.models import UserMaster, DomainMaster, CountryMaster\n\n\nclass RequestModeMaster(RequestModeBase):\n pass\n\n\nclass FieldGroupMaster(FieldGroupMasterBase):\n id = models.AutoField(db_column='GroupID', primary_key=True)\n name = models.CharField(db_column='GroupName', max_length=50)\n description = models.CharField(db_column='GroupDesc', max_length=50)\n active = models.BooleanField(db_column='Active')\n user_id = models.ForeignKey(UserMaster, db_column=\"FGM_UserId\", on_delete=models.DO_NOTHING)\n created_date = models.DateTimeField(auto_now=True, db_column='CreatedDate')\n bli_id = models.IntegerField(db_column='Bli_ID', default=0)\n\nclass temphotelmaster(models.Model):\n id = models.AutoField(db_column='index', primary_key=True)\n class Meta:\n managed = False\n db_table = 'temphotelmaster_excel'\n\n\n\nclass CityMaster(models.Model):\n id = 
models.AutoField(db_column='CityId', primary_key=True)\n code = models.CharField(db_column='CityCode', max_length=10)\n name = models.CharField(db_column='CityName',max_length=100)\n country = models.ForeignKey(CountryMaster, db_column=\"CountryId\", on_delete=models.CASCADE)\n _active = models.PositiveSmallIntegerField(db_column='Active', default=0)\n created_by = models.ForeignKey(UserMaster,db_column='CreatedBy', related_name='citymaster_createdby',on_delete = models.DO_NOTHING)\n created_date = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modified_by = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='citymaster_modifiedby' ,on_delete = models.DO_NOTHING)\n modified_date = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'Cities'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass AirportCodeMaster(models.Model):\n id = models.AutoField(db_column='AirportCodeId', primary_key=True)\n code = models.CharField(db_column='AirportCode', max_length=10)\n name = models.CharField(db_column='AirportName', max_length=100)\n country = models.ForeignKey(CountryMaster, db_column=\"CountryId\", on_delete=models.CASCADE)\n city = models.ForeignKey(CityMaster, db_column=\"CityId\", on_delete=models.CASCADE)\n _active = models.PositiveSmallIntegerField(db_column='Active', default=0)\n created_by = models.ForeignKey(UserMaster,db_column='CreatedBy', related_name='airportcodemaster_created_by' ,on_delete = models.DO_NOTHING)\n created_date = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modified_by = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='airportmaster_modified_by' , on_delete = models.DO_NOTHING)\n modified_date = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'AirportCodes'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass HotelGroupMaster(models.Model):\n id = models.AutoField(db_column='HotelGroupId', primary_key=True)\n group = models.CharField(db_column='HotelGroup', max_length=200)\n _active = models.PositiveSmallIntegerField(db_column='Active', default=0)\n created_by = models.ForeignKey(UserMaster,db_column='CreatedBy', related_name='hotelgroupmaster_createdby' ,on_delete = models.DO_NOTHING)\n created_date = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modified_by = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='hotelgroupmaster_modifiedby', on_delete = models.DO_NOTHING)\n modified_date = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'HotelGroups'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass PointOfSaleMaster(models.Model):\n id = models.AutoField(db_column='PointOfSaleId', primary_key=True)\n point_of_sale = models.CharField(db_column='PointOfSale', max_length=100)\n code = models.CharField(db_column='PointOfSaleCode', max_length=10)\n #country = models.ForeignKey(CountryMaster, db_column=\"CountryId\", on_delete=models.CASCADE)\n _active = models.PositiveSmallIntegerField(db_column='Active', default=0)\n created_by = models.ForeignKey(UserMaster,db_column='CreatedBy', related_name='pos_createdby', on_delete = models.DO_NOTHING)\n created_date = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modified_by = models.ForeignKey(UserMaster, db_column='ModifiedBy', 
related_name='pos_modifiedby', on_delete = models.DO_NOTHING)\n modified_date = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'HotelPOS'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass CompetitorMaster(models.Model):\n id = models.AutoField(db_column='CompetitorId', primary_key=True)\n name = models.CharField(db_column='CompetitorName',max_length=256)\n _active = models.BooleanField(db_column='Active',default=True)\n createdDate = models.DateTimeField(db_column='CreatedDate',auto_now=True)\n updatedDate = models.DateTimeField(db_column='ModifiedDate', auto_now_add=True)\n #countries = models.ForeignKey(CountryMaster, db_column='Fk_CountryId', on_delete=models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'vw_competitor'\n\n\nclass StarRatingMaster(models.Model):\n id = models.AutoField(db_column='StarRatingId', primary_key=True)\n starrating = models.CharField(db_column='StarRating', max_length=50)\n starratingcode = models.CharField(db_column='StarRatingCode', max_length=50)\n _active = models.BooleanField(db_column='Active')\n createdby = models.ForeignKey(UserMaster, db_column='CreatedBy', related_name='starrratings_created_user',\n on_delete=models.DO_NOTHING)\n createddate = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modifiedby = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='starratings_modified_user',\n on_delete=models.DO_NOTHING)\n modifieddate = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'StarRatings'\n\n\nclass HotelMaster(models.Model):\n id = models.AutoField(db_column='HotelId', primary_key=True)\n website_hotel_id = models.CharField(db_column='WebSiteHotelId', max_length=255)\n name = models.CharField(db_column='HotelName', max_length=512)\n address1 = models.CharField(db_column='HotelAddress1', max_length=255)\n address2 = models.CharField(db_column='HotelAddress2', max_length=255)\n city = models.ForeignKey(CityMaster, db_column='CityId', on_delete=models.CASCADE)\n brand = models.CharField(db_column='HotelBrandName', max_length=50)\n competitorId = models.IntegerField(db_column='CompetitorId')\n star_rating = models.ForeignKey(StarRatingMaster, db_column='StarRatingId', on_delete=models.DO_NOTHING) #to do foriegn key\n post_code = models.CharField(db_column='HotelPostCode', max_length=255)\n match_status = models.PositiveSmallIntegerField(db_column='HotelMatchStatus')\n description = models.CharField(db_column='HotelDescription', max_length=100)\n is_processed = models.PositiveSmallIntegerField(db_column='isProceesed')\n match_hotel_name = models.CharField(db_column='matchhotelname', max_length=100)\n dipbag_sync_id = models.PositiveIntegerField(db_column='DipBagSyncId') #to do foriegn key\n is_mailed = models.PositiveSmallIntegerField(db_column='IsMailed')\n is_mailed1 = models.PositiveSmallIntegerField(db_column='IsMailed1')\n _active = models.PositiveSmallIntegerField(db_column='Active', default=0)\n created_by = models.ForeignKey(UserMaster,db_column='CreatedBy', related_name='hotelmaster_created_by' ,on_delete = models.DO_NOTHING)\n created_date = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modified_by = models.ForeignKey(UserMaster, db_column='ModifiedBy',related_name='hotelmaster_modified_by', on_delete = models.DO_NOTHING)\n modified_date = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n Longitude = 
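The models in this file repeatedly map a legacy tinyint Active column to a Python bool through a private _active field plus a read-only property, with managed = False so Django never tries to migrate the pre-existing tables. The pattern in isolation — the model and table names below are hypothetical:

from django.db import models

class LegacyFlagModel(models.Model):
    _active = models.PositiveSmallIntegerField(db_column='Active', default=0)

    class Meta:
        managed = False             # the table is owned by the legacy database
        db_table = 'SomeLegacyTable'

    @property
    def active(self):
        return bool(self._active)   # expose the 0/1 column as a bool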
models.CharField(db_column='Longitude', max_length=255)\n Latitude = models.CharField(db_column='Latitude', max_length=255)\n ContractManager = models.CharField(db_column='ContractManager', max_length=255)\n DemandGroup = models.CharField(db_column='DemandGroup', max_length=255)\n YieldManager = models.CharField(db_column='YieldManager', max_length=255)\n HotelStatusId = models.IntegerField(db_column='HotelStatusId')\n\n class Meta:\n managed = False\n db_table = 'Hotels'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass BoardTypeMaster(models.Model):\n id = models.AutoField(db_column='BoardTypeId', primary_key=True)\n boardtypecode = models.CharField(db_column='BoardTypeCode', max_length=50)\n boardtypedescription = models.CharField(db_column='BoardTypeDescription', max_length=50)\n _active = models.BooleanField(db_column='Active')\n createdby = models.ForeignKey(UserMaster, db_column='CreatedBy', related_name='boardtypes_created_user',\n on_delete=models.DO_NOTHING)\n createddate = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modifiedby = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='boardtypes_modified_user',\n on_delete=models.DO_NOTHING)\n modifieddate = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'BoardTypes'\n\n @property\n def active(self):\n return bool(self._active)\n\n\nclass RoomTypeMaster(models.Model):\n id = models.AutoField(db_column='RoomTypeId', primary_key=True)\n roomtype = models.CharField(db_column='RoomType', max_length=50)\n roomtypecode = models.CharField(db_column='RoomTypeCode', max_length=50)\n _active = models.BooleanField(db_column='Active')\n createdby = models.ForeignKey(UserMaster, db_column='CreatedBy', related_name='roomtypes_created_user',\n on_delete=models.DO_NOTHING)\n createddate = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modifiedby = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='roomtypes_modified_user',\n on_delete=models.DO_NOTHING)\n modifieddate = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'RoomTypes'\n\n\nclass BookingPeriodMaster(models.Model):\n id = models.AutoField(db_column='BookingPeriodID', primary_key=True)\n bookingperiod = models.CharField(db_column='BookingPeriod', max_length=256)\n _active = models.BooleanField(db_column='Active')\n createdby = models.ForeignKey(UserMaster, db_column='CreatedBy', related_name='bookingperiod_created_user',\n on_delete=models.DO_NOTHING)\n createddate = models.DateTimeField(db_column='CreatedDate', auto_now_add=True)\n modifiedby = models.ForeignKey(UserMaster, db_column='ModifiedBy', related_name='bookingperiod_modified_user',\n on_delete=models.DO_NOTHING)\n modifieddate = models.DateTimeField(db_column='ModifiedDatetime', auto_now=True)\n\n class Meta:\n managed = False\n db_table = 'BookingPeriod'","sub_path":"eCube_Hotel_2/eCube_UI_2/hotel/master/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"129413649","text":"\nimport time\nimport threading\n\nfrom devicecontroller.common import *\nfrom devicecontroller.MessageUtils import *\nfrom devicecontroller.config.DeviceControllerConfig import SystemId\n\n\"\"\"\nThis job collects important system information and sends back report.\nThis job is also an example of simple 
job, which returns just one value.\n\"\"\"\nclass JobGetSystemInfo(threading.Thread):\n requestData = None\n jobId = None\n jobManager = None\n isFinished = False\n stopped = False\n name = JOB_TYPE_SYSTEM_INFO\n startTime = None \n \n def setContext(self, jobManager, jobId, requestData):\n self.jobManager = jobManager\n self.jobId = jobId\n self.requestData = requestData\n self.isFinished = False\n self.stopped = False \n \n def run(self):\n self.startTime = int(time.time()*1000) \n self.stopped = False \n responsePayload = { \n \"hostName\": getSystemHostName(),\n \"systemId\": SystemId,\n \"softwareVersion\": SOFTWARE_VERSION,\n \"softwareName\": SOFTWARE_NAME,\n \"comProtocolVersion\": COM_PROTOCOL_VERSION,\n \"systemUptime\": getSystemUptime(), \n \"systemTime\": getSystemTime(), \n \"systemTimeZone\": getSystemTimeZone(), \n \"serviceUptime\": getServiceUptime(),\n \"jobBufferLenght\": len(self.jobManager.jobBuffer),\n \"jobsServed\": self.jobManager.index \n }\n responsePayload[\"duration\"] = int(time.time()*1000 - self.startTime);\n self.isFinished = True\n self.jobManager.dispatchResponse(getJobFinishedResponse(self.jobId, self.name, self.requestData[\"requestId\"], responsePayload))\n\n def onJobNotifyEvent(self, requestData):\n log.debug(\"onDataEvent:\" + str(requestData))\n \n def getJobTag(self):\n return self.requestData[\"jobTag\"] \n\n def stop(self):\n self.isFinished = True\n self.stopped = True \n\n\"\"\"\nThis job is used for test purposes only. It is an example of long running job\nwhich supports interaction with client. It is able to receive data during it's \nlife time, and send back messages as processing goes on. \nThis job may be aborted prematurely as well.\n\"\"\" \nclass JobLongRunningTest(threading.Thread):\n requestData = None\n jobId = None\n jobManager = None\n isFinished = False\n stopped = False\n name = JOB_TYPE_LONG_RUNNING_TEST\n notifyData = None\n startTime = None\n \n def setContext(self, jobManager, jobId, requestData):\n self.jobManager = jobManager\n self.jobId = jobId\n self.requestData = requestData\n self.isFinished = False\n self.stopped = False\n self.notifyData = \"\"\n \n def run(self):\n self.startTime = int(time.time()*1000) \n self.stopped = False\n log.debug(JOB_TYPE_LONG_RUNNING_TEST + \"job started ...\")\n maxCycles = int(self.requestData[\"requestPayload\"][\"maxCycles\"])\n log.debug(\"max cycles set to \" + str(maxCycles))\n for x in range(maxCycles):\n if (self.stopped == True):\n responseData = getJobAbortedResponse(self.jobId, self.name, self.requestData[\"requestId\"], { \"resultType\": \"intermediateResult\", \"calculatedValue\": x, \"notifyData\": self.notifyData });\n self.jobManager.dispatchResponse(responseData)\n self.isFinished = True\n return\n responseData = getJobEventResponse(self.jobId, self.name, self.requestData[\"requestId\"], { \"resultType\": \"intermediateResult\", \"calculatedValue\": x, \"notifyData\": self.notifyData });\n self.jobManager.dispatchResponse(responseData)\n time.sleep(1)\n self.isFinished = True\n responseData = getJobFinishedResponse(self.jobId, self.name, self.requestData[\"requestId\"], { \"resultType\": \"finalResult\", \"calculatedValue\": x, \"notifyData\": self.notifyData });\n self.jobManager.dispatchResponse(responseData)\n \n def onJobNotifyEvent(self, requestData):\n log.debug(\"onDataEvent:\" + str(requestData))\n self.notifyData = requestData[\"requestPayload\"][\"notifyData\"]\n\n def getJobTag(self):\n return self.requestData[\"jobTag\"] \n \n def stop(self):\n 
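JobLongRunningTest above implements cooperative cancellation: the worker polls a stopped flag on every cycle, and stop() makes it exit (reporting an aborted result) at the next check. A minimal standalone sketch of that pattern — it substitutes a threading.Event for the bare boolean used above, which is my own choice rather than the original design:

import threading
import time

class StoppableJob(threading.Thread):
    def __init__(self, max_cycles):
        super().__init__()
        self.max_cycles = max_cycles
        self._stop_event = threading.Event()

    def run(self):
        for x in range(self.max_cycles):
            if self._stop_event.is_set():
                return            # aborted prematurely, like the stopped check above
            time.sleep(1)         # one unit of work per cycle

    def stop(self):
        self._stop_event.set()

job = StoppableJob(max_cycles=5)
job.start()
job.stop()                        # the worker exits at its next flag check
job.join()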
self.isFinished = True\n self.stopped = True \n \n","sub_path":"DeviceControllerPy/devicecontroller/Job.py","file_name":"Job.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412373899","text":"import uuid\nimport pandas as pd\nfrom typing import *\n\nimport torch\nfrom torch import nn\n\nfrom fastai.vision.all import *\n\n\nidx2lbl = {0:\"Cassava Bacterial Blight (CBB)\",\n 1:\"Cassava Brown Streak Disease (CBSD)\",\n 2:\"Cassava Green Mottle (CGM)\",\n 3:\"Cassava Mosaic Disease (CMD)\",\n 4:\"Healthy\"}\n\n# ========================================================\n# Data / Augmentation Utilities\n# ========================================================\ndef get_x(data): return data[\"filePath\"]\ndef get_y(data): return data[\"label\"]\n\n@delegates(DataBlock.dataloaders)\ndef get_dataloaders(data, item_tfms:list=None, batch_tfms:list=None, **kwargs):\n dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),\n splitter=ColSplitter(),\n get_x=get_x,\n get_y=get_y,\n item_tfms=item_tfms,\n batch_tfms=batch_tfms)\n \n dls = dblock.dataloaders(data, **kwargs)\n return dls\n \ndef get_dataset(pth:str, im_dir:str, curr_fold:int, shuffle:bool=True):\n 'loads the dataframe and formats it'\n assert curr_fold < 5\n data = pd.read_csv(pth)\n data[\"filePath\"] = [os.path.join(im_dir, data[\"image_id\"][idx]) for idx in range(len(data))]\n data[\"is_valid\"] = [data.kfold[n] == curr_fold for n in range(len(data))]\n data['label'].replace(idx2lbl, inplace=True)\n \n if shuffle: data = data.sample(frac=1).reset_index(drop=True, inplace=False)\n else : data = data.reset_index(drop=True, inplace=False)\n \n return data\n\nclass AlbumentationsTransform(RandTransform):\n 'fast.ai type transformations using albumentation transform functions'\n split_idx,order=None,2\n def __init__(self, train_aug, valid_aug): store_attr()\n \n def before_call(self, b, split_idx): self.idx = split_idx\n \n def encodes(self, img: PILImage):\n if self.idx == 0 : aug_img = self.train_aug(image=np.array(img))['image']\n else : aug_img = self.valid_aug(image=np.array(img))['image']\n return PILImage.create(aug_img)\n\n \n# ========================================================\n# General Utilities\n# ========================================================\ndef generate_random_id() -> str:\n 'returns a random id for the experiment'\n idx = uuid.uuid1()\n idx = str(idx).split(\"-\")[0]\n return idx\n\ndef cut_model(model: nn.Module, upto: int = -2) -> nn.Module:\n ls = list(model.children())[:upto]\n encoder = nn.Sequential(*ls)\n return encoder\n\ndef create_head(nf: int, n_out: int, lin_ftrs: int = 512, act: nn.Module = nn.ReLU(inplace=True)):\n 'create a custom head for a classifier'\n lin_ftrs = [nf, lin_ftrs, n_out]\n \n pool = AdaptiveConcatPool2d()\n\n layers = [pool, nn.Flatten()]\n\n layers += [\n nn.BatchNorm1d(lin_ftrs[0]),\n nn.Dropout(0.25),\n act,\n nn.Linear(lin_ftrs[0], lin_ftrs[1], bias=False),\n nn.BatchNorm1d(lin_ftrs[1]),\n nn.Dropout(0.5),\n act,\n nn.Linear(lin_ftrs[1], lin_ftrs[2], bias=False),\n ]\n return nn.Sequential(*layers)\n\ndef custom_splitter(net): return [params(net.encoder), params(net.decoder)]\n\n\n# ========================================================\n# Layers / Modules \n# ========================================================\nclass AdaptiveConcatPool2d(nn.Module):\n \"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`\"\n def __init__(self, size=None):\n 
super(AdaptiveConcatPool2d, self).__init__()\n self.size = size or 1\n self.ap = nn.AdaptiveAvgPool2d(self.size)\n self.mp = nn.AdaptiveMaxPool2d(self.size)\n\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\n \n@delegates(create_head)\nclass TransferLearningModel(nn.Module):\n \"Transfer Learning with pre-trained encoder.\"\n def __init__(self, encoder, num_classes, cut=-2, init=True, **kwargs):\n super(TransferLearningModel, self).__init__()\n self.encoder = cut_model(encoder, cut)\n \n ftrs = num_features_model(self.encoder) * 2\n self.decoder = create_head(nf=ftrs, n_out=num_classes, **kwargs)\n \n if init: apply_init(self.decoder, nn.init.kaiming_normal_)\n \n def forward(self, xb):\n feats = self.encoder(xb)\n logits = self.decoder(feats)\n return logits","sub_path":"experiments/cassava_utils.py","file_name":"cassava_utils.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292376195","text":"def permutations(lst, low, high):\n if low==high:\n return [lst]\n else:\n ls = []\n for i in permutations(lst[:-1],0,len(lst)-1):\n for j in range(len(lst)):\n ls.append(i[j:] + [lst[-1]] + i[:j])\n return ls\n \n","sub_path":"assignment/4/sl6728_hw4_q9.py","file_name":"sl6728_hw4_q9.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70418196","text":"import smtplib\nimport sys\nimport os\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.utils import COMMASPACE, formatdate\nfrom email import encoders\n\n# python3 smtp.py mail.to.send@gmail.com filename message\nif __name__ == \"__main__\":\n gmail_user = ''\n gmail_password = ''\n send_from = gmail_user \n send_to = [sys.argv[1]]\n file_name = sys.argv[2]\n message_body = sys.argv[3]\n\n os.chdir('/Users/koi/Desktop/ComputerNetwork/lab05')\n\n files = os.listdir()\n\n part = MIMEBase('application', \"octet-stream\")\n if(files.count(file_name) == 0):\n print('File not found')\n else:\n part.set_payload(open(file_name, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"{}\"'.format(file_name))\n\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = COMMASPACE.join(send_to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'Message'\n msg.attach(MIMEText(message_body, 'plain'))\n msg.attach(part)\n \n try:\n server_ssl = smtplib.SMTP_SSL('smtp.mail.ru', 465)\n server_ssl.ehlo()\n server_ssl.login(gmail_user, gmail_password)\n server_ssl.ehlo()\n server_ssl.sendmail(send_from, send_to, msg.as_string())\n server_ssl.close()\n\n print('Email sent!')\n except Exception:\n print(\"Something went wrong!\")\n","sub_path":"lab05/smtp.py","file_name":"smtp.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303283735","text":"import time\nimport matplotlib.pyplot as plt\n\nfrom EasyMCDM.models.Electre import Electre\nfrom EasyMCDM.models.Promethee import Promethee\nfrom EasyMCDM.models.Pareto import Pareto\nfrom EasyMCDM.models.WeightedSum import WeightedSum\nfrom EasyMCDM.models.Irmo import Irmo\n\ndata = {}\nindexes = [ 0, 1, 2, 3, 4, 5, 6 ]\nweights = [0.14,0.14,0.14,0.14,0.14,0.14,0.14]\nprefs = 
[\"min\",\"max\",\"min\",\"min\",\"min\",\"max\",\"min\"]\nvetoes = [45, 29, 550, 6, 4.5, 4.5, 4.5]\nindifference_threshold = 0.6\npreference_thresholds = [20, 10, 200, 4, 2, 2, 7]\n\npaddings = []\npromethee_values = []\nelectre_values = []\nweighted_values = []\npareto_values = []\n\ndata = {}\nnbindiv=200\nnbcrit=2000\n\npaddings = []\npromethee_values = []\nelectre_values = []\nweighted_values = []\npareto_values = []\n\nfor i in range(2,nbcrit,100):\n \n for j in range(nbindiv):\n data[str(j)] = [ 5 for j in range(i) ]\n \n print(i)\n indexes = [ j for j in range(i) ]\n weights = [ 1/i for j in range(i)]\n prefs = [\"min\" for j in range(i)]\n vetoes = [5 for j in range(i)]\n indifference_threshold = 0.6\n preference_thresholds = [20 for j in range(i)]\n paddings.append(i)\n\n print(len(indexes))\n print(len(weights))\n print(len(prefs))\n print(len(vetoes))\n print(len(preference_thresholds))\n print(len(data[\"0\"]))\n print(len(data))\n print()\n\n w = WeightedSum(data=data, verbose=False)\n pa = Pareto(data=data, verbose=False)\n pr = Promethee(data=data, verbose=False)\n e = Electre(data=data, verbose=False)\n\n start = time.time()\n re = e.solve(weights, prefs, vetoes, indifference_threshold, preference_thresholds)\n electre_values.append(time.time() - start)\n\n start = time.time()\n rpr = pr.solve(weights=weights, prefs=prefs)\n promethee_values.append(time.time() - start)\n\n start = time.time()\n rpa = pa.solve(indexes=indexes, prefs=prefs)\n pareto_values.append(time.time() - start)\n\n start = time.time()\n rw = w.solve(pref_indexes=indexes, prefs=prefs, weights=weights, target='min')\n weighted_values.append(time.time() - start)\n\nplt.plot(paddings, promethee_values, label=\"promethee\")\nplt.plot(paddings, electre_values, label=\"electre\")\nplt.plot(paddings, weighted_values, label=\"weighted\")\nplt.plot(paddings, pareto_values, label=\"pareto\")\nplt.legend()\nplt.savefig(\"benchmark_nb_crit.png\")","sub_path":"tests/benchmarks/benchmark_crit.py","file_name":"benchmark_crit.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"2502322","text":"## import statements\nimport requests_oauthlib\nimport webbrowser\nimport json\nimport secret_data\nfrom datetime import datetime\nimport csv\n\n## CACHING SETUP\n\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S.%f\"\nDEBUG = True\nCACHE_FNAME = \"cache_contents.json\"\nCREDS_CACHE_FILE = \"creds.json\"\n\ntry:\n\twith open(CACHE_FNAME,'r') as cache_file:\n\t\tcache_json = cache_file.read()\n\t\tCACHE_DICTION = json.loads(cache_json)\nexcept:\n\tCACHE_DICTION = {}\n\ntry:\n\twith open(CREDS_CACHE_FILE,'r') as creds_file:\n\t\tcache_creds = creds_file.read()\n\t\tCREDS_DICTION = json.loads(cache_creds)\nexcept:\n\tCREDS_DICTION = {}\n\ndef has_cache_expired(timestamp_str, expire_in_days):\n\tnow = datetime.now()\n\tcache_timestamp = datetime.strptime(timestamp_str,DATETIME_FORMAT)\n\n\tdelta = now - cache_timestamp\n\tdelta_in_days = delta.days\n\n\tif delta_in_days > expire_in_days:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef get_from_cache(identifier, dictionary):\n\tidentifier = identifier.upper()\n\tif identifier in dictionary:\n\t\tdata_assoc_dict = dictionary[identifier]\n\t\tif has_cache_expired(data_assoc_dict['timestamp'],data_assoc_dict['expire_in_days']):\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"Cache has expired for {}\".format(identifier))\n\t\t\tdel dictionary[identifier]\n\t\t\tdata = None\n\t\telse:\n\t\t\tdata = 
dictionary[identifier]['values']\n\telse:\n\t\tdata = None\n\treturn data\n\ndef set_in_data_cache(identifier,data,expire_in_days):\n\tidentifier = identifier.upper()\n\tCACHE_DICTION[identifier] = {\n\t\t'values':data,\n\t\t'timestamp':datetime.now().strftime(DATETIME_FORMAT),\n\t\t'expire_in_days': expire_in_days\n\t}\n\n\twith open(CACHE_FNAME, 'w') as cache_file:\n\t\tcache_json = json.dumps(CACHE_DICTION)\n\t\tcache_file.write(cache_json)\n\ndef set_in_creds_cache(identifier, data, expire_in_days):\n\tidentifier = identifier.upper()\n\tCREDS_DICTION[identifier] = {\n\t\t'values' : data,\n\t\t'timestamp' : datetime.now().strftime(DATETIME_FORMAT),\n\t\t'expire_in_days' : expire_in_days\n\t}\n\n\twith open(CREDS_CACHE_FILE, 'w') as cache_file:\n\t\tcache_json = json.dumps(CREDS_DICTION)\n\t\tcache_file.write(cache_json)\n\n## ADDITIONAL CODE for program should go here...\n## Perhaps authentication setup, functions to get and process data, a class definition... etc.\n'''\ndef get_data_from_api(request_url,service_ident,params_diction, expire_in_days=7):\n\tident = create_request_identifier(request_url,params_diction)\n\tdata = get_from_cache(ident,CACHE_DICTION)\n\tif data:\n\t\tif DEBUG:\n\t\t\tprint(\"Loading from data cache: {}... data\".format(ident))\n\telse:\n\t\tif DEBUG:\n\t\t\tprint(\"Fetching new data from {}\".format(request_url))\n'''\n\nCLIENT_KEY = secret_data.client_key\nCLIENT_SECRET = secret_data.client_secret\n\nREQUEST_TOKEN_URL = \"https://www.tumblr.com/oauth/request_token\" \nBASE_AUTH_URL = \"https://www.tumblr.com/oauth/authorize\"\nACCESS_TOKEN_URL = \"https://www.tumblr.com/oauth/access_token\"\n\ndef get_tokens(client_key=CLIENT_KEY,client_secret=CLIENT_SECRET,request_token_url=REQUEST_TOKEN_URL,base_authorization_url=BASE_AUTH_URL,access_token_url=ACCESS_TOKEN_URL,verifier_auto=True):\n\toauth_inst = requests_oauthlib.OAuth1Session(client_key,client_secret=client_secret)\n\tfetch_response = oauth_inst.fetch_request_token(request_token_url)\n\n\tresource_owner_key = fetch_response.get('oauth_token')\n\tresource_owner_secret = fetch_response.get('oauth_token_secret')\n\n\tauth_url = oauth_inst.authorization_url(base_authorization_url)\n\twebbrowser.open(auth_url)\n\n\tif verifier_auto:\n\t\tverifier = input(\"Please input the verifier: \")\n\telse:\n\t\tredirect_result = input(\"Paste the full redirect URL here: \")\n\t\toauth_resp = oauth_inst.parse_authorization_response(redirect_result)\n\t\tverifier = oauth_resp.get('oauth_verifier')\n\n\toauth_inst = requests_oauthlib.OAuth1Session(client_key,client_secret=client_secret,resource_owner_key=resource_owner_key,resource_owner_secret=resource_owner_secret, verifier=verifier)\n\toauth_tokens = oauth_inst.fetch_access_token(access_token_url)\n\n\tresource_owner_key, resource_owner_secret = oauth_tokens.get('oauth_token'), oauth_tokens.get('oauth_token_secret')\n\treturn client_key, client_secret, resource_owner_key, resource_owner_secret, verifier\n\ndef get_tokens_from_service(service_name_ident, expire_in_days = 7):\n\tcreds_data = get_from_cache(service_name_ident, CREDS_DICTION)\n\tif creds_data:\n\t\tif DEBUG:\n\t\t\tprint(\"Loading creds from cache...\")\n\t\t\tprint()\n\n\telse:\n\t\tif DEBUG:\n\t\t\tprint(\"Fetching fresh credentials...\")\n\t\t\tprint(\"Prepare to log in via browser.\")\n\t\t\tprint()\n\t\tcreds_data = get_tokens()\n\t\tset_in_creds_cache(service_name_ident, creds_data, expire_in_days=expire_in_days)\n\treturn creds_data\n\ndef create_request_identifier(url, 
params_diction):\n\ttotal_ident = url + \"?api_key=\" + params_diction\n\treturn total_ident.upper()\n\ndef get_data_from_api(request_url, service_ident, params_diction, expire_in_days=7):\n\tident = create_request_identifier(request_url, params_diction)\n\tprint(ident)\n\tdata = get_from_cache(ident, CACHE_DICTION)\n\tif data:\n\t\tif DEBUG:\n\t\t\tprint(\"Loading from data cache: {}... data\".format(ident))\n\telse:\n\t\tif DEBUG:\n\t\t\tprint(\"Fetching new data from {}\".format(request_url))\n\t\tclient_key, client_secret, resource_owner_key, resource_owner_secret,verifier = get_tokens_from_service(service_ident)\n\n\t\toauth_inst = requests_oauthlib.OAuth1Session(client_key, client_secret=client_secret, resource_owner_key=resource_owner_key,resource_owner_secret=resource_owner_secret)\n\t\tresp = oauth_inst.get(request_url,params = params_diction)\n\t\tdata_str = resp.text\n\t\tdata = json.loads(data_str)\n\t\tset_in_data_cache(ident, data, expire_in_days)\n\treturn data\n\nif __name__ == \"__main__\":\n\tif not CLIENT_KEY or not CLIENT_SECRET:\n\t\tprint(\"You need to fill in client_key and client_secret in the secret_data.py file\")\n\t\texit()\n\tif not REQUEST_TOKEN_URL or not BASE_AUTH_URL:\n\t\tprint(\"You need to fill in this API's specific OAuth2 URLs in this file.\")\n\t\texit()\n\ntumblr_photo_search_baseurl = \"https://api.tumblr.com/v2/blog/newsweek.tumblr.com/posts/photo\"\ntumblr_text_search_baseurl = \"https://api.tumblr.com/v2/blog/newsweek.tumblr.com/posts/text\"\n\ntumblr_photo_result = get_data_from_api(tumblr_photo_search_baseurl,\"api_key\",CLIENT_KEY)\nphoto_dict = tumblr_photo_result\nphoto_id_list = []\nphoto_timestamp_list = []\nphoto_tags_list = []\nphoto_url_list = []\nphoto_width_list = []\nphoto_height_list = []\nphoto_caption_list = []\nphoto_note_count = []\nfor i in photo_dict['response']['posts']:\n\tphoto_id_list.append(i['id'])\n\tphoto_timestamp_list.append(i['timestamp'])\n\tphoto_tags_list.append(i['tags'])\n\tphoto_caption_list.append(i['caption'])\n\tphoto_note_count.append(i['note_count'])\n\tfor j in i['photos']:\n\t\tphoto_url_list.append(j['original_size']['url'])\n\t\tphoto_width_list.append(j['original_size']['width'])\n\t\tphoto_height_list.append(j['original_size']['height'])\nphoto_dimensions_list = []\nfor i in range(len(photo_width_list)):\n\tphoto_dimensions_list.append(str(photo_width_list[i]) + \" x \" + str(photo_height_list[i]))\n\ntumblr_text_result = get_data_from_api(tumblr_text_search_baseurl,\"api_key\",CLIENT_KEY)\ntext_dict = tumblr_text_result['response']['posts']\ntext_id_list = []\ntext_date_list = []\ntext_timestamp_list = []\ntext_title_list = []\ntext_content_list = []\ntext_tags_list = []\ntext_note_count = []\nfor i in text_dict:\n\ttext_id_list.append(i['id'])\n\ttext_date_list.append(i['date'])\n\ttext_timestamp_list.append(i['timestamp'])\n\ttext_title_list.append(i['title'])\n\ttext_tags_list.append(i['tags'])\n\ttext_note_count.append(i['note_count'])\n\tfor j in i['trail']:\n\t\ttext_content_list.append((j['content']).strip('\\n'))\n\n## Make sure to run your code and write CSV files by the end of the program.\n\nwith open('tumblr_photo.csv', 'w', newline='') as outfile_photo:\n\twriter = csv.writer(outfile_photo)\n\toutfile_photo.write('\"id\",\"timestamp\",\"captions\",\"tags\",\"url\",\"dimensions\",\"note count\"\\n')\n\tfor i in 
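Aside on SI507project5_code.py above: the OAuth plumbing obscures the cache flow of get_data_from_api, which is just "derive a key, try the cache, else fetch and store with an expiry". A standalone sketch of that flow with the network call stubbed out is below; fetch_with_cache and fake_fetch are illustrative names, not the assignment's API, and only DATETIME_FORMAT is taken from the script.

from datetime import datetime

DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'  # same format string the script uses

def fetch_with_cache(cache, key, fetch_fn, expire_in_days=7):
    entry = cache.get(key.upper())
    if entry is not None:
        age = datetime.now() - datetime.strptime(entry['timestamp'], DATETIME_FORMAT)
        if age.days <= entry['expire_in_days']:
            return entry['values']          # fresh hit
        del cache[key.upper()]              # expired: drop and refetch
    data = fetch_fn()
    cache[key.upper()] = {'values': data,
                          'timestamp': datetime.now().strftime(DATETIME_FORMAT),
                          'expire_in_days': expire_in_days}
    return data

def fake_fetch():
    return {'posts': []}

cache = {}
print(fetch_with_cache(cache, 'demo', fake_fetch))   # miss: calls fake_fetch
print(fetch_with_cache(cache, 'demo', fake_fetch))   # hit: served from cache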
range(len(photo_dict['response']['posts'])):\n\t\twriter.writerow([photo_id_list[i],photo_timestamp_list[i],photo_caption_list[i],photo_tags_list[i],photo_url_list[i],photo_dimensions_list[i],photo_note_count[i]])\n\nwith open('tumblr_text.csv', 'w', newline='') as outfile_text:\n\twriter = csv.writer(outfile_text)\n\toutfile_text.write('\"id\",\"date\",\"timestamp\",\"title\",\"tags\",\"content\",\"note count\"\\n')\n\tfor i in range(len(text_dict)):\n\t\twriter.writerow([text_id_list[i],text_date_list[i],text_timestamp_list[i],text_title_list[i],text_tags_list[i],text_content_list[i],text_note_count[i]])\n","sub_path":"SI507project5_code.py","file_name":"SI507project5_code.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"468304642","text":"from pico2d import *\n\nimport game_framework\nimport game_world\nimport main_state\n\nfrom slave import Slave\nfrom cutyRab import CutyRab\nfrom defender import Defender\nfrom bokgy import Bokgy\nfrom meteo import Meteo\n\nclass UI:\n font = None\n\n def __init__(self):\n self.image = load_image('./resource/UI/UI.png')\n self.getSlave = load_image('./resource/UI/getSlave.png')\n self.getRecovery = load_image('./resource/UI/Recovery.png')\n self.mineral = load_image('./resource/UI/mineral.png')\n self.spirit = load_image('./resource/UI/spirit_v.png')\n self.selectBar = load_image('./resource/UI/selectBar.png')\n self.getCutyRab = load_image('./resource/UI/getCutyRab.png')\n self.getMeteo = load_image('./resource/UI/getMeteo.png')\n self.getDefender = load_image('./resource/UI/getDefender.png')\n self.getUpgrade = load_image('./resource/UI/getUpgrade.png')\n self.spirits = 0\n self.coin = 0\n\n if UI.font is None:\n UI.font = load_font('./ENCR10B.TTF', 25)\n\n def draw(self):\n self.image.draw(600, 500)\n self.selectBar.draw(245, 92.5)\n self.selectBar.draw(960, 92.5)\n self.mineral.draw(500, 95)\n self.spirit.draw(500, 135)\n\n self.getSlave.draw(105, 115)\n self.spirit.draw(80, 45)\n self.font.draw(110, 43, '15', (0, 0, 0)) # getSlave Cost\n\n self.getCutyRab.draw(199, 115)\n self.spirit.draw(177, 45)\n self.font.draw(207, 43, '70', (0, 0, 0)) # getCutyRab Cost\n\n self.getMeteo.draw(292, 115)\n self.spirit.draw(271, 45)\n self.font.draw(301, 43, '50', (0, 0, 0)) # getMeteo Cost\n\n\n self.getRecovery.draw(820, 115)\n self.mineral.draw(795, 45)\n self.font.draw(818, 43, '100', (0, 0, 0)) # getRecovery Cost\n\n self.getDefender.draw(914, 115)\n self.mineral.draw(888, 45)\n self.font.draw(912, 43, '200', (0, 0, 0)) # getDefender Cost\n\n self.getUpgrade.draw(1007, 115)\n self.mineral.draw(981, 45)\n self.font.draw(995, 43, '1500', (0, 0, 0)) # getUpgrade Cost\n\n self.font.draw(530, 135, 'Spirit: %d' % self.spirits, (0, 0, 0))\n self.font.draw(530, 95, 'Coin: %d' % self.coin, (0, 0, 0))\n\n #draw_rectangle(775,70,865,160)\n\n\n\n def update(self):\n pass\n\n def call(self, mosX, mosY):\n if 60 < mosX < 150 and 70 < mosY < 160 and self.spirits >= 15:\n slave = Slave()\n self.spirits -= 15\n game_world.add_object(slave, 1)\n\n elif 155 < mosX < 245 and 70 < mosY < 160 and self.spirits >= 70:\n cutyRab = CutyRab()\n cutyRabs = main_state.get_cutyRabs()\n self.spirits -= 70\n game_world.add_object(cutyRab, 1)\n cutyRabs.append(cutyRab)\n\n elif 250 < mosX < 340 and 70 < mosY < 160 and self.spirits >= 50:\n meteo = Meteo()\n self.spirits -= 50\n game_world.add_object(meteo, 1)\n\n 
elif 775 < mosX < 865 and 70 < mosY < 160 and self.coin >= 100:\n castle = main_state.get_castle()\n self.coin -= 100\n castle.hp += 100\n if castle.hp > castle.maxHP:\n castle.maxHP = castle.hp\n\n elif 870 < mosX < 960 and 70 < mosY < 160 and self.coin >= 200:\n defender = Defender()\n self.coin -= 200\n game_world.add_object(defender, 0)\n\n elif 965 < mosX < 1055 and 70 < mosY < 160 and self.coin >= 1500:\n bokgy = main_state.get_bokgy()\n if not bokgy.enhance:\n self.coin -= 1500\n bokgy.upgrade()\n\n\n\n def handle_event(self, event):\n pass\n\n","sub_path":"userInterface.py","file_name":"userInterface.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"421073036","text":"from django import forms\nfrom django.contrib.auth.models import Group\nfrom django.views.generic.edit import CreateView\nfrom django.views.generic.edit import DeleteView\nfrom django.views.generic.edit import UpdateView\nfrom django.views.generic import DetailView\nfrom django.views.generic import ListView\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.db.models import Count\n\nfrom general.views import BaseView\nfrom general.models import Task\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom userprofile.models import UserProfile\n\nimport constants as co\n\n\nclass ProfileForm(forms.ModelForm):\n def __init__(self, group_name=None, user_id=None, request=None, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n self.group_name = group_name\n self.user_id = user_id\n self.request = request\n if user_id:\n self.fields['password'].required = False\n self.fields['username'].required = False\n\n class Meta:\n model = UserProfile\n fields = ['username', 'password', 'first_name', 'last_name', 'email', 'gender',\n 'country', 'phone', 'site']\n\n def clean_site(self):\n \"\"\"Specifies default Host parameter.\"\"\"\n return self.request.get_host()\n \n def save(self, commit=True):\n if self.user_id:\n user = UserProfile.objects.get(pk=self.user_id)\n user.first_name=self.cleaned_data['first_name']\n user.last_name=self.cleaned_data['last_name']\n user.email=self.cleaned_data['email']\n user.gender=self.cleaned_data['gender']\n user.country=self.cleaned_data['country']\n user.phone=self.cleaned_data['phone']\n else:\n user = UserProfile.objects.create_user(**self.cleaned_data)\n user.groups.add(Group.objects.get(name=self.group_name))\n user.save()\n return user\n\n\nclass CreateProfileView(BaseView, CreateView):\n module_name = ''\n form_class = ProfileForm\n queryset = UserProfile.objects.all()\n template_name = 'userprofile/edit.html'\n group_name = ''\n\n def get_form_kwargs(self):\n kwargs = super(CreateProfileView, self).get_form_kwargs()\n kwargs['group_name'] = self.group_name\n kwargs['user_id'] = None\n kwargs['request'] = self.request\n return kwargs\n\n\nclass ListProfileView(BaseView, ListView):\n queryset = UserProfile.objects.all()\n template_name = 'userprofile/index.html'\n\n def get_context_data(self, **kwargs):\n context = super(ListProfileView, self).get_context_data(**kwargs)\n tasks_per_user = dict(Task.objects.all().values('owner').annotate(\n tasks=Count('owner')).values_list('owner', 'tasks'))\n # Count of user's tasks.\n context['user_tasks'] = tasks_per_user\n return context\n\n\nclass DetailProfileView(BaseView, DetailView):\n template_name = 'userprofile/detail.html' \n queryset = UserProfile.objects.all()\n\n def 
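Aside on userInterface.py above: its call handler hit-tests the mouse against hard-coded coordinate ranges, one elif per button. The same idea can be data-driven, as sketched below; UIButton and on_click are hypothetical names not used by the game, and the rectangle and cost echo the getSlave button above.

class UIButton:
    def __init__(self, x1, y1, x2, y2, cost, action):
        self.rect = (x1, y1, x2, y2)
        self.cost = cost
        self.action = action

    def hit(self, x, y):
        # Same open-interval test as the elif chain: x1 < x < x2, y1 < y < y2.
        x1, y1, x2, y2 = self.rect
        return x1 < x < x2 and y1 < y < y2

def on_click(buttons, x, y, funds):
    # Fire the first affordable button under the cursor; return remaining funds.
    for b in buttons:
        if b.hit(x, y) and funds >= b.cost:
            b.action()
            return funds - b.cost
    return funds

buttons = [UIButton(60, 70, 150, 160, 15, lambda: print('spawn slave'))]
print(on_click(buttons, 100, 100, funds=20))  # spawns, prints 5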
user_id(self):\n return self.get_object().pk\n\n\nclass UpdateProfileView(BaseView, UpdateView):\n template_name = 'userprofile/edit.html'\n form_class = ProfileForm\n queryset = UserProfile.objects.all()\n group_name = ''\n\n def get_form_kwargs(self):\n kwargs = super(UpdateProfileView, self).get_form_kwargs()\n kwargs['group_name'] = self.group_name\n kwargs['user_id'] = self.user_id()\n kwargs['request'] = self.request\n return kwargs\n\n def user_id(self):\n return self.get_object().pk\n\n\nclass RemoveProfileView(BaseView, DeleteView):\n queryset = UserProfile.objects.all()\n template_name = 'userprofile/delete.html'\n\n def get_success_url(self):\n group = self.object.get_group()\n if group == co.ADMIN_GROUP:\n return reverse_lazy('admins')\n elif group == co.EDITOR_GROUP:\n return reverse_lazy('editors')\n elif group == co.WRITER_GROUP:\n return reverse_lazy('writers')\n else:\n return reverse_lazy('customers')\n\n def form_invalid(self, form):\n messages.add_message(self.request, messages.ERROR, str(form.errors))\n return HttpResponseRedirect(self.get_success_url())\n\n def user_id(self):\n return self.get_object().pk\n\n","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"556467596","text":"import urllib2\nimport json\nimport logging\nimport operator\n\nfrom mycls.ntcls import User\nfrom mycls import dbcls\n\nfrom google.appengine.api import memcache\n\n\ndef call_api(id, api, ext):\n api_dict = {'LOL': [\"http://api.captainteemo.com/\", {'USER': 'player/na/%s', 'LEAGUES': 'player/na/%s/leagues', 'STATUS': 'player/na/%s/ingame', 'HONOR': 'player/na/%s/honor', 'IP': 'player/na/%s/influence_points'}],\n 'TWITCH': [\"https://api.twitch.tv/kraken/\", {'ACTIVE': 'streams/%s', 'CHANNEL': 'channels/%s'}]}\n url = api_dict[api][0] + api_dict[api][1][ext] % id\n try:\n r = urllib2.urlopen(url)\n except:\n logging.error(('FAILED API CALL FOR ' + api + ' API - EXT. 
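Aside on userprofile/views.py above: RemoveProfileView.get_success_url resolves a group name through an if/elif chain. The same mapping can be table-driven, as sketched below; success_url_for is an illustrative helper (not part of the app), and it assumes the module's own `constants as co` import, the pre-Django-1.10 urlresolvers path the module already uses, and the same URL names.

import constants as co                              # same module views.py imports
from django.core.urlresolvers import reverse_lazy   # legacy path, as in views.py

# Group name -> URL name; unknown groups fall through to 'customers'.
GROUP_URL_NAMES = {
    co.ADMIN_GROUP: 'admins',
    co.EDITOR_GROUP: 'editors',
    co.WRITER_GROUP: 'writers',
}

def success_url_for(group):
    return reverse_lazy(GROUP_URL_NAMES.get(group, 'customers'))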
' + ext + ' USER ' + id).upper())\n return None\n return json.loads(r.read())\n\n\ndef update_results(key, sort, func):\n userList, results = get_user_list(), []\n if func == 'HONOR':\n results = memcache.get('Honor')\n else:\n for user in userList:\n results.append(func(user))\n results = sorted(results, key=operator.attrgetter(sort), reverse=True)\n if results is not None:\n memcache.set(key, results)\n logging.info(key + ' Updated')\n\n\ndef get_user_list(update=False):\n userList = memcache.get(\"userList\")\n if userList is None or update:\n cachedUsers = []\n userList = dbcls.User.all()\n logging.info('USERS HAVE BEEN LOADED FROM DATABASE!')\n for user in userList:\n cachedUsers.append(User(user.internalName, user.displayName, user.iconID))\n memcache.set(\"userList\", cachedUsers)\n return cachedUsers\n return userList\n\n\ndef rawify(text):\n return text.replace(\" \", \"\").lower()\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"411425624","text":"from tkinter import *\nfrom tkinter import messagebox\nimport tkinter.ttk as ttk\nimport subprocess\nimport sys\nimport time\nimport io\n\ntry:\n p=subprocess.Popen([r'C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe', '-ExecutionPolicy', 'Unrestricted', '-windowstyle', 'hidden', r'C:\\Users\\odss9\\Desktop\\test.ps1'])\n p.wait()\nexcept:\n pass\n\n\n\nmaster=Tk()\nmaster.title(\"THE AD\")\nmaster.geometry(\"500x500\")\nmaster.resizable(1,0)\n\n\nLabel(master, text=\"First Name\").grid(row=0, column=0,sticky=W)\nLabel(master, text=\"Last Name\").grid(row=0, column=3, padx=10)\nLabel(master, text=\"SamAccountName\").grid(row=2, column=0, pady=10,sticky=W)\nLabel(master, text=\"Company Name\").grid(row=3, column=0, pady=10,sticky=W)\nLabel(master, text=\"Email\").grid(row=2, column=3, padx=10)\nLabel(master, text=\"Groups\").grid(row=3, column=3, pady=10)\n\ne1 = Entry(master)\ne2 = Entry(master)\ne3 = Entry(master)\ne4 = Entry(master)\n\n\ne1.grid(row=0, column=1)\ne2.grid(row=0, column=4)\ne3.grid(row=2, column=1)\ne4.grid(row=2, column=4)\n\n\ntkvar = StringVar(master)\ntkvar1= StringVar(master)\n\n##Changes to Tuple collection from Dictionary in order to maintain sequence order\n\nlist1=[]\n\nwith io.open(r'C:\\Users\\odss9\\Desktop\\cc.txt', 'rb') as f:\n myNames = [line.strip() for line in f]\n\np1=subprocess.Popen([r'C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe', '-ExecutionPolicy', 'Unrestricted', '-windowstyle', 'hidden', r'C:\\Users\\odss9\\Desktop\\test1.ps1 $x'])\np1.wait()\n\nwith io.open(r'C:\\Users\\odss9\\Desktop\\cc1.txt', 'rb') as f:\n myNames1 = [line.strip() for line in f]\n\n\ntkvar.set(myNames[0])\ntkvar1.set(myNames1[0])\n\ndef create():\n print(x)\n messagebox.showinfo(title='Status', message=\"User created successfully.\")\n \n\n\nb1 = Button(master, text=\"Create User\", command=create).grid(row=5,column=0,sticky=W,pady=50)\n\nb2 = ttk.OptionMenu(master, tkvar, myNames[0], *myNames).grid(row=3,column=1,padx=30)\n\nb3 = ttk.OptionMenu(master, tkvar1, myNames1[0], *myNames1).grid(row=3, column=4,padx=10)\n \nx=\"\"\n\ndef change_dropdown(*args):\n print(tkvar.get())\n global x\n x=tkvar.get()\n return x \n \n\n\n# link function to change dropdown\n\ntkvar.trace('w', 
change_dropdown)\n\nmaster.mainloop()\n","sub_path":"Nov3/template1.py","file_name":"template1.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"144772748","text":"# Task 1: a straight-line equation of the form y = kx + b is given as a string.\n# Find the y coordinate of the point with the given x coordinate.\n\nequation = 'y = -12x + 11111140.2121'\nx = 2.5\n# compute and print y\n\n\n# Task 2: a date is given as a string in 'dd.mm.yyyy' format.\n# Check whether the date was entered correctly.\n# Validity conditions:\n# 1. The day must convert to an integer in the range 1 to 30(31)\n# (depending on the month; February is not considered)\n# 2. The month must convert to an integer in the range 1 to 12\n# 3. The year must convert to a positive integer in the range 1 to 9999\n# 4. The length of each part of the source string must match the format \n# (i.e. 2 characters for the day, 2 for the month, 4 for the year)\n\n# Example of a correct date\ndate = '01.11.1985'\n\n# Examples of incorrect dates\ndate = '01.22.1001'\ndate = '1.12.1001'\ndate = '-2.10.3001'\n\n\n# Task 3: \"The Inverted Tower\" (an olympiad-level problem)\n#\n# The Babylonians decided to build an amazing tower,\n# one that widens towards the top and contains an infinite number of floors and rooms.\n# It is arranged as follows: the first floor has one room,\n# then come two floors with two rooms each, \n# then come three floors with three rooms each, and so on:\n# ...\n# 12 13 14\n# 9 10 11\n# 6 7 8\n# 4 5\n# 2 3\n# 1\n#\n# The tower is to be equipped with an elevator, and here is the problem:\n# given a room number, determine\n# which floor it is on and its position from the left on that floor.\n#\n# Input: the first line contains the room number N, 1 ≤ N ≤ 2 000 000 000.\n#\n# Output: two integers: the floor number and the position from the left on that floor.\n#\n# Example:\n# Input: 13\n# Output: 6 2\n#\n# Input: 11\n# Output: 5 3\n\n#1\n\nequation = 'y = -12x + 11111140.2121'\nx = 2.5\nb = 1\n\nequation = equation.split(' ')\nprint(equation)\n\nfor num in equation:\n if num == 'y':\n continue\n elif num == '=':\n continue\n elif num == '+':\n operation = True\n continue\n elif num == \"-\":\n operation = False\n continue\n elif num.find('x') >= 0:\n k = num.split('x')\n k = k[0]\n k = float(k)\n else:\n b = num\n b = float(b)\n\n\nif operation:\n function = k*x + b\n print('{} = {} * {} + {}'.format(function, k, x, b))\nelse:\n function = k*x - b\n print('{} = {} * {} - {}'.format(function, k, x, b))\n\n\n\n#2\n\ndate = '01.11.1985'\n\nmonth_max_len = 0\nmonth_name = ''\nerror = 0\nerror_d = 0\nerror_y = 0\n\nmonths = [['01','January',31],['02','February',28],['03','March',31],['04','April',30],['05','May',31],['06','June',30],['07','July',31],['08','August',31],['09','September',30],['10','October',31],['11','November',30],['12','December',31]]\n\ndate = date.split('.')\n\nfor month in months:\n if month[0] == date[1]:\n A = True\n month_max_len = month[2]\n month_name = month[1]\n break\n else:\n error = error +1\nif error == 12:\n print('Month input error')\n\nif int(date[0]) < 1 or int(date[0]) > month_max_len or len(date[0]) != 2:\n print('Day input error')\n error_d = 1\nelif int(date[2]) < 1 or int(date[2]) > 9999 or len(date[2]) != 4:\n print('Year input error')\n error_y = 1\nelif (error != 12) and (error_y != 1) and (error_d != 1):\n print('GOOD')\n\n\n#3\n\nimport math\n\nmax_flat = 2 * 10 ** 9\n\nN_Y = True\ndata = 
[]\nstart_floor_of_cell = 1\nsum_flats = 0\ncell = 1\nposition = 0\nremains = 0\nfloor = 0\nmax_flat_in_cell = 0\ncount = 2\n\nwhile sum_flats <= max_flat:\n sum_flats = sum_flats + cell ** 2\n start_floor_of_cell = start_floor_of_cell + cell - 1\n data[len(data):] = [cell, start_floor_of_cell, sum_flats]\n cell = cell + 1\n\nwhile N_Y:\n number = int(input('Enter an apartment number from 1 to 2*10^9 : '))\n\n while (number < 1) or (number > 2 * 10 ** 9):\n print('Input error')\n number = int(input('Enter an apartment number from 1 to 2*10^9 : '))\n\n while number > max_flat_in_cell:\n max_flat_in_cell = data[count]\n count = count + 3\n\n cell_of_number = data[count - 5]\n start_floor_of_number = data[count - 4]\n\n floor = start_floor_of_number + (cell_of_number - 1 - ((max_flat_in_cell - number) // cell_of_number))\n remains = ((max_flat_in_cell - number) % cell_of_number)\n\n if remains > 0:\n position = cell_of_number - remains\n\n else:\n position = cell_of_number\n\n print('Floor {}, position {} from the left'.format(floor, position))\n while True:\n answer = input('Repeat N \\ Y ? : ')\n if (answer == 'N') or (answer == 'n'):\n N_Y = False\n break\n elif (answer == 'y') or (answer == 'Y'):\n N_Y = True\n break\n else:\n print('Error')\n\n \n","sub_path":"lesson02/home_work/hw02_hard.py","file_name":"hw02_hard.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"200080309","text":"from torch_geometric.data import InMemoryDataset\nfrom torch_geometric.utils import to_undirected\nimport torch_geometric.transforms as T\nimport torch_geometric\nimport pandas as pd\nimport shutil, os\nimport os.path as osp\nimport torch\nimport numpy as np\nimport networkx as nx\nimport random\nfrom copy import deepcopy\n\nclass EmailDataset(InMemoryDataset):\n def __init__(self, root = 'dataset', transform=None, pre_transform=None):\n '''\n - name (str): name of the dataset\n - root (str): root directory to store the dataset folder\n ''' \n self.root = root\n self.edge_file = \"email-Eu-core-temporal.txt\"\n self._num_nodes = 986\n self._num_static_edges = 24929\n self._num_temporal_edges = 332334\n \n super(EmailDataset, self).__init__(self.root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n self.edge_set = torch.load(self.processed_paths[1])\n self.edge_split = torch.load(self.processed_paths[2])\n\n @property\n def raw_file_names(self):\n return [self.edge_file]\n\n @property\n def processed_file_names(self):\n return ['email_geometric_data_processed.pt', 'email_edge_set.pt', 'email_edge_split.pt']\n\n def process(self):\n np.random.seed(42)\n edge_df = pd.read_csv(self.edge_file, sep=' ', header=None, names=['src', 'dst', 'unixts']).sort_values('unixts')\n uniq_edge_df = edge_df[~edge_df.duplicated(subset=['src', 'dst'], keep='last')]\n \n G = nx.OrderedDiGraph()\n for idx, row in uniq_edge_df.iterrows():\n src, dst, timestamp = row.loc['src'], row.loc['dst'], row['unixts']\n G.add_edge(src, dst, time=timestamp)\n \n nodes = np.unique(np.concatenate([edge_df['src'], edge_df['dst']]))\n for node in nodes:\n src_times = np.array(edge_df[edge_df['src'] == node]['unixts'])\n dst_times = np.array(edge_df[edge_df['dst'] == node]['unixts'])\n all_times = np.concatenate([src_times, dst_times])\n # randomly select 256 or length of total times\n num_times = min(len(all_times), 256)\n feature = np.concatenate([np.random.permutation(all_times)[:num_times], np.zeros(256-num_times)])\n mask 
= np.array([True] * num_times + [False] * (256-num_times))\n \n G.nodes[node][\"feature\"] = feature\n G.nodes[node][\"mask\"] = mask\n \n G = nx.convert_node_labels_to_integers(G, label_attribute='original')\n \n data_edges = list(G.edges(data=True))\n data_edges = [(u, v, t['time']) for u, v, t in data_edges]\n data_edges = sorted(data_edges, key=lambda x: x[2])\n edges = np.array([(u, v) for u, v, t in data_edges])\n \n self.edge_set = set(tuple(e) for e in edges)\n assert len(self.edge_set) == self._num_static_edges\n torch.save(self.edge_set, self.processed_paths[1])\n \n edge_split = self.make_edge_split(self.edge_set, edges)\n torch.save(edge_split, self.processed_paths[2])\n \n edge_index = edge_split['train']['edge'].t()\n data = {}\n \n for i, (_, feat_dict) in enumerate(G.nodes(data=True)):\n for key, value in feat_dict.items():\n data[key] = [value] if i == 0 else data[key] + [value]\n \n for key, item in data.items():\n item = np.array(item).astype(np.float32)\n data[key] = torch.tensor(item)\n\n data['edge_index'] = edge_index.view(2, -1)\n data['num_nodes'] = G.number_of_nodes()\n data['x'] = data['feature']\n del data['feature']\n assert data['num_nodes'] == self._num_nodes\n data = torch_geometric.data.Data.from_dict(data)\n \n data = data if self.pre_transform is None else self.pre_transform(data)\n\n print('Saving...')\n torch.save(self.collate([data]), self.processed_paths[0])\n \n def get_edge_split(self):\n return self.edge_split\n \n def make_edge_split(self, edge_set, edges):\n np.random.seed(42)\n random.seed(42)\n n = self._num_static_edges\n edges = torch.tensor(edges)\n \n train = edges[:int(0.8*n)]\n valid = edges[int(0.8*n):int(0.9*n)]\n test = edges[int(0.9*n):]\n \n exist = deepcopy(self.edge_set)\n \n valid_neg = []\n for i, p in enumerate(pair_generator(range(self._num_nodes), exist)):\n if i == 2500:\n break\n valid_neg.append(p)\n \n exist |= set(valid_neg)\n \n valid_neg = torch.from_numpy(np.array(valid_neg))\n \n test_neg = []\n for i, p in enumerate(pair_generator(range(self._num_nodes), exist)):\n if i == 2500:\n break\n test_neg.append(p)\n \n test_neg = torch.from_numpy(np.array(test_neg))\n \n return {\"train\": {\"edge\": train}, \n \"valid\": {\"edge\": valid, \"edge_neg\": valid_neg}, \n \"test\": {\"edge\": test, \"edge_neg\": test_neg}}\n\n def __repr__(self):\n return '{}()'.format(self.__class__.__name__)\n\ndef pair_generator(numbers, existing_pairs=set()): \n \"\"\"Return an iterator of random pairs from a list of numbers.\"\"\" \n # Keep track of already generated pairs \n used_pairs = existing_pairs\n\n while True: \n pair = tuple(random.sample(numbers, 2))\n if pair not in used_pairs:\n used_pairs.add(pair)\n yield pair\n\nif __name__ == \"__main__\":\n dataset = EmailDataset()\n print(dataset.get_edge_split())\n data = dataset[0]\n print(data)\n print(len(dataset.edge_set))\n breakpoint()","sub_path":"email_data/.ipynb_checkpoints/data-checkpoint.py","file_name":"data-checkpoint.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"531923217","text":"import os\n\nimport django\nimport pytest\n\n\ndef pytest_configure():\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')\n django.setup()\n\n\n@pytest.fixture\ndef json_error():\n data = {'success': False, 'error': {'info': 'Invalid json'}}\n return data\n\n\n@pytest.fixture\ndef json_success():\n data = {\n \"summary_rates\": [\n {\n \"country_code\": \"US\",\n \"country\": 
\"United States\",\n \"region_code\": \"CA\",\n \"region\": \"California\",\n \"minimum_rate\": {\n \"label\": \"State Tax\",\n \"rate\": 0.065\n },\n \"average_rate\": {\n \"label\": \"Tax\",\n \"rate\": 0.0827\n }\n },\n {\n \"country_code\": \"CA\",\n \"country\": \"Canada\",\n \"region_code\": \"BC\",\n \"region\": \"British Columbia\",\n \"minimum_rate\": {\n \"label\": \"GST\",\n \"rate\": 0.05\n },\n \"average_rate\": {\n \"label\": \"PST\",\n \"rate\": 0.12\n }\n },\n {\n \"country_code\": \"UK\",\n \"country\": \"United Kingdom\",\n \"region_code\": None,\n \"region\": None,\n \"minimum_rate\": {\n \"label\": \"VAT\",\n \"rate\": 0.2\n },\n \"average_rate\": {\n \"label\": \"VAT\",\n \"rate\": 0.2\n }\n }\n ]\n }\n return data\n\n\n@pytest.fixture\ndef json_types_success():\n data = {\n \"categories\": [\n {\n \"name\": \"Clothing\",\n \"product_tax_code\": \"20010\",\n \"description\": \" All human wearing apparel suitable for general use\"\n },\n {\n \"name\": \"Software as a Service\",\n \"product_tax_code\": \"30070\",\n \"description\": \"Pre-written software, delivered electronically, but access remotely.\"\n },\n {\n \"name\": \"Digital Goods\",\n \"product_tax_code\": \"31000\",\n \"description\": \"Digital products transferred electronically, meaning obtained by the purchaser by means other than tangible storage media.\"\n },\n {\n \"name\": \"Candy\",\n \"product_tax_code\": \"40010\",\n \"description\": \"Candy and similar items\"\n },\n {\n \"name\": \"Supplements\",\n \"product_tax_code\": \"40020\",\n \"description\": \"Non-food dietary supplements\"\n },\n {\n \"name\": \"Food & Groceries\",\n \"product_tax_code\": \"40030\",\n \"description\": \"Food for humans consumption, unprepared\"\n },\n {\n \"name\": \"Soft Drinks\",\n \"product_tax_code\": \"40050\",\n \"description\": \"Soft drinks, soda, and other similar beverages. Does not include fruit juices and water.\"\n },\n {\n \"name\": \"Bottled Water\",\n \"product_tax_code\": \"40060\",\n \"description\": \"Bottled, drinkable water for human consumption.\"\n },\n {\n \"name\": \"Prepared Foods\",\n \"product_tax_code\": \"41000\",\n \"description\": \"Foods intended for on-site consumption. Ex. 
Restaurant meals.\"\n },\n {\n \"name\": \"Non-Prescription\",\n \"product_tax_code\": \"51010\",\n \"description\": \"Drugs for human use without a prescription\"\n },\n {\n \"name\": \"Prescription\",\n \"product_tax_code\": \"51020\",\n \"description\": \"Drugs for human use with a prescription\"\n },\n {\n \"name\": \"Books\",\n \"product_tax_code\": \"81100\",\n \"description\": \"Books, printed\"\n },\n {\n \"name\": \"Textbook\",\n \"product_tax_code\": \"81110\",\n \"description\": \"Textbooks, printed\"\n },\n {\n \"name\": \"Religious Books\",\n \"product_tax_code\": \"81120\",\n \"description\": \"Religious books and manuals, printed\"\n },\n {\n \"name\": \"Magazines & Subscriptions\",\n \"product_tax_code\": \"81300\",\n \"description\": \"Periodicals, printed, sold by subscription\"\n },\n {\n \"name\": \"Magazine\",\n \"product_tax_code\": \"81310\",\n \"description\": \"Periodicals, printed, sold individually\"\n },\n {\n \"name\": \"Other Exempt\",\n \"product_tax_code\": \"99999\",\n \"description\": \"Item is exempt\"\n }\n ]\n }\n return data\n\n\n@pytest.fixture\ndef json_success_for_address():\n data = {\n \"rate\": {\n \"zip\": \"05495-2086\",\n \"country\": \"US\",\n \"country_rate\": \"0.0\",\n \"state\": \"VT\",\n \"state_rate\": \"0.06\",\n \"county\": \"CHITTENDEN\",\n \"county_rate\": \"0.0\",\n \"city\": \"WILLISTON\",\n \"city_rate\": \"0.0\",\n \"combined_district_rate\": \"0.01\",\n \"combined_rate\": \"0.07\",\n \"freight_taxable\": True\n }\n }\n return data\n\n\n@pytest.fixture\ndef json_success_for_order():\n data = {\n \"tax\": {\n \"order_total_amount\": 16.5,\n \"shipping\": 1.5,\n \"taxable_amount\": 15,\n \"amount_to_collect\": 1.35,\n \"rate\": 0.09,\n \"has_nexus\": True,\n \"freight_taxable\": False,\n \"tax_source\": \"destination\",\n \"breakdown\": {\n \"taxable_amount\": 15,\n \"tax_collectable\": 1.35,\n \"combined_tax_rate\": 0.09,\n \"state_taxable_amount\": 15,\n \"state_tax_rate\": 0.0625,\n \"state_tax_collectable\": 0.94,\n \"county_taxable_amount\": 15,\n \"county_tax_rate\": 0.0025,\n \"county_tax_collectable\": 0.04,\n \"city_taxable_amount\": 0,\n \"city_tax_rate\": 0,\n \"city_tax_collectable\": 0,\n \"special_district_taxable_amount\": 15,\n \"special_tax_rate\": 0.025,\n \"special_district_tax_collectable\": 0.38,\n \"line_items\": [\n {\n \"id\": \"1\",\n \"taxable_amount\": 15,\n \"tax_collectable\": 1.35,\n \"combined_tax_rate\": 0.09,\n \"state_taxable_amount\": 15,\n \"state_sales_tax_rate\": 0.0625,\n \"state_amount\": 0.94,\n \"county_taxable_amount\": 15,\n \"county_tax_rate\": 0.0025,\n \"county_amount\": 0.04,\n \"city_taxable_amount\": 0,\n \"city_tax_rate\": 0,\n \"city_amount\": 0,\n \"special_district_taxable_amount\": 15,\n \"special_tax_rate\": 0.025,\n \"special_district_amount\": 0.38\n }\n ]\n }\n }\n }\n return data\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"146645417","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom urllib.parse import urlparse\nimport re\nimport csv\nfrom pathlib import Path\nimport traceback\nimport sys\n\nproxies = {'http': 'http://localhost:1080'}\nBS_PARSER = 'html.parser'\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 
'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,ja;q=0.6',\n 'Cache-Control': 'max-age=0',\n 'Host': 'bj.ganji.com',\n 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'\n}\ncommunity_attr_key_list = ['小区名称', '房价', '区域商圈:', '详细地址:', '建筑类型:', '物业费用:',\n '产权类别:', '容积率:', '总户数:', '绿化率:', '建筑年代:', '停车位:', '开发商:', '物业公司:',\n '经度', '纬度']\n# Change the current working directory to the directory containing this file\ncurrent_path = os.path.abspath(__file__)\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(current_dir)\n\n\ndef fetch(url, **kwargs):\n url = str(url)\n if url.startswith('//'):\n url = url.replace('//', 'http://', 1)\n while True:\n try:\n # r = requests.get(url, timeout=10, proxies=proxies, **kwargs)\n r = requests.get(url, timeout=10, **kwargs)\n while r.text.strip() == '':\n print('Empty response body, requesting again')\n r = requests.get(url, timeout=10, **kwargs)\n while '进行验证码校验' in r.text:\n print(url)\n print('Program paused, please open the link and pass the captcha check!')\n os.system(\"pause\")\n r = requests.get(url, timeout=10, **kwargs)\n return r\n except TimeoutError as e:\n print(url)\n print(e)\n print('Request timed out')\n except Exception as e:\n print(url)\n print(e)\n print('Error while fetching the page content')\n\n\ndef geocode(geo_name, city=''):\n url = (\n f'http://api.map.baidu.com/geocoding/v3/?address={geo_name}'\n f'&city={city}&output=json&ak=edUmWCLHPjvuY0OXdRrNna38b4GX30VV'\n )\n r = requests.get(url)\n r = r.json()\n if r['status'] != 0:\n print(r)\n raise Exception('Unexpected geocoding response, please inspect it')\n return [r['result']['location']['lng'], r['result']['location']['lat']]\n\n\ndef main(city):\n community_list = []\n city_file_path = Path(city+'.csv')\n is_exist = False\n if city_file_path.exists():\n is_exist = True\n with open(str(city_file_path), 'r') as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n community_list.append(row[0])\n\n city_file = open(str(city_file_path), 'a+', newline='')\n writer = csv.writer(city_file)\n if not is_exist:\n writer.writerow(community_attr_key_list)\n\n # Ganji.com city-selection homepage\n index_r = fetch('http://www.ganji.com/index.htm')\n\n # Differences among the html.parser, lxml HTML, lxml XML and html5lib parsers\n index_bs = BeautifulSoup(index_r.text, BS_PARSER)\n index_a = index_bs.find('a', text=city)\n\n # Subdomain of the selected city\n index_href = index_a['href']\n\n\n page = 0\n # TODO: change the link here to crawl rental listings of other areas\n list_href = index_href + 'chuzu/pn1'\n\n # --------\n list_r = requests.get(index_href + 'chuzu/pn1')\n list_bs = BeautifulSoup(list_r.text, BS_PARSER)\n subarea_list = [[x['href'] + 'pn1',x.get_text(strip=True)] for x in list_bs.select('.thr-list a')][1:]\n for subarea in subarea_list:\n list_href = subarea[0]\n while True:\n # rental listing pages\n page += 1\n # Cannot hit chuzu directly: the campus IP is rate-limited. Going through the browser proxy is unaffected\n # pause a bit between pages\n time.sleep(0.5)\n\n list_r = fetch(list_href)\n list_bs = BeautifulSoup(list_r.text, BS_PARSER)\n\n time.sleep(1)\n\n # ...\n \n # list of links\n list_a_list = list_bs.select('.ershoufang-list dd.dd-item.title a')\n\n # print(list_bs.select('.ershoufang-list'))\n\n # .address-eara:last-child\n list_list = [[l.select_one('dd.dd-item.title a'),\n l.select_one('.address-eara').get_text(strip=True).replace('...', '')]\n for l in list_bs.select('.ershoufang-list')]\n for [list_a, list_community] in list_list:\n # pause for captcha verification without suspending\n # time.sleep(0.5)\n if list_community in community_list:\n print(f'{list_community} is already in the list, skipping')\n continue\n try:\n # &key=¶ms=ra is garbled here\n item_href = list_a['href']\n # item_href = re.compile(r'entinfo=(\\d+)_0').findall(item_href)[0]\n item_href = 
re.compile('key=.*&').sub('', item_href)\n\n # item_r = fetch('https://jxjump.58.com/service?target=FCADV8oV3os7xtAj_6pMK7rUlrztK2I2XgGp_ir8Za1X4Ap_Zpk1zEffDjvZrKof8T9H1Etf3gzeNV-h7xP23s1aDYDtaGQMt0V1-x_7k2_EqtKo7wITX0Rw7HMRl9KE7zsZb7bPyKFQTpiGJTuRsjMqTHZzOMkNr3kS6ZtAUEnEhkw5aqSx4Uvrv63dFoqNyNhGQgafNqWfm3ZKqr6XLq3dz06ApDR5hWRpH8YZE29olTwTVbjAbE8amfg&pubid=0&apptype=10&psid=150065938206327100784721924&entinfo=40209270904961_0&cookie=|||9094ac41d9b2fdad39356072c0cc78a8&fzbref=0&key=¶ms=rankbusitime0099^desc&gjcity=bj')\n # item_r = fetch('https://jxjump.58.com/service?target=FCADV8oV3os7xtAj_6pMK7rUlrztK2I2XgGp_ir8Za1X4Ap_Zpk1zEffDjvZrKof8T9H1Etf3gzeNV-h7xP23s1aDYDtaGQMt0V1-x_7k2_EqtKo7wITX0Rw7HMRl9KE7zsZb7bPyKFQTpiGJTuRsjMqTHZzOMkNr3kS6ZtAUEnEhkw5aqSx4Uvrv63dFoqNyNhGQgafNqWfm3ZKqr6XLq3dz06ApDR5hWRpH8YZE29olTwTVbjAbE8amfg&pubid=0&apptype=10&psid=150065938206327100784721924&entinfo=40209270904961_0&gjcity=bj')\n item_r = fetch(item_href)\n item_bs = BeautifulSoup(item_r.text, BS_PARSER)\n\n # link to the residential community\n item_a = item_bs.select_one(\n 'ul.er-list-two.f-clear li.er-item.f-fl .content a')\n if item_a is None:\n print(item_href)\n # captcha handling is done inside the wrapped fetch function\n # if '验证' in item_r.text:\n # print('Program paused, please open the link and pass the captcha check!')\n # os.system(\"pause\")\n # # do not skip this community either\n # item_r = fetch(item_href)\n # item_bs = BeautifulSoup(item_r.text, BS_PARSER)\n # item_a = item_bs.select_one(\n # 'ul.er-list-two.f-clear li.er-item.f-fl .content a')\n \n print('This listing has no community info')\n continue\n community_r = fetch(item_a['href'])\n community_bs = BeautifulSoup(community_r.text, BS_PARSER)\n if community_bs.select_one('.card-top .card-title') is None:\n print('Community page error:')\n print(community_r.text)\n print(item_a['href'])\n\n community_title = community_bs.select_one(\n '.card-top .card-title')['title']\n\n print(\n f'Area: {subarea[1]}, page: {page}, {len(community_list)} communities searched, found community: {community_title}')\n if community_title in community_list:\n continue\n # strings are child nodes too\n community_price = community_bs.select_one(\n 'span.price').contents[0]\n # it is an L, not a 1\n # a nested list comprehension would work too\n # community_attr_list = [s for attr in community_bs.select('li.item.f-fl .content') for s in attr.stripped_strings]\n # attribute values\n community_attr_value_list = [re.sub(r'\\s+', ' ', attr.get_text(strip=True))\n for attr in community_bs.select('li.item.f-fl .content')]\n # attribute keys\n # community_attr_key_list = [re.sub(r'\\s+', '', attr.get_text(strip=True))\n # for attr in community_bs.select('li.item.f-fl :first-child')]\n # print(community_attr_key_list)\n # ['区域商圈:', '详细地址:', '建筑类型:', '物业费用:', '产权类别:', '容积率:', '总户数:', '绿化率:', '建筑年代:', '停车位:', '开发商:', '物业公司:']\n\n # get longitude/latitude from the address\n address = city + community_attr_value_list[1] + community_title\n\n writer.writerow([community_title, community_price,\n *community_attr_value_list, *geocode(address, city)])\n community_list.append(community_title)\n except Exception as e:\n print(e)\n ex_type, ex_val, ex_stack = sys.exc_info()\n print(ex_type)\n print(ex_val)\n for stack in traceback.extract_tb(ex_stack):\n print(stack)\n continue\n # raise Exception('stop stop stop!')\n # link to the next page\n list_href = list_bs.find('a', text='下一页')\n print(list_href)\n if list_href is None:\n break\n list_href = list_href['href']\n # --------\n\n\n\n city_file.close()\n\n\nif __name__ == \"__main__\":\n # city_list = [x.name for x in Path('D:\\Document\\HousePricing\\公司').iterdir()]\n # ['上海', '东莞', '北京', '南京', '厦门', '合肥', '大连', '天津', '广州', '成都', '杭州', '武汉', '沈阳', '济南', '深圳', '烟台', '苏州', '重庆', '长沙', '青岛']\n city_list = ['上海', '东莞', '北京', '南京', '厦门', '合肥', '大连', '天津', '广州', '成都', '杭州', '武汉', 
'沈阳', '济南', '深圳', '烟台', '苏州', '重庆', '长沙', '青岛']\n print(city_list)\n for city in city_list:\n main(city)\n","sub_path":"demo_subarea.py","file_name":"demo_subarea.py","file_ext":"py","file_size_in_byte":10813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"202584088","text":"# -*- coding: UTF-8 -*-\n# Filename: Server.py\n\nimport time\nimport queue\nimport socket\nimport sqlite3\nimport threading\n\nclass initial_server(object):\n def __init__(self, addr=\"localhost\", port=12345):\n self.addr = addr\n self.port = port\n\n self.connections = []# established connections\n self.name = {}# client sockets of established connections, \"ip:port\" strings\n self.nametoconn = {}\n self.userlist = []# logged-in users\n self.queue = queue.Queue()\n\n self.dbconn = sqlite3.connect('UserInfo.db')# user-info database\n self.dbcursor = self.dbconn.cursor()# cursor for the database connection\n # use the cursor to execute SQL statements\n self.dbcursor.execute('''CREATE TABLE IF NOT EXISTS USERINFO\n (USERNAME VARCHAR(20) PRIMARY KEY NOT NULL,\n PASSWORD VARCHAR(20) NOT NULL,\n LASTLOGIN VARCHAR(50) NOT NULL,\n STATUS INT(1) NOT NULL\n );''')\n self.dbcursor.execute(\"UPDATE USERINFO set STATUS = 0\")\n self.dbconn.commit()\n\n # server TCP socket\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n self.s.bind((self.addr, self.port)) \n print(\"Server is listening on \" + str(port) + \"...\")\n\n def listen_to_port(self):\n # the socket s listens for incoming TCP connections\n self.s.listen(10) \n while True:\n # the accepted connection is the real connection; s keeps listening\n connection_new, address = self.s.accept() \n connection_new.settimeout(0.000001) # timeout for this established connection\n add = address[0] + \":\" + str(address[1]) # IP socket: the address info is (ip, port)\n # established connections\n self.connections.append(connection_new) \n # key = client socket, value = username\n self.name[add] = add\n\n def msg_queue(self):\n while True:\n # iterate over the established connections\n for c in self.connections:\n try:\n # receive a msg from this connection\n msg_recv = eval(c.recv(1024))\n # receiving data from the client timed out\n except socket.timeout: \n continue\n except SyntaxError:\n pass\n except socket.error as err:\n # connection aborted by software / connection reset\n if err.errno == 10053 or err.errno == 10054:\n self.remove_connection(c) \n except ValueError:\n pass\n # a message was received normally\n else:\n # the (ip, port) of this connection\n addr = c.getpeername() \n # enqueue the message: (sender socket, message, sender connection)\n self.queue.put((addr, msg_recv, c)) \n if msg_recv[\"type\"] == \"msglen\":\n length = msg_recv[\"len\"]\n time.sleep(length * 0.0000001) \n received_length = 0\n while msg_recv[\"type\"] != \"usermsg\":\n try:\n msg_recv = \"\".encode()\n # until the announced length has been received\n while received_length < length: \n try:\n msg_recv_ = c.recv(length) # receive up to the given length\n msg_recv = msg_recv + msg_recv_ # the accumulated message\n received_length = received_length + len(msg_recv_) # length received so far\n msg_recv = eval(msg_recv) # parse the accumulated message\n time.sleep(length * 0.00000001)\n # timed out, keep receiving\n except socket.timeout:\n continue\n except SyntaxError:\n continue\n # the given length has been received\n else: \n break\n except socket.timeout:\n continue\n except socket.error as err:\n if err.errno == 10053 or err.errno == 10054:\n self.remove_connection(c)\n except ValueError:\n pass\n # a message of this length is a user message; enqueue it\n self.queue.put((addr, msg_recv, c))\n\n def login_request(self, msg_recv, socket_tuple):\n username = msg_recv[\"name\"]\n self.dbcursor.execute(\"SELECT * from USERINFO where USERNAME = \\\"{Uname}\\\"\".format(Uname = username)) # look up the user record by username\n # next full row of the query result\n Userinfo = self.dbcursor.fetchone()\n \n # decide whether login succeeded and reply to the client\n # the user does not exist / wrong password\n if Userinfo == None or Userinfo[1] != msg_recv[\"password\"]: \n flag = False\n back = {\"type\": \"loginBack\",\n \"name\":\"Server\",\n \"info\": \"loginFail\"}\n # user status = 1 (already logged in)\n elif Userinfo[3] == 1:\n flag = False\n back = {\"type\": \"loginBack\",\n 
\"name\":\"Server\",\n \"info\": \"loginAlready\"}\n # 登录成功\n else:\n flag = True\n address = socket_tuple[0] + \":\" + str(socket_tuple[1])\n # key=登录用户的套接字,value=用户名\n self.name[address] = username\n self.userlist.append(username)\n # 这个用户的LASTLOGIN字段(数据库里)\n self.lastlogintime = Userinfo[2]\n # 更新LASTLOGIN=这次登录的时间\n self.dbcursor.execute(\"UPDATE USERINFO set LASTLOGIN = {logintime}, STATUS = 1 where USERNAME=\\\"{Uname}\\\"\".format(\n logintime = time.time(), #time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(msg_recv[\"time\"])),\n Uname = username))\n self.dbconn.commit()\n # 成功登录,反馈给客户端\n back = {\"type\": \"loginBack\",\n \"name\":\"Server\",\n \"info\": \"loginSucc\",\n \"userlist\": self.userlist}\n forward = {\"type\": \"sysmsg\",\n \"info\": \"userlogin\",\n \"name\": username,\n \"time\": time.time(),\n \"msg\": \">> {name} enters Chatting Room\".format(name=username)}\n\n for connection in self.connections:\n # 这个连接的客户端套接字\n connection_socket = connection.getpeername()\n # 这次登录的用户的套接字,和已连接的某个套接字一样\n if connection_socket == socket_tuple:\n # 登录成功\n if flag:\n # dict:用户名——>连接\n self.nametoconn[self.name[address]] = connection #key=用户名,value=连接\n connection.send(str(back).encode())\n # 这次登录的用户的套接字是新的\n elif flag:\n connection.send(str(forward).encode())\n\n def register_request(self, msg_recv, register_socket):\n # 要注册的用户名\n username = msg_recv[\"name\"]\n # 查已有用户名\n self.dbcursor.execute(\"SELECT * from USERINFO where USERNAME=\\\"{Uname}\\\"\".format(Uname = username)) # 通过用户名检索出对应的用户实体\n Userinfo = self.dbcursor.fetchone()\n\n # 用户名不存在\n if Userinfo == None: \n # 用户信息存储\n self.dbcursor.execute(\"INSERT INTO USERINFO (USERNAME, PASSWORD, LASTLOGIN, STATUS) \\\n VALUES (\\\"{Uname}\\\", \\\"{Passwd}\\\", \\\"Never\\\", 0)\".format(Uname = username, Passwd = msg_recv[\"password\"]))\n self.dbconn.commit()\n # key=套接字,value=用户名\n self.name[register_socket] = username\n self.lastlogintime = \"Never\"\n back = {\"type\": \"rgtrBack\",\n \"name\":\"Server\",\n \"info\": \"rgtrSucc\"}\n # 用户已存在\n else:\n back = {\"type\": \"rgtrBack\",\n \"name\":\"Server\",\n \"info\": \"rgtrFail\"}\n \n # 查找转发对象连接\n for connection in self.connections:\n connection_socket = connection.getpeername()\n if connection_socket == register_socket:\n connection.send(str(back).encode())\n\n # 删除连接、用户名-套接字列表name、已登录用户名列表userlist、修改数据库的用户状态、通知其他连接的套接字\n def remove_connection(self, removed_connection):\n try:\n self.connections.remove(removed_connection) \n except ValueError:\n pass\n address = removed_connection.getpeername() \n socket_str = address[0] + \":\" + str(address[1]) \n username = self.name[socket_str]\n self.name.pop(socket_str)\n # 若用户已登录\n if username in self.userlist:\n self.userlist.remove(username)\n # 用户信息数据库,修改用户状态\n dbconn1 = sqlite3.connect('userinfo.db') \n dbcursor1 = dbconn1.cursor()\n dbcursor1.execute(\"UPDATE USERINFO set STATUS=0 where USERNAME=\\\"{Uname}\\\"\".format(Uname=username))\n dbconn1.commit()\n back = {\"type\": \"sysmsg\",\n \"info\": \"userexit\",\n \"name\": username,\n \"time\": time.time(),\n \"msg\": \"<< {name} exits Chatting Room.\".format(name=username)}\n # 通知其他用户\n for c in self.connections:\n c.send(str(back).encode())\n\n def msg_forward(self, msg_forward, addr):\n socket_str = addr[0] + \":\" + str(addr[1]) \n\n if msg_forward[\"destname\"] == \"all\":\n #给所有连接了的客户端转发msg\n for c in self.connections:\n print(\"forward \")\n c.send(str(msg_forward).encode())\n else:\n #要转发的用户名的连接\n self.nametoconn[msg_forward[\"destname\"]].send(str(msg_forward).encode())\n 
self.nametoconn[msg_forward[\"name\"]].send(str(msg_forward).encode())\n print(\"forward \")\n\n # 监听、消息处理线程\n def run(self):\n func1 = threading.Thread(target=self.listen_to_port)\n func2 = threading.Thread(target=self.msg_queue)\n func1.start()\n func2.start()\n while True:\n # 消息队列空\n if self.queue.empty():\n continue\n # 非空,取一条\n addr, msg, conn = self.queue.get()\n\n # 3种消息类型,使用3种处理函数\n # 登录:账号密码\n if msg[\"type\"] == \"login\":\n self.login_request(msg, addr)\n # 转发信息\n elif msg[\"type\"] in (\"usermsg\", \"msglen\"):\n self.msg_forward(msg, addr)\n # 注册:用户名、密码\n elif msg[\"type\"] == \"register\":\n self.register_request(msg, addr)\n\nif __name__ == '__main__':\n server = initial_server(addr=\"127.0.0.1\", port=15000)\n server.run()\n\n","sub_path":"chatroom/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"548263314","text":"# Software Engineer Intern Interview\n# \n# You are climbing a stair case. Each time you can either make 1 step or 2 \n# steps. The staircase has n steps. In how many distinct ways can you climb \n# the staircase ?\n\ndef calc(steps):\n\tprint(steps)\n\tif steps == 0:\n\t\tprint('a')\n\t\treturn 1\n\telif steps < 0:\n\t\tprint('b')\n\t\treturn 0\n\telse:\n\t\ttotal = 0\n\t\ttotal += calc(steps - 1)\n\t\ttotal += calc(steps - 2)\n\t\tprint('c')\n\t\treturn total\n\nprint(calc(4))\n","sub_path":"HackerRank/staircase .py","file_name":"staircase .py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}